Merge branch 'for-linus' of git://one.firstfloor.org/home/andi/git/linux-2.6

* 'for-linus' of git://one.firstfloor.org/home/andi/git/linux-2.6: (156 commits)
  [PATCH] x86-64: Export smp_call_function_single
  [PATCH] i386: Clean up smp_tune_scheduling()
  [PATCH] unwinder: move .eh_frame to RODATA
  [PATCH] unwinder: fully support linker generated .eh_frame_hdr section
  [PATCH] x86-64: don't use set_irq_regs()
  [PATCH] x86-64: check vector in setup_ioapic_dest to verify if need setup_IO_APIC_irq
  [PATCH] x86-64: Make ix86 default to HIGHMEM4G instead of NOHIGHMEM
  [PATCH] i386: replace kmalloc+memset with kzalloc
  [PATCH] x86-64: remove remaining pc98 code
  [PATCH] x86-64: remove unused variable
  [PATCH] x86-64: Fix constraints in atomic_add_return()
  [PATCH] x86-64: fix asm constraints in i386 atomic_add_return
  [PATCH] x86-64: Correct documentation for bzImage protocol v2.05
  [PATCH] x86-64: replace kmalloc+memset with kzalloc in MTRR code
  [PATCH] x86-64: Fix numaq build error
  [PATCH] x86-64: include/asm-x86_64/cpufeature.h isn't a userspace header
  [PATCH] unwinder: Add debugging output to the Dwarf2 unwinder
  [PATCH] x86-64: Clarify error message in GART code
  [PATCH] x86-64: Fix interrupt race in idle callback (3rd try)
  [PATCH] x86-64: Remove unwind stack pointer alignment forcing again
  ...

Fixed conflict in include/linux/uaccess.h manually

Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt
index 0543162..805db4b 100644
--- a/Documentation/DMA-API.txt
+++ b/Documentation/DMA-API.txt
@@ -77,7 +77,7 @@
 Many drivers need lots of small dma-coherent memory regions for DMA
 descriptors or I/O buffers.  Rather than allocating in units of a page
 or more using dma_alloc_coherent(), you can use DMA pools.  These work
-much like a kmem_cache_t, except that they use the dma-coherent allocator
+much like a struct kmem_cache, except that they use the dma-coherent allocator
 not __get_free_pages().  Also, they understand common hardware constraints
 for alignment, like queue heads needing to be aligned on N byte boundaries.
 
@@ -94,7 +94,7 @@
 for use with a given device.  It must be called in a context which
 can sleep.
 
-The "name" is for diagnostics (like a kmem_cache_t name); dev and size
+The "name" is for diagnostics (like a struct kmem_cache name); dev and size
 are like what you'd pass to dma_alloc_coherent().  The device's hardware
 alignment requirement for this type of data is "align" (which is expressed
 in bytes, and must be a power of two).  If your device has no boundary
@@ -431,10 +431,10 @@
 dma_alloc_noncoherent()).
 
 int
-dma_is_consistent(dma_addr_t dma_handle)
+dma_is_consistent(struct device *dev, dma_addr_t dma_handle)
 
-returns true if the memory pointed to by the dma_handle is actually
-consistent.
+returns true if the device dev is performing consistent DMA on the memory
+area pointed to by the dma_handle.
 
 int
 dma_get_cache_alignment(void)
@@ -459,7 +459,7 @@
 memory you intend to sync partially.
 
 void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
 
 Do a partial sync of memory that was allocated by
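
For illustration only (a sketch, not a file touched by this merge), the updated calling convention looks roughly like the following; the device pointer, buffer size and DMA direction are placeholders:

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>

    /* Allocate a noncoherent buffer and sync it for the device. */
    static int example_noncoherent_io(struct device *dev)
    {
            dma_addr_t handle;
            void *buf = dma_alloc_noncoherent(dev, 4096, &handle, GFP_KERNEL);

            if (!buf)
                    return -ENOMEM;

            /* ... fill buf ..., then make it visible to the device */
            dma_cache_sync(dev, buf, 4096, DMA_TO_DEVICE);

            /* dma_is_consistent() now also takes the device */
            if (dma_is_consistent(dev, handle))
                    dev_dbg(dev, "coherent memory, explicit syncs not needed\n");

            dma_free_noncoherent(dev, 4096, buf, handle);
            return 0;
    }
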
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index db9499a..36526a1 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -190,9 +190,13 @@
 ###
 # Help targets as used by the top-level makefile
 dochelp:
-	@echo  '  Linux kernel internal documentation in different formats:'
-	@echo  '  xmldocs (XML DocBook), psdocs (Postscript), pdfdocs (PDF)'
-	@echo  '  htmldocs (HTML), mandocs (man pages, use installmandocs to install)'
+	@echo  ' Linux kernel internal documentation in different formats:'
+	@echo  '  htmldocs        - HTML'
+	@echo  '  installmandocs  - install man pages generated by mandocs'
+	@echo  '  mandocs         - man pages'
+	@echo  '  pdfdocs         - PDF'
+	@echo  '  psdocs          - Postscript'
+	@echo  '  xmldocs         - XML DocBook'
 
 ###
 # Temporary files left by various tools
diff --git a/Documentation/DocBook/kernel-api.tmpl b/Documentation/DocBook/kernel-api.tmpl
index a166675..ca09491 100644
--- a/Documentation/DocBook/kernel-api.tmpl
+++ b/Documentation/DocBook/kernel-api.tmpl
@@ -418,9 +418,35 @@
 !Idrivers/parport/daisy.c
   </chapter>
 
-  <chapter id="viddev">
-     <title>Video4Linux</title>
-!Edrivers/media/video/videodev.c
+  <chapter id="message_devices">
+	<title>Message-based devices</title>
+     <sect1><title>Fusion message devices</title>
+!Edrivers/message/fusion/mptbase.c
+!Idrivers/message/fusion/mptbase.c
+!Edrivers/message/fusion/mptscsih.c
+!Idrivers/message/fusion/mptscsih.c
+!Idrivers/message/fusion/mptctl.c
+!Idrivers/message/fusion/mptspi.c
+!Idrivers/message/fusion/mptfc.c
+!Idrivers/message/fusion/mptlan.c
+     </sect1>
+     <sect1><title>I2O message devices</title>
+!Iinclude/linux/i2o.h
+!Idrivers/message/i2o/core.h
+!Edrivers/message/i2o/iop.c
+!Idrivers/message/i2o/iop.c
+!Idrivers/message/i2o/config-osm.c
+!Edrivers/message/i2o/exec-osm.c
+!Idrivers/message/i2o/exec-osm.c
+!Idrivers/message/i2o/bus-osm.c
+!Edrivers/message/i2o/device.c
+!Idrivers/message/i2o/device.c
+!Idrivers/message/i2o/driver.c
+!Idrivers/message/i2o/pci.c
+!Idrivers/message/i2o/i2o_block.c
+!Idrivers/message/i2o/i2o_scsi.c
+!Idrivers/message/i2o/i2o_proc.c
+     </sect1>
   </chapter>
 
   <chapter id="snddev">
diff --git a/Documentation/IPMI.txt b/Documentation/IPMI.txt
index 0e3924e..24dc3fc 100644
--- a/Documentation/IPMI.txt
+++ b/Documentation/IPMI.txt
@@ -365,6 +365,7 @@
        regshifts=<shift1>,<shift2>,...
        slave_addrs=<addr1>,<addr2>,...
        force_kipmid=<enable1>,<enable2>,...
+       unload_when_empty=[0|1]
 
 Each of these except si_trydefaults is a list, the first item for the
 first interface, second item for the second interface, etc.
@@ -416,6 +417,11 @@
 or users that don't want the daemon (don't need the performance, don't
 want the CPU hit) can disable it.
 
+If unload_when_empty is set to 1, the driver will be unloaded if it
+doesn't find any interfaces or all the interfaces fail to work.  The
+default is 1.  Setting it to 0 is useful with the hotmod interface, but
+is obviously only relevant when the driver is built as a module.
+
 When compiled into the kernel, the parameters can be specified on the
 kernel command line as:
 
@@ -441,6 +447,25 @@
 interrupts enabled, the driver will run VERY slowly.  Don't blame me,
 these interfaces suck.
 
+The driver supports a hot add and remove of interfaces.  This way,
+interfaces can be added or removed after the kernel is up and running.
+This is done using /sys/modules/ipmi_si/hotmod, which is a write-only
+parameter.  You write a string to this interface.  The string has the
+format:
+   <op1>[:op2[:op3...]]
+The "op"s are:
+   add|remove,kcs|bt|smic,mem|i/o,<address>[,<opt1>[,<opt2>[,...]]]
+You can specify more than one interface on the line.  The "opt"s are:
+   rsp=<regspacing>
+   rsi=<regsize>
+   rsh=<regshift>
+   irq=<irq>
+   ipmb=<ipmb slave addr>
+and these have the same meanings as discussed above.  Note that you
+can also use this on the kernel command line for a more compact format
+for specifying an interface.  Note that when removing an interface,
+only the first three parameters (si type, address type, and address)
+are used for the comparison.  Any options are ignored for removing.
 
 The SMBus Driver
 ----------------
@@ -502,7 +527,10 @@
 
   modprobe ipmi_watchdog timeout=<t> pretimeout=<t> action=<action type>
       preaction=<preaction type> preop=<preop type> start_now=x
-      nowayout=x
+      nowayout=x ifnum_to_use=n
+
+ifnum_to_use specifies which interface the watchdog timer should use.
+The default is -1, which means to pick the first one registered.
 
 The timeout is the number of seconds to the action, and the pretimeout
 is the amount of seconds before the reset that the pre-timeout panic will
@@ -624,5 +652,9 @@
 in /proc/sys/dev/ipmi/poweroff_powercycle.  Note that if the system
 does not support power cycling, it will always do the power off.
 
+The "ifnum_to_use" parameter specifies which interface the poweroff
+code should use.  The default is -1, which means to pick the first one
+registered.
+
 Note that if you have ACPI enabled, the system will prefer using ACPI to
 power off.
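
As a usage illustration (a sketch, not part of this merge), the hotmod interface described above can be driven from a small C program; the I/O and IPMB addresses are placeholders and the sysfs path is the one given in the text:

    #include <stdio.h>

    int main(void)
    {
            /* add a KCS interface at I/O port 0xca2 */
            FILE *f = fopen("/sys/modules/ipmi_si/hotmod", "w");

            if (!f)
                    return 1;
            fprintf(f, "add,kcs,i/o,0xca2,ipmb=0x20\n");
            fclose(f);

            /* removal matches only the first three fields */
            f = fopen("/sys/modules/ipmi_si/hotmod", "w");
            if (!f)
                    return 1;
            fprintf(f, "remove,kcs,i/o,0xca2\n");
            fclose(f);
            return 0;
    }
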
diff --git a/Documentation/block/as-iosched.txt b/Documentation/block/as-iosched.txt
index e2a66f8..a598fe1 100644
--- a/Documentation/block/as-iosched.txt
+++ b/Documentation/block/as-iosched.txt
@@ -24,8 +24,10 @@
 Selecting IO schedulers
 -----------------------
 To choose IO schedulers at boot time, use the argument 'elevator=deadline'.
-'noop' and 'as' (the default) are also available. IO schedulers are assigned
-globally at boot time only presently.
+'noop', 'as' and 'cfq' (the default) are also available. IO schedulers are
+assigned globally at boot time only presently. It's also possible to change
+the IO scheduler for a given device on the fly, as described in
+Documentation/block/switching-sched.txt.
 
 
 Anticipatory IO scheduler Policies
diff --git a/Documentation/devices.txt b/Documentation/devices.txt
index 70690f1..8de132a 100644
--- a/Documentation/devices.txt
+++ b/Documentation/devices.txt
@@ -3,7 +3,7 @@
 
 	     Maintained by Torben Mathiasen <device@lanana.org>
 
-		      Last revised: 15 May 2006
+		      Last revised: 29 November 2006
 
 This list is the Linux Device List, the official registry of allocated
 device numbers and /dev directory nodes for the Linux operating
@@ -94,6 +94,7 @@
 		  9 = /dev/urandom	Faster, less secure random number gen.
 		 10 = /dev/aio		Asynchronous I/O notification interface
 		 11 = /dev/kmsg		Writes to this come out as printk's
+
   1 block	RAM disk
 		  0 = /dev/ram0		First RAM disk
 		  1 = /dev/ram1		Second RAM disk
@@ -122,7 +123,7 @@
 		devices are on major 128 and above and use the PTY
 		master multiplex (/dev/ptmx) to acquire a PTY on
 		demand.
-  
+
   2 block	Floppy disks
 		  0 = /dev/fd0		Controller 0, drive 0, autodetect
 		  1 = /dev/fd1		Controller 0, drive 1, autodetect
@@ -257,7 +258,7 @@
 		129 = /dev/vcsa1	tty1 text/attribute contents
 		    ...
 		191 = /dev/vcsa63	tty63 text/attribute contents
-	
+
 		NOTE: These devices permit both read and write access.
 
   7 block	Loopback devices
@@ -411,7 +412,7 @@
 		207 = /dev/video/em8300_sp	EM8300 DVD decoder subpicture
 		208 = /dev/compaq/cpqphpc	Compaq PCI Hot Plug Controller
 		209 = /dev/compaq/cpqrid	Compaq Remote Insight Driver
-		210 = /dev/impi/bt	IMPI coprocessor block transfer	
+		210 = /dev/impi/bt	IMPI coprocessor block transfer
 		211 = /dev/impi/smic	IMPI coprocessor stream interface
 		212 = /dev/watchdogs/0	First watchdog device
 		213 = /dev/watchdogs/1	Second watchdog device
@@ -506,6 +507,7 @@
 		 33 = /dev/patmgr1	Sequencer patch manager
 		 34 = /dev/midi02	Third MIDI port
 		 50 = /dev/midi03	Fourth MIDI port
+
  14 block	BIOS harddrive callback support {2.6}
 		  0 = /dev/dos_hda	First BIOS harddrive whole disk
 		 64 = /dev/dos_hdb	Second BIOS harddrive whole disk
@@ -527,6 +529,7 @@
 
  16 char	Non-SCSI scanners
 		  0 = /dev/gs4500	Genius 4500 handheld scanner
+
  16 block	GoldStar CD-ROM
 		  0 = /dev/gscd		GoldStar CD-ROM
 
@@ -548,6 +551,7 @@
 		  0 = /dev/ttyC0	First Cyclades port
 		    ...
 		 31 = /dev/ttyC31	32nd Cyclades port
+
  19 block	"Double" compressed disk
 		  0 = /dev/double0	First compressed disk
 		    ...
@@ -563,6 +567,7 @@
 		  0 = /dev/cub0		Callout device for ttyC0
 		    ...
 		 31 = /dev/cub31	Callout device for ttyC31
+
  20 block	Hitachi CD-ROM (under development)
 		  0 = /dev/hitcd	Hitachi CD-ROM
 
@@ -582,7 +587,7 @@
 
 		This device is used on the ARM-based Acorn RiscPC.
 		Partitions are handled the same way as for IDE disks
-		(see major number 3). 
+		(see major number 3).
 
  22 char	Digiboard serial card
 		  0 = /dev/ttyD0	First Digiboard port
@@ -591,7 +596,7 @@
  22 block	Second IDE hard disk/CD-ROM interface
 		  0 = /dev/hdc		Master: whole disk (or CD-ROM)
 		 64 = /dev/hdd		Slave: whole disk (or CD-ROM)
-		
+
 		Partitions are handled the same way as for the first
 		interface (see major number 3).
 
@@ -639,6 +644,7 @@
 
  26 char	Quanta WinVision frame grabber {2.6}
 		  0 = /dev/wvisfgrab	Quanta WinVision frame grabber
+
  26 block	Second Matsushita (Panasonic/SoundBlaster) CD-ROM
 		  0 = /dev/sbpcd4	Panasonic CD-ROM controller 1 unit 0
 		  1 = /dev/sbpcd5	Panasonic CD-ROM controller 1 unit 1
@@ -670,6 +676,7 @@
 		 37 = /dev/nrawqft1	Unit 1, no rewind-on-close, no file marks
 		 38 = /dev/nrawqft2	Unit 2, no rewind-on-close, no file marks
 		 39 = /dev/nrawqft3	Unit 3, no rewind-on-close, no file marks
+
  27 block	Third Matsushita (Panasonic/SoundBlaster) CD-ROM
 		  0 = /dev/sbpcd8	Panasonic CD-ROM controller 2 unit 0
 		  1 = /dev/sbpcd9	Panasonic CD-ROM controller 2 unit 1
@@ -681,6 +688,7 @@
 		  1 = /dev/staliomem1	Second Stallion card I/O memory
 		  2 = /dev/staliomem2	Third Stallion card I/O memory
 		  3 = /dev/staliomem3	Fourth Stallion card I/O memory
+
  28 char	Atari SLM ACSI laser printer (68k/Atari)
 		  0 = /dev/slm0		First SLM laser printer
 		  1 = /dev/slm1		Second SLM laser printer
@@ -690,6 +698,7 @@
 		  1 = /dev/sbpcd13	Panasonic CD-ROM controller 3 unit 1
 		  2 = /dev/sbpcd14	Panasonic CD-ROM controller 3 unit 2
 		  3 = /dev/sbpcd15	Panasonic CD-ROM controller 3 unit 3
+
  28 block	ACSI disk (68k/Atari)
 		  0 = /dev/ada		First ACSI disk whole disk
 		 16 = /dev/adb		Second ACSI disk whole disk
@@ -750,6 +759,7 @@
  31 char	MPU-401 MIDI
 		  0 = /dev/mpu401data	MPU-401 data port
 		  1 = /dev/mpu401stat	MPU-401 status port
+
  31 block	ROM/flash memory card
 		  0 = /dev/rom0		First ROM card (rw)
 		      ...
@@ -801,7 +811,7 @@
  34 block	Fourth IDE hard disk/CD-ROM interface
 		  0 = /dev/hdg		Master: whole disk (or CD-ROM)
 		 64 = /dev/hdh		Slave: whole disk (or CD-ROM)
-		
+
 		Partitions are handled the same way as for the first
 		interface (see major number 3).
 
@@ -818,6 +828,7 @@
 		129 = /dev/smpte1	Second MIDI port, SMPTE timed
 		130 = /dev/smpte2	Third MIDI port, SMPTE timed
 		131 = /dev/smpte3	Fourth MIDI port, SMPTE timed
+
  35 block	Slow memory ramdisk
 		  0 = /dev/slram	Slow memory ramdisk
 
@@ -828,6 +839,7 @@
 		 16 = /dev/tap0		First Ethertap device
 		    ...
 		 31 = /dev/tap15	16th Ethertap device
+
  36 block	MCA ESDI hard disk
 		  0 = /dev/eda		First ESDI disk whole disk
 		 64 = /dev/edb		Second ESDI disk whole disk
@@ -882,6 +894,7 @@
 
  40 char	Matrox Meteor frame grabber {2.6}
 		  0 = /dev/mmetfgrab	Matrox Meteor frame grabber
+
  40 block	Syquest EZ135 parallel port removable drive
 		  0 = /dev/eza		Parallel EZ135 drive, whole disk
 
@@ -893,6 +906,7 @@
 
  41 char	Yet Another Micro Monitor
 		  0 = /dev/yamm		Yet Another Micro Monitor
+
  41 block	MicroSolutions BackPack parallel port CD-ROM
 		  0 = /dev/bpcd		BackPack CD-ROM
 
@@ -901,6 +915,7 @@
 		the parallel port ATAPI CD-ROM driver at major number 46.
 
  42 char	Demo/sample use
+
  42 block	Demo/sample use
 
 		This number is intended for use in sample code, as
@@ -918,6 +933,7 @@
 		  0 = /dev/ttyI0	First virtual modem
 		    ...
 		 63 = /dev/ttyI63	64th virtual modem
+
  43 block	Network block devices
 		  0 = /dev/nb0		First network block device
 		  1 = /dev/nb1		Second network block device
@@ -934,12 +950,13 @@
 		  0 = /dev/cui0		Callout device for ttyI0
 		    ...
 		 63 = /dev/cui63	Callout device for ttyI63
+
  44 block	Flash Translation Layer (FTL) filesystems
 		  0 = /dev/ftla		FTL on first Memory Technology Device
 		 16 = /dev/ftlb		FTL on second Memory Technology Device
 		 32 = /dev/ftlc		FTL on third Memory Technology Device
 		    ...
-		240 = /dev/ftlp		FTL on 16th Memory Technology Device 
+		240 = /dev/ftlp		FTL on 16th Memory Technology Device
 
 		Partitions are handled in the same way as for IDE
 		disks (see major number 3) except that the partition
@@ -958,6 +975,7 @@
 		191 = /dev/ippp63	64th SyncPPP device
 
 		255 = /dev/isdninfo	ISDN monitor interface
+
  45 block	Parallel port IDE disk devices
 		  0 = /dev/pda		First parallel port IDE disk
 		 16 = /dev/pdb		Second parallel port IDE disk
@@ -1044,6 +1062,7 @@
 		  1 = /dev/dcbri1	Second DataComm card
 		  2 = /dev/dcbri2	Third DataComm card
 		  3 = /dev/dcbri3	Fourth DataComm card
+
  52 block	Mylex DAC960 PCI RAID controller; fifth controller
 		  0 = /dev/rd/c4d0	First disk, whole disk
 		  8 = /dev/rd/c4d1	Second disk, whole disk
@@ -1093,6 +1112,7 @@
 
  55 char	DSP56001 digital signal processor
 		  0 = /dev/dsp56k	First DSP56001
+
  55 block	Mylex DAC960 PCI RAID controller; eighth controller
 		  0 = /dev/rd/c7d0	First disk, whole disk
 		  8 = /dev/rd/c7d1	Second disk, whole disk
@@ -1130,6 +1150,7 @@
 		  0 = /dev/cup0		Callout device for ttyP0
 		  1 = /dev/cup1		Callout device for ttyP1
 		    ...
+
  58 block	Reserved for logical volume manager
 
  59 char	sf firewall package
@@ -1149,6 +1170,7 @@
 		NAMING CONFLICT -- PROPOSED REVISED NAME /dev/rpda0 etc
 
  60-63 char	LOCAL/EXPERIMENTAL USE
+
  60-63 block	LOCAL/EXPERIMENTAL USE
 		Allocated for local/experimental use.  For devices not
 		assigned official numbers, these ranges should be
@@ -1434,7 +1456,6 @@
 		DAC960 (see major number 48) except that the limit on
 		partitions is 15.
 
-
  78 char	PAM Software's multimodem boards
 		  0 = /dev/ttyM0	First PAM modem
 		  1 = /dev/ttyM1	Second PAM modem
@@ -1450,7 +1471,6 @@
 		DAC960 (see major number 48) except that the limit on
 		partitions is 15.
 
-
  79 char	PAM Software's multimodem boards - alternate devices
 		  0 = /dev/cum0		Callout device for ttyM0
 		  1 = /dev/cum1		Callout device for ttyM1
@@ -1466,7 +1486,6 @@
 		DAC960 (see major number 48) except that the limit on
 		partitions is 15.
 
-
  80 char	Photometrics AT200 CCD camera
 		  0 = /dev/at200	Photometrics AT200 CCD camera
 
@@ -1679,7 +1698,7 @@
 		  1 = /dev/dcxx1	Second capture card
 		    ...
 
- 94 block IBM S/390 DASD block storage
+ 94 block	IBM S/390 DASD block storage
     		  0 = /dev/dasda First DASD device, major
     		  1 = /dev/dasda1 First DASD device, block 1
 	    	  2 = /dev/dasda2 First DASD device, block 2
@@ -1695,7 +1714,7 @@
 		  1 = /dev/ipnat	NAT control device/log file
 		  2 = /dev/ipstate	State information log file
 		  3 = /dev/ipauth	Authentication control device/log file
-		    ...		
+		    ...
 
  96 char	Parallel port ATAPI tape devices
 		  0 = /dev/pt0		First parallel port ATAPI tape
@@ -1705,7 +1724,7 @@
 		129 = /dev/npt1		Second p.p. ATAPI tape, no rewind
 		    ...
 
- 96 block Inverse NAND Flash Translation Layer
+ 96 block	Inverse NAND Flash Translation Layer
 		  0 = /dev/inftla First INFTL layer
 		 16 = /dev/inftlb Second INFTL layer
 		    ...
@@ -1937,7 +1956,6 @@
 		    ...
 
 113 block	IBM iSeries virtual CD-ROM
-
 		  0 = /dev/iseries/vcda	First virtual CD-ROM
 		  1 = /dev/iseries/vcdb	Second virtual CD-ROM
 		    ...
@@ -2059,11 +2077,12 @@
 		    ...
 
 119 char	VMware virtual network control
-		  0 = /dev/vnet0	1st virtual network
-		  1 = /dev/vnet1	2nd virtual network
+		  0 = /dev/vmnet0	1st virtual network
+		  1 = /dev/vmnet1	2nd virtual network
 		    ...
 
 120-127 char	LOCAL/EXPERIMENTAL USE
+
 120-127 block	LOCAL/EXPERIMENTAL USE
 		Allocated for local/experimental use.  For devices not
 		assigned official numbers, these ranges should be
@@ -2075,7 +2094,6 @@
 		nodes; instead they should be accessed through the
 		/dev/ptmx cloning interface.
 
-
 128 block       SCSI disk devices (128-143)
                   0 = /dev/sddy         129th SCSI disk whole disk
                  16 = /dev/sddz         130th SCSI disk whole disk
@@ -2087,7 +2105,6 @@
 		disks (see major number 3) except that the limit on
 		partitions is 15.
 
-
 129 block       SCSI disk devices (144-159)
                   0 = /dev/sdeo         145th SCSI disk whole disk
                  16 = /dev/sdep         146th SCSI disk whole disk
@@ -2123,7 +2140,6 @@
 		disks (see major number 3) except that the limit on
 		partitions is 15.
 
-
 132 block       SCSI disk devices (192-207)
                   0 = /dev/sdgk         193rd SCSI disk whole disk
                  16 = /dev/sdgl         194th SCSI disk whole disk
@@ -2135,7 +2151,6 @@
 		disks (see major number 3) except that the limit on
 		partitions is 15.
 
-
 133 block       SCSI disk devices (208-223)
                   0 = /dev/sdha         209th SCSI disk whole disk
                  16 = /dev/sdhb         210th SCSI disk whole disk
@@ -2147,7 +2162,6 @@
 		disks (see major number 3) except that the limit on
 		partitions is 15.
 
-
 134 block       SCSI disk devices (224-239)
                   0 = /dev/sdhq         225th SCSI disk whole disk
                  16 = /dev/sdhr         226th SCSI disk whole disk
@@ -2159,7 +2173,6 @@
 		disks (see major number 3) except that the limit on
 		partitions is 15.
 
-
 135 block       SCSI disk devices (240-255)
                   0 = /dev/sdig         241st SCSI disk whole disk
                  16 = /dev/sdih         242nd SCSI disk whole disk
@@ -2171,7 +2184,6 @@
 		disks (see major number 3) except that the limit on
 		partitions is 15.
 
-
 136-143 char	Unix98 PTY slaves
 		  0 = /dev/pts/0	First Unix98 pseudo-TTY
 		  1 = /dev/pts/1	Second Unix98 pesudo-TTY
@@ -2384,6 +2396,7 @@
 		    ...
 
 159 char	RESERVED
+
 159 block	RESERVED
 
 160 char	General Purpose Instrument Bus (GPIB)
@@ -2427,7 +2440,7 @@
 
 		Partitions are handled in the same way as for IDE
 		disks (see major number 3) except that the limit on
-		partitions is 31. 
+		partitions is 31.
 
 162 char	Raw block device interface
 		  0 = /dev/rawctl	Raw I/O control device
@@ -2483,7 +2496,6 @@
 
 171 char	Reserved for IEEE 1394 (Firewire)
 
-
 172 char	Moxa Intellio serial card
 		  0 = /dev/ttyMX0	First Moxa port
 		  1 = /dev/ttyMX1	Second Moxa port
@@ -2543,9 +2555,6 @@
 		 64 = /dev/usb/rio500	Diamond Rio 500
 		 65 = /dev/usb/usblcd	USBLCD Interface (info@usblcd.de)
 		 66 = /dev/usb/cpad0	Synaptics cPad (mouse/LCD)
-		 67 = /dev/usb/adutux0	1st Ontrak ADU device
-		    ...
-		 76 = /dev/usb/adutux10	10th Ontrak ADU device
 		 96 = /dev/usb/hiddev0	1st USB HID device
 		    ...
 		111 = /dev/usb/hiddev15	16th USB HID device
@@ -2558,7 +2567,7 @@
 		132 = /dev/usb/idmouse	ID Mouse (fingerprint scanner) device
 		133 = /dev/usb/sisusbvga1	First SiSUSB VGA device
 		    ...
-		140 = /dev/usb/sisusbvga8	Eigth SISUSB VGA device
+		140 = /dev/usb/sisusbvga8	Eighth SISUSB VGA device
 		144 = /dev/usb/lcd	USB LCD device
 		160 = /dev/usb/legousbtower0	1st USB Legotower device
 		    ...
@@ -2571,7 +2580,7 @@
 		  0 = /dev/uba		First USB block device
 		  8 = /dev/ubb		Second USB block device
 		 16 = /dev/ubc		Third USB block device
-		    ...
+ 		    ...
 
 181 char	Conrad Electronic parallel port radio clocks
 		  0 = /dev/pcfclock0	First Conrad radio clock
@@ -2657,7 +2666,7 @@
 		 32 = /dev/mvideo/status2	Third device
 		    ...
 		    ...
-		240 = /dev/mvideo/status15	16th device 
+		240 = /dev/mvideo/status15	16th device
 		    ...
 
 195 char	Nvidia graphics devices
@@ -2795,6 +2804,10 @@
 		    ...
 		 185 = /dev/ttyNX15		Hilscher netX serial port 15
 		 186 = /dev/ttyJ0		JTAG1 DCC protocol based serial port emulation
+		 187 = /dev/ttyUL0		Xilinx uartlite - port 0
+		    ...
+		 190 = /dev/ttyUL3		Xilinx uartlite - port 3
+		 191 = /dev/xvc0		Xen virtual console - port 0
 
 205 char	Low-density serial ports (alternate device)
 		  0 = /dev/culu0		Callout device for ttyLU0
@@ -2832,7 +2845,6 @@
 		 82 = /dev/cuvr0		Callout device for ttyVR0
 		 83 = /dev/cuvr1		Callout device for ttyVR1
 
-
 206 char	OnStream SC-x0 tape devices
 		  0 = /dev/osst0		First OnStream SCSI tape, mode 0
 		  1 = /dev/osst1		Second OnStream SCSI tape, mode 0
@@ -2922,7 +2934,6 @@
 		    ...
 
 212 char	LinuxTV.org DVB driver subsystem
-
 		  0 = /dev/dvb/adapter0/video0    first video decoder of first card
 		  1 = /dev/dvb/adapter0/audio0    first audio decoder of first card
 		  2 = /dev/dvb/adapter0/sec0      (obsolete/unused)
@@ -3008,9 +3019,9 @@
 		  2 = /dev/3270/tub2		Second 3270 terminal
 		    ...
 
-229 char	IBM iSeries virtual console
-		  0 = /dev/iseries/vtty0	First console port
-		  1 = /dev/iseries/vtty1	Second console port
+229 char	IBM iSeries/pSeries virtual console
+		  0 = /dev/hvc0			First console port
+		  1 = /dev/hvc1			Second console port
 		    ...
 
 230 char	IBM iSeries virtual tape
@@ -3083,12 +3094,14 @@
 234-239		UNASSIGNED
 
 240-254 char	LOCAL/EXPERIMENTAL USE
+
 240-254 block	LOCAL/EXPERIMENTAL USE
 		Allocated for local/experimental use.  For devices not
 		assigned official numbers, these ranges should be
 		used in order to avoid conflicting with future assignments.
 
 255 char	RESERVED
+
 255 block	RESERVED
 
 		This major is reserved to assist the expansion to a
@@ -3115,7 +3128,20 @@
 257 char	Phoenix Technologies Cryptographic Services Driver
 		  0 = /dev/ptlsec	Crypto Services Driver
 
+257 block	SSFDC Flash Translation Layer filesystem
+		  0 = /dev/ssfdca	First SSFDC layer
+		  8 = /dev/ssfdcb	Second SSFDC layer
+		 16 = /dev/ssfdcc	Third SSFDC layer
+		 24 = /dev/ssfdcd	4th SSFDC layer
+		 32 = /dev/ssfdce	5th SSFDC layer
+		 40 = /dev/ssfdcf	6th SSFDC layer
+		 48 = /dev/ssfdcg	7th SSFDC layer
+		 56 = /dev/ssfdch	8th SSFDC layer
 
+258 block	ROM/Flash read-only translation layer
+		  0 = /dev/blockrom0	First ROM card's translation layer interface
+		  1 = /dev/blockrom1	Second ROM card's translation layer interface
+		  ...
 
  ****	ADDITIONAL /dev DIRECTORY ENTRIES
 
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index eb1a6ca..790ef6f 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -124,7 +124,7 @@
 write_super_lockfs:	?
 unlockfs:		?
 statfs:			no	no	no
-remount_fs:		no	yes	maybe		(see below)
+remount_fs:		yes	yes	maybe		(see below)
 clear_inode:		no
 umount_begin:		yes	no	no
 show_options:		no				(vfsmount->sem)
diff --git a/Documentation/filesystems/fuse.txt b/Documentation/filesystems/fuse.txt
index 3d74477..345392c 100644
--- a/Documentation/filesystems/fuse.txt
+++ b/Documentation/filesystems/fuse.txt
@@ -51,6 +51,22 @@
 
   http://fuse.sourceforge.net/
 
+Filesystem type
+~~~~~~~~~~~~~~~
+
+The filesystem type given to mount(2) can be one of the following:
+
+'fuse'
+
+  This is the usual way to mount a FUSE filesystem.  The first
+  argument of the mount system call may contain an arbitrary string,
+  which is not interpreted by the kernel.
+
+'fuseblk'
+
+  The filesystem is block device based.  The first argument of the
+  mount system call is interpreted as the name of the device.
+
 Mount options
 ~~~~~~~~~~~~~
 
@@ -94,6 +110,11 @@
   The default is infinite.  Note that the size of read requests is
   limited anyway to 32 pages (which is 128kbyte on i386).
 
+'blksize=N'
+
+  Set the block size for the filesystem.  The default is 512.  This
+  option is only valid for 'fuseblk' type mounts.
+
 Control filesystem
 ~~~~~~~~~~~~~~~~~~
 
diff --git a/Documentation/filesystems/sysv-fs.txt b/Documentation/filesystems/sysv-fs.txt
index d817224..253b50d 100644
--- a/Documentation/filesystems/sysv-fs.txt
+++ b/Documentation/filesystems/sysv-fs.txt
@@ -1,11 +1,8 @@
-This is the implementation of the SystemV/Coherent filesystem for Linux.
 It implements all of
   - Xenix FS,
   - SystemV/386 FS,
   - Coherent FS.
 
-This is version beta 4.
-
 To install:
 * Answer the 'System V and Coherent filesystem support' question with 'y'
   when configuring the kernel.
@@ -28,11 +25,173 @@
   for this FS on hard disk yet.
 
 
-Please report any bugs and suggestions to
-  Bruno Haible <haible@ma2s2.mathematik.uni-karlsruhe.de>
-  Pascal Haible <haible@izfm.uni-stuttgart.de>
-  Krzysztof G. Baranowski <kgb@manjak.knm.org.pl>
+These filesystems are rather similar. Here is a comparison with Minix FS:
 
-Bruno Haible
-<haible@ma2s2.mathematik.uni-karlsruhe.de>
+* Linux fdisk reports on partitions
+  - Minix FS     0x81 Linux/Minix
+  - Xenix FS     ??
+  - SystemV FS   ??
+  - Coherent FS  0x08 AIX bootable
 
+* Size of a block or zone (data allocation unit on disk)
+  - Minix FS     1024
+  - Xenix FS     1024 (also 512 ??)
+  - SystemV FS   1024 (also 512 and 2048)
+  - Coherent FS   512
+
+* General layout: all have one boot block, one super block and
+  separate areas for inodes and for directories/data.
+  On SystemV Release 2 FS (e.g. Microport) the first track is reserved and
+  all the block numbers (including the super block) are offset by one track.
+
+* Byte ordering of "short" (16 bit entities) on disk:
+  - Minix FS     little endian  0 1
+  - Xenix FS     little endian  0 1
+  - SystemV FS   little endian  0 1
+  - Coherent FS  little endian  0 1
+  Of course, this affects only the file system, not the data of files on it!
+
+* Byte ordering of "long" (32 bit entities) on disk:
+  - Minix FS     little endian  0 1 2 3
+  - Xenix FS     little endian  0 1 2 3
+  - SystemV FS   little endian  0 1 2 3
+  - Coherent FS  PDP-11         2 3 0 1
+  Of course, this affects only the file system, not the data of files on it!
+
+* Inode on disk: "short", 0 means non-existent, the root dir ino is:
+  - Minix FS                            1
+  - Xenix FS, SystemV FS, Coherent FS   2
+
+* Maximum number of hard links to a file:
+  - Minix FS     250
+  - Xenix FS     ??
+  - SystemV FS   ??
+  - Coherent FS  >=10000
+
+* Free inode management:
+  - Minix FS                             a bitmap
+  - Xenix FS, SystemV FS, Coherent FS
+      There is a cache of a certain number of free inodes in the super-block.
+      When it is exhausted, new free inodes are found using a linear search.
+
+* Free block management:
+  - Minix FS                             a bitmap
+  - Xenix FS, SystemV FS, Coherent FS
+      Free blocks are organized in a "free list". Maybe a misleading term,
+      since it is not true that every free block contains a pointer to
+      the next free block. Rather, the free blocks are organized in chunks
+      of limited size, and every now and then a free block contains pointers
+      to the free blocks pertaining to the next chunk; the first of these
+      contains pointers and so on. The list terminates with a "block number"
+      0 on Xenix FS and SystemV FS, with a block zeroed out on Coherent FS.
+
+* Super-block location:
+  - Minix FS     block 1 = bytes 1024..2047
+  - Xenix FS     block 1 = bytes 1024..2047
+  - SystemV FS   bytes 512..1023
+  - Coherent FS  block 1 = bytes 512..1023
+
+* Super-block layout:
+  - Minix FS
+                    unsigned short s_ninodes;
+                    unsigned short s_nzones;
+                    unsigned short s_imap_blocks;
+                    unsigned short s_zmap_blocks;
+                    unsigned short s_firstdatazone;
+                    unsigned short s_log_zone_size;
+                    unsigned long s_max_size;
+                    unsigned short s_magic;
+  - Xenix FS, SystemV FS, Coherent FS
+                    unsigned short s_firstdatazone;
+                    unsigned long  s_nzones;
+                    unsigned short s_fzone_count;
+                    unsigned long  s_fzones[NICFREE];
+                    unsigned short s_finode_count;
+                    unsigned short s_finodes[NICINOD];
+                    char           s_flock;
+                    char           s_ilock;
+                    char           s_modified;
+                    char           s_rdonly;
+                    unsigned long  s_time;
+                    short          s_dinfo[4]; -- SystemV FS only
+                    unsigned long  s_free_zones;
+                    unsigned short s_free_inodes;
+                    short          s_dinfo[4]; -- Xenix FS only
+                    unsigned short s_interleave_m,s_interleave_n; -- Coherent FS only
+                    char           s_fname[6];
+                    char           s_fpack[6];
+    then they differ considerably:
+        Xenix FS
+                    char           s_clean;
+                    char           s_fill[371];
+                    long           s_magic;
+                    long           s_type;
+        SystemV FS
+                    long           s_fill[12 or 14];
+                    long           s_state;
+                    long           s_magic;
+                    long           s_type;
+        Coherent FS
+                    unsigned long  s_unique;
+    Note that Coherent FS has no magic.
+
+* Inode layout:
+  - Minix FS
+                    unsigned short i_mode;
+                    unsigned short i_uid;
+                    unsigned long  i_size;
+                    unsigned long  i_time;
+                    unsigned char  i_gid;
+                    unsigned char  i_nlinks;
+                    unsigned short i_zone[7+1+1];
+  - Xenix FS, SystemV FS, Coherent FS
+                    unsigned short i_mode;
+                    unsigned short i_nlink;
+                    unsigned short i_uid;
+                    unsigned short i_gid;
+                    unsigned long  i_size;
+                    unsigned char  i_zone[3*(10+1+1+1)];
+                    unsigned long  i_atime;
+                    unsigned long  i_mtime;
+                    unsigned long  i_ctime;
+
+* Regular file data blocks are organized as
+  - Minix FS
+               7 direct blocks
+               1 indirect block (pointers to blocks)
+               1 double-indirect block (pointer to pointers to blocks)
+  - Xenix FS, SystemV FS, Coherent FS
+              10 direct blocks
+               1 indirect block (pointers to blocks)
+               1 double-indirect block (pointer to pointers to blocks)
+               1 triple-indirect block (pointer to pointers to pointers to blocks)
+
+* Inode size, inodes per block
+  - Minix FS        32   32
+  - Xenix FS        64   16
+  - SystemV FS      64   16
+  - Coherent FS     64    8
+
+* Directory entry on disk
+  - Minix FS
+                    unsigned short inode;
+                    char name[14/30];
+  - Xenix FS, SystemV FS, Coherent FS
+                    unsigned short inode;
+                    char name[14];
+
+* Dir entry size, dir entries per block
+  - Minix FS     16/32    64/32
+  - Xenix FS     16       64
+  - SystemV FS   16       64
+  - Coherent FS  16       32
+
+* How to implement symbolic links such that the host fsck doesn't scream:
+  - Minix FS     normal
+  - Xenix FS     kludge: as regular files with  chmod 1000
+  - SystemV FS   ??
+  - Coherent FS  kludge: as regular files with  chmod 1000
+
+
+Notation: We often speak of a "block" but mean a zone (the allocation unit)
+and not the disk driver's notion of "block".
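
To make the PDP-11 "2 3 0 1" ordering above concrete, here is a small illustrative helper (not code from the sysv driver) that rebuilds a 32-bit value from the Coherent FS on-disk byte order:

    #include <linux/types.h>

    /*
     * p[] holds the four bytes as they appear on disk.  Per the table
     * above, disk byte 0 carries bits 16-23, byte 1 carries bits 24-31,
     * byte 2 carries bits 0-7 and byte 3 carries bits 8-15.
     */
    static inline u32 coherent_long_to_cpu(const u8 *p)
    {
            return ((u32)p[1] << 24) | ((u32)p[0] << 16) |
                   ((u32)p[3] <<  8) |  (u32)p[2];
    }
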
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index d34fd6a..b79bcdf 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -648,6 +648,10 @@
 	idle=		[HW]
 			Format: idle=poll or idle=halt
 
+	ignore_loglevel	[KNL]
+			Ignore loglevel setting - this will print /all/
+			kernel messages to the console. Useful for debugging.
+
 	ihash_entries=	[KNL]
 			Set number of hash buckets for inode cache.
 
@@ -712,7 +716,12 @@
 			Format: <RDP>,<reset>,<pci_scan>,<verbosity>
 
 	isolcpus=	[KNL,SMP] Isolate CPUs from the general scheduler.
-			Format: <cpu number>,...,<cpu number>
+			Format:
+			<cpu number>,...,<cpu number>
+			or
+			<cpu number>-<cpu number>  (must be a positive range in ascending order)
+			or a mixture
+			<cpu number>,...,<cpu number>-<cpu number>
 			This option can be used to specify one or more CPUs
 			to isolate from the general SMP balancing and scheduling
 			algorithms. The only way to move a process onto or off
@@ -1010,6 +1019,10 @@
 			emulation library even if a 387 maths coprocessor
 			is present.
 
+	noaliencache	[MM, NUMA] Disables the allocation of alien caches in
+			the slab allocator.  Saves per-node memory, but will
+			impact performance on real NUMA hardware.
+
 	noalign		[KNL,ARM]
 
 	noapic		[SMP,APIC] Tells the kernel to not make use of any
@@ -1288,6 +1301,7 @@
 			Param: "schedule" - profile schedule points.
 			Param: <number> - step/bucket size as a power of 2 for
 				statistical time based profiling.
+			Param: "sleep" - profile D-state sleeping (millisecs)
 
 	processor.max_cstate=	[HW,ACPI]
 			Limit processor to maximum C-state
@@ -1369,6 +1383,12 @@
 	resume=		[SWSUSP]
 			Specify the partition device for software suspend
 
+	resume_offset=	[SWSUSP]
+			Specify the offset from the beginning of the partition
+			given by "resume=" at which the swap header is located,
+			in <PAGE_SIZE> units (needed only for swap files).
+			See  Documentation/power/swsusp-and-swap-files.txt
+
 	rhash_entries=	[KNL,NET]
 			Set number of hash buckets for route cache
 
diff --git a/Documentation/power/s2ram.txt b/Documentation/power/s2ram.txt
new file mode 100644
index 0000000..b05f512
--- /dev/null
+++ b/Documentation/power/s2ram.txt
@@ -0,0 +1,56 @@
+			How to get s2ram working
+			~~~~~~~~~~~~~~~~~~~~~~~~
+			2006 Linus Torvalds
+			2006 Pavel Machek
+
+1) Check suspend.sf.net; the s2ram program there has a long whitelist of
+   "known ok" machines, along with tricks to use on each one.
+
+2) If that does not help, try reading tricks.txt and
+   video.txt. Perhaps the problem is as simple as a broken module, and
+   a simple module unload can fix it.
+
+3) You can use Linus' TRACE_RESUME infrastructure, described below.
+
+		      Using TRACE_RESUME
+		      ~~~~~~~~~~~~~~~~~~
+
+I've been working at making the machines I have able to STR, and almost
+always it's a driver that is buggy. Thank God for the suspend/resume
+debugging - the thing that Chuck tried to disable. That's often the _only_
+way to debug these things, and it's actually pretty powerful (but
+time-consuming - having to insert TRACE_RESUME() markers into the device
+driver that doesn't resume and recompile and reboot).
+
+Anyway, the way to debug this for people who are interested (have a
+machine that doesn't boot) is:
+
+ - enable PM_DEBUG, and PM_TRACE
+
+ - use a script like this:
+
+	#!/bin/sh
+	sync
+	echo 1 > /sys/power/pm_trace
+	echo mem > /sys/power/state
+
+   to suspend
+
+ - if it doesn't come back up (which is usually the problem), reboot by
+   holding the power button down, and look at the dmesg output for things
+   like
+
+	Magic number: 4:156:725
+	hash matches drivers/base/power/resume.c:28
+	hash matches device 0000:01:00.0
+
+   which means that the last trace event was just before trying to resume
+   device 0000:01:00.0. Then figure out what driver is controlling that
+   device (lspci and /sys/devices/pci* is your friend), and see if you can
+   fix it, disable it, or trace into its resume function.
+
+For example, the above happens to be the VGA device on my EVO, which I
+used to run with "radeonfb" (it's an ATI Radeon mobility). It turns out
+that "radeonfb" simply cannot resume that device - it tries to set the
+PLL's, and it just _hangs_. Using the regular VGA console and letting X
+resume it instead works fine.
diff --git a/Documentation/power/swsusp-and-swap-files.txt b/Documentation/power/swsusp-and-swap-files.txt
new file mode 100644
index 0000000..06f911a
--- /dev/null
+++ b/Documentation/power/swsusp-and-swap-files.txt
@@ -0,0 +1,60 @@
+Using swap files with software suspend (swsusp)
+	(C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
+
+The Linux kernel handles swap files almost in the same way as it handles swap
+partitions and there are only two differences between these two types of swap
+areas:
+(1) swap files need not be contiguous,
+(2) the header of a swap file is not in the first block of the partition that
+holds it.  From the swsusp's point of view (1) is not a problem, because it is
+already taken care of by the swap-handling code, but (2) has to be taken into
+consideration.
+
+In principle the location of a swap file's header may be determined with the
+help of the appropriate filesystem driver.  Unfortunately, however, it requires the
+filesystem holding the swap file to be mounted, and if this filesystem is
+journaled, it cannot be mounted during resume from disk.  For this reason to
+identify a swap file swsusp uses the name of the partition that holds the file
+and the offset from the beginning of the partition at which the swap file's
+header is located.  For convenience, this offset is expressed in <PAGE_SIZE>
+units.
+
+In order to use a swap file with swsusp, you need to:
+
+1) Create the swap file and make it active, eg.
+
+# dd if=/dev/zero of=<swap_file_path> bs=1024 count=<swap_file_size_in_k>
+# mkswap <swap_file_path>
+# swapon <swap_file_path>
+
+2) Use an application that will bmap the swap file with the help of the
+FIBMAP ioctl and determine the location of the file's swap header, as the
+offset, in <PAGE_SIZE> units, from the beginning of the partition which
+holds the swap file.
+
+3) Add the following parameters to the kernel command line:
+
+resume=<swap_file_partition> resume_offset=<swap_file_offset>
+
+where <swap_file_partition> is the partition on which the swap file is located
+and <swap_file_offset> is the offset of the swap header determined by the
+application in 2) (of course, this step may be carried out automatically
+by the same application that determines the swap file's header offset using the
+FIBMAP ioctl).
+
+OR
+
+Use a userland suspend application that will set the partition and offset
+with the help of the SNAPSHOT_SET_SWAP_AREA ioctl described in
+Documentation/power/userland-swsusp.txt (this is the only method to suspend
+to a swap file allowing the resume to be initiated from an initrd or initramfs
+image).
+
+Now, swsusp will use the swap file in the same way in which it would use a swap
+partition.  In particular, the swap file has to be active (ie. be present in
+/proc/swaps) so that it can be used for suspending.
+
+Note that if the swap file used for suspending is deleted and recreated,
+the location of its header need not be the same as before.  Thus every time
+this happens the value of the "resume_offset=" kernel command line parameter
+has to be updated.
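
A minimal userspace sketch of step 2) above (it is not one of the files added here, and real suspend utilities do more checking): it maps the first block of the swap file with FIBMAP, which needs root, and prints the offset in <PAGE_SIZE> units:

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>           /* FIBMAP, FIGETBSZ */

    int main(int argc, char **argv)
    {
            int fd, blksize, block = 0;
            long pagesize = sysconf(_SC_PAGESIZE);

            if (argc != 2)
                    return 1;
            fd = open(argv[1], O_RDONLY);
            if (fd < 0)
                    return 1;
            if (ioctl(fd, FIGETBSZ, &blksize) < 0 ||
                ioctl(fd, FIBMAP, &block) < 0) {
                    close(fd);
                    return 1;
            }
            close(fd);
            /* byte offset of the file's first block, in PAGE_SIZE units */
            printf("resume_offset=%llu\n",
                   (unsigned long long)block * blksize / pagesize);
            return 0;
    }
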
diff --git a/Documentation/power/swsusp.txt b/Documentation/power/swsusp.txt
index e635e6f..0761ff6 100644
--- a/Documentation/power/swsusp.txt
+++ b/Documentation/power/swsusp.txt
@@ -297,20 +297,12 @@
 suspend image to prevent sensitive data from being stolen after
 resume.
 
-Q: Why can't we suspend to a swap file?
+Q: Can I suspend to a swap file?
 
-A: Because accessing swap file needs the filesystem mounted, and
-filesystem might do something wrong (like replaying the journal)
-during mount.
-
-There are few ways to get that fixed:
-
-1) Probably could be solved by modifying every filesystem to support
-some kind of "really read-only!" option. Patches welcome.
-
-2) suspend2 gets around that by storing absolute positions in on-disk
-image (and blocksize), with resume parameter pointing directly to
-suspend header.
+A: Generally, yes, you can.  However, it requires you to use the "resume=" and
+"resume_offset=" kernel command line parameters, so the resume from a swap file
+cannot be initiated from an initrd or initramfs image.  See
+swsusp-and-swap-files.txt for details.
 
 Q: Is there a maximum system RAM size that is supported by swsusp?
 
diff --git a/Documentation/power/userland-swsusp.txt b/Documentation/power/userland-swsusp.txt
index 64755e9..000556c 100644
--- a/Documentation/power/userland-swsusp.txt
+++ b/Documentation/power/userland-swsusp.txt
@@ -9,9 +9,8 @@
 Now, to use the userland interface for software suspend you need special
 utilities that will read/write the system memory snapshot from/to the
 kernel.  Such utilities are available, for example, from
-<http://www.sisk.pl/kernel/utilities/suspend>.  You may want to have
-a look at them if you are going to develop your own suspend/resume
-utilities.
+<http://suspend.sourceforge.net>.  You may want to have a look at them if you
+are going to develop your own suspend/resume utilities.
 
 The interface consists of a character device providing the open(),
 release(), read(), and write() operations as well as several ioctl()
@@ -21,9 +20,9 @@
 
 The device can be open either for reading or for writing.  If open for
 reading, it is considered to be in the suspend mode.  Otherwise it is
-assumed to be in the resume mode.  The device cannot be open for reading
-and writing.  It is also impossible to have the device open more than once
-at a time.
+assumed to be in the resume mode.  The device cannot be open for simultaneous
+reading and writing.  It is also impossible to have the device open more than
+once at a time.
 
 The ioctl() commands recognized by the device are:
 
@@ -69,9 +68,46 @@
 SNAPSHOT_SET_SWAP_FILE - set the resume partition (the last ioctl() argument
 	should specify the device's major and minor numbers in the old
 	two-byte format, as returned by the stat() function in the .st_rdev
-	member of the stat structure); it is recommended to always use this
-	call, because the code to set the resume partition could be removed from
-	future kernels
+	member of the stat structure)
+
+SNAPSHOT_SET_SWAP_AREA - set the resume partition and the offset (in <PAGE_SIZE>
+	units) from the beginning of the partition at which the swap header is
+	located (the last ioctl() argument should point to a struct
+	resume_swap_area, as defined in kernel/power/power.h, containing the
+	resume device specification, as for the SNAPSHOT_SET_SWAP_FILE ioctl(),
+	and the offset); for swap partitions the offset is always 0, but it is
+	different from zero for swap files (please see
+	Documentation/power/swsusp-and-swap-files.txt for details).
+	The SNAPSHOT_SET_SWAP_AREA ioctl() is considered a replacement for
+	SNAPSHOT_SET_SWAP_FILE, which is regarded as obsolete.  It is
+	recommended to always use this call, because the code to set the resume
+	partition may be removed from future kernels
+
+SNAPSHOT_S2RAM - suspend to RAM; using this call causes the kernel to
+	immediately enter the suspend-to-RAM state, so this call must always
+	be preceded by the SNAPSHOT_FREEZE call and it is also necessary
+	to use the SNAPSHOT_UNFREEZE call after the system wakes up.  This call
+	is needed to implement the suspend-to-both mechanism in which the
+	suspend image is first created, as though the system had been suspended
+	to disk, and then the system is suspended to RAM (this makes it possible
+	to resume the system from RAM if there's enough battery power or restore
+	its state on the basis of the saved suspend image otherwise)
+
+SNAPSHOT_PMOPS - enable the usage of the pmops->prepare, pmops->enter and
+	pmops->finish methods (the in-kernel swsusp knows these as the "platform
+	method") which are needed on many machines to (among others) speed up
+	the resume by letting the BIOS skip some steps or to let the system
+	recognise the correct state of the hardware after the resume (in
+	particular on many machines this ensures that unplugged AC
+	adapters get correctly detected and that kacpid does not run wild after
+	the resume).  The last ioctl() argument can take one of the three
+	values, defined in kernel/power/power.h:
+	PMOPS_PREPARE - make the kernel carry out the
+		pm_ops->prepare(PM_SUSPEND_DISK) operation
+	PMOPS_ENTER - make the kernel power off the system by calling
+		pm_ops->enter(PM_SUSPEND_DISK)
+	PMOPS_FINISH - make the kernel carry out the
+		pm_ops->finish(PM_SUSPEND_DISK) operation
 
 The device's read() operation can be used to transfer the snapshot image from
 the kernel.  It has the following limitations:
@@ -91,10 +127,12 @@
 still frozen when the device is being closed).
 
 Currently it is assumed that the userland utilities reading/writing the
-snapshot image from/to the kernel will use a swap partition, called the resume
-partition, as storage space.  However, this is not really required, as they
-can use, for example, a special (blank) suspend partition or a file on a partition
-that is unmounted before SNAPSHOT_ATOMIC_SNAPSHOT and mounted afterwards.
+snapshot image from/to the kernel will use a swap partition, called the resume
+partition, or a swap file as storage space (if a swap file is used, the resume
+partition is the partition that holds this file).  However, this is not really
+required, as they can use, for example, a special (blank) suspend partition or
+a file on a partition that is unmounted before SNAPSHOT_ATOMIC_SNAPSHOT and
+mounted afterwards.
 
 These utilities SHOULD NOT make any assumptions regarding the ordering of
 data within the snapshot image, except for the image header that MAY be
diff --git a/MAINTAINERS b/MAINTAINERS
index 5dff268..cf24400 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1091,13 +1091,19 @@
 S:	Maintained
 
 EXT2 FILE SYSTEM
-L:	ext2-devel@lists.sourceforge.net
+L:	linux-ext4@vger.kernel.org
 S:	Maintained
 
 EXT3 FILE SYSTEM
 P:	Stephen Tweedie, Andrew Morton
 M:	sct@redhat.com, akpm@osdl.org, adilger@clusterfs.com
-L:	ext2-devel@lists.sourceforge.net
+L:	linux-ext4@vger.kernel.org
+S:	Maintained
+
+EXT4 FILE SYSTEM
+P:	Stephen Tweedie, Andrew Morton
+M:	sct@redhat.com, akpm@osdl.org, adilger@clusterfs.com
+L:	linux-ext4@vger.kernel.org
 S:	Maintained
 
 F71805F HARDWARE MONITORING DRIVER
@@ -1214,7 +1220,8 @@
 P:	Jean Delvare
 M:	khali@linux-fr.org
 L:	lm-sensors@lm-sensors.org
-W:	http://www.lm-sensors.nu/
+W:	http://www.lm-sensors.org/
+T:	quilt http://khali.linux-fr.org/devel/linux-2.6/jdelvare-hwmon/
 S:	Maintained
 
 HARDWARE RANDOM NUMBER GENERATOR CORE
@@ -1340,8 +1347,7 @@
 P:	Jean Delvare
 M:	khali@linux-fr.org
 L:	i2c@lm-sensors.org
-W:	http://www.lm-sensors.nu/
-T:	quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
+T:	quilt http://khali.linux-fr.org/devel/linux-2.6/jdelvare-i2c/
 S:	Maintained
 
 I2O
@@ -1673,7 +1679,7 @@
 JOURNALLING LAYER FOR BLOCK DEVICES (JBD)
 P:	Stephen Tweedie, Andrew Morton
 M:	sct@redhat.com, akpm@osdl.org
-L:	ext2-devel@lists.sourceforge.net
+L:	linux-ext4@vger.kernel.org
 S:	Maintained
 
 K8TEMP HARDWARE MONITORING DRIVER
@@ -2913,7 +2919,6 @@
 SUN3/3X
 P:	Sam Creasey
 M:	sammy@sammy.net
-L:	sun3-list@redhat.com
 W:	http://sammy.net/sun3/
 S:	Maintained
 
@@ -3454,6 +3459,12 @@
 T:	git git://oss.sgi.com:8090/xfs/xfs-2.6
 S:	Supported
 
+XILINX UARTLITE SERIAL DRIVER
+P:	Peter Korsgaard
+M:	jacmet@sunsite.dk
+L:	linux-serial@vger.kernel.org
+S:	Maintained
+
 X86 3-LEVEL PAGING (PAE) SUPPORT
 P:	Ingo Molnar
 M:	mingo@redhat.com
diff --git a/README b/README
index 3e26472..c055615 100644
--- a/README
+++ b/README
@@ -1,4 +1,4 @@
-	Linux kernel release 2.6.xx <http://kernel.org>
+	Linux kernel release 2.6.xx <http://kernel.org/>
 
 These are the release notes for Linux version 2.6.  Read them carefully,
 as they tell you what this is all about, explain how to install the
@@ -22,15 +22,17 @@
 
   Although originally developed first for 32-bit x86-based PCs (386 or higher),
   today Linux also runs on (at least) the Compaq Alpha AXP, Sun SPARC and
-  UltraSPARC, Motorola 68000, PowerPC, PowerPC64, ARM, Hitachi SuperH,
+  UltraSPARC, Motorola 68000, PowerPC, PowerPC64, ARM, Hitachi SuperH, Cell,
   IBM S/390, MIPS, HP PA-RISC, Intel IA-64, DEC VAX, AMD x86-64, AXIS CRIS,
-  and Renesas M32R architectures.
+  Xtensa, AVR32 and Renesas M32R architectures.
 
   Linux is easily portable to most general-purpose 32- or 64-bit architectures
   as long as they have a paged memory management unit (PMMU) and a port of the
   GNU C compiler (gcc) (part of The GNU Compiler Collection, GCC). Linux has
   also been ported to a number of architectures without a PMMU, although
   functionality is then obviously somewhat limited.
+  Linux has also been ported to itself. You can now run the kernel as a
+  userspace application - this is called UserMode Linux (UML).
 
 DOCUMENTATION:
 
@@ -113,6 +115,7 @@
    version 2.6.12.2 and want to jump to 2.6.12.3, you must first
    reverse the 2.6.12.2 patch (that is, patch -R) _before_ applying
    the 2.6.12.3 patch.
+   You can read more on this in Documentation/applying-patches.txt
 
  - Make sure you have no stale .o files and dependencies lying around:
 
@@ -161,6 +164,7 @@
    only ask you for the answers to new questions.
 
  - Alternate configuration commands are:
+	"make config"      Plain text interface.
 	"make menuconfig"  Text based color menus, radiolists & dialogs.
 	"make xconfig"     X windows (Qt) based configuration tool.
 	"make gconfig"     X windows (Gtk) based configuration tool.
@@ -303,8 +307,9 @@
 
  - If you compiled the kernel with CONFIG_KALLSYMS you can send the dump
    as is, otherwise you will have to use the "ksymoops" program to make
-   sense of the dump.  This utility can be downloaded from
-   ftp://ftp.<country>.kernel.org/pub/linux/utils/kernel/ksymoops.
+   sense of the dump (but compiling with CONFIG_KALLSYMS is usually preferred).
+   This utility can be downloaded from
+   ftp://ftp.<country>.kernel.org/pub/linux/utils/kernel/ksymoops/ .
    Alternately you can do the dump lookup by hand:
 
  - In debugging dumps like the above, it helps enormously if you can
@@ -336,7 +341,7 @@
 
    If you for some reason cannot do the above (you have a pre-compiled
    kernel image or similar), telling me as much about your setup as
-   possible will help. 
+   possible will help.  Please read the REPORTING-BUGS document for details.
 
  - Alternately, you can use gdb on a running kernel. (read-only; i.e. you
    cannot change values or set break points.) To do this, first compile the
diff --git a/REPORTING-BUGS b/REPORTING-BUGS
index f9da827..ac02e42 100644
--- a/REPORTING-BUGS
+++ b/REPORTING-BUGS
@@ -40,7 +40,9 @@
 [1.] One line summary of the problem:
 [2.] Full description of the problem/report:
 [3.] Keywords (i.e., modules, networking, kernel):
-[4.] Kernel version (from /proc/version):
+[4.] Kernel information
+[4.1.] Kernel version (from /proc/version):
+[4.2.] Kernel .config file:
 [5.] Most recent kernel version which did not have the bug:
 [6.] Output of Oops.. message (if applicable) with symbolic information
      resolved (see Documentation/oops-tracing.txt)
diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c
index ffb7d54..3c10b9a 100644
--- a/arch/alpha/kernel/pci.c
+++ b/arch/alpha/kernel/pci.c
@@ -516,10 +516,11 @@
 		if (bus == 0 && dfn == 0) {
 			hose = pci_isa_hose;
 		} else {
-			dev = pci_find_slot(bus, dfn);
+			dev = pci_get_bus_and_slot(bus, dfn);
 			if (!dev)
 				return -ENODEV;
 			hose = dev->sysdata;
+			pci_dev_put(dev);
 		}
 	}
 
diff --git a/arch/alpha/kernel/sys_miata.c b/arch/alpha/kernel/sys_miata.c
index b8b817f..910b43c 100644
--- a/arch/alpha/kernel/sys_miata.c
+++ b/arch/alpha/kernel/sys_miata.c
@@ -183,11 +183,15 @@
 
 	if((slot == 7) && (PCI_FUNC(dev->devfn) == 3)) {
 		u8 irq=0;
-
-		if(pci_read_config_byte(pci_find_slot(dev->bus->number, dev->devfn & ~(7)), 0x40,&irq)!=PCIBIOS_SUCCESSFUL)
+		struct pci_dev *pdev = pci_get_slot(dev->bus, dev->devfn & ~7);
+		if(pdev == NULL || pci_read_config_byte(pdev, 0x40,&irq) != PCIBIOS_SUCCESSFUL) {
+			pci_dev_put(pdev);
 			return -1;
-		else	
+		}
+		else	{
+			pci_dev_put(pdev);
 			return irq;
+		}
 	}
 
 	return COMMON_TABLE_LOOKUP;
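
The conversions above all follow the same refcounted-lookup pattern; as a sketch in isolation (the bus/devfn values and the config read are placeholders, not code from this merge):

    #include <linux/pci.h>

    static int example_read_vendor(unsigned int bus, unsigned int devfn,
                                   u16 *vendor)
    {
            /* pci_get_bus_and_slot() takes a reference on the device... */
            struct pci_dev *pdev = pci_get_bus_and_slot(bus, devfn);
            int ret;

            if (!pdev)
                    return -ENODEV;
            ret = pci_read_config_word(pdev, PCI_VENDOR_ID, vendor);
            /* ...which the caller must drop when it is done with it */
            pci_dev_put(pdev);
            return ret;
    }
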
diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c
index 93744ba..e7594a7 100644
--- a/arch/alpha/kernel/sys_nautilus.c
+++ b/arch/alpha/kernel/sys_nautilus.c
@@ -200,7 +200,7 @@
 	bus = pci_scan_bus(0, alpha_mv.pci_ops, hose);
 	hose->bus = bus;
 
-	irongate = pci_find_slot(0, 0);
+	irongate = pci_get_bus_and_slot(0, 0);
 	bus->self = irongate;
 	bus->resource[1] = &irongate_mem;
 
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 8871529..8aa9db8 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -108,7 +108,7 @@
 
 	/* If we're in an interrupt context, or have no user context,
 	   we must not take the fault.  */
-	if (!mm || in_interrupt())
+	if (!mm || in_atomic())
 		goto no_context;
 
 #ifdef CONFIG_ALPHA_LARGE_VMALLOC
diff --git a/arch/arm/common/sharpsl_pm.c b/arch/arm/common/sharpsl_pm.c
index 605dedf..b359974 100644
--- a/arch/arm/common/sharpsl_pm.c
+++ b/arch/arm/common/sharpsl_pm.c
@@ -60,16 +60,16 @@
 static int sharpsl_fatal_check(void);
 static int sharpsl_average_value(int ad);
 static void sharpsl_average_clear(void);
-static void sharpsl_charge_toggle(void *private_);
-static void sharpsl_battery_thread(void *private_);
+static void sharpsl_charge_toggle(struct work_struct *private_);
+static void sharpsl_battery_thread(struct work_struct *private_);
 
 
 /*
  * Variables
  */
 struct sharpsl_pm_status sharpsl_pm;
-DECLARE_WORK(toggle_charger, sharpsl_charge_toggle, NULL);
-DECLARE_WORK(sharpsl_bat, sharpsl_battery_thread, NULL);
+DECLARE_DELAYED_WORK(toggle_charger, sharpsl_charge_toggle);
+DECLARE_DELAYED_WORK(sharpsl_bat, sharpsl_battery_thread);
 DEFINE_LED_TRIGGER(sharpsl_charge_led_trigger);
 
 
@@ -116,7 +116,7 @@
 EXPORT_SYMBOL(sharpsl_battery_kick);
 
 
-static void sharpsl_battery_thread(void *private_)
+static void sharpsl_battery_thread(struct work_struct *private_)
 {
 	int voltage, percent, apm_status, i = 0;
 
@@ -128,7 +128,7 @@
 	/* Corgi cannot confirm when battery fully charged so periodically kick! */
 	if (!sharpsl_pm.machinfo->batfull_irq && (sharpsl_pm.charge_mode == CHRG_ON)
 			&& time_after(jiffies, sharpsl_pm.charge_start_time +  SHARPSL_CHARGE_ON_TIME_INTERVAL))
-		schedule_work(&toggle_charger);
+		schedule_delayed_work(&toggle_charger, 0);
 
 	while(1) {
 		voltage = sharpsl_pm.machinfo->read_devdata(SHARPSL_BATT_VOLT);
@@ -212,7 +212,7 @@
 	sharpsl_pm_led(SHARPSL_LED_OFF);
 	sharpsl_pm.charge_mode = CHRG_OFF;
 
-	schedule_work(&sharpsl_bat);
+	schedule_delayed_work(&sharpsl_bat, 0);
 }
 
 static void sharpsl_charge_error(void)
@@ -222,7 +222,7 @@
 	sharpsl_pm.charge_mode = CHRG_ERROR;
 }
 
-static void sharpsl_charge_toggle(void *private_)
+static void sharpsl_charge_toggle(struct work_struct *private_)
 {
 	dev_dbg(sharpsl_pm.dev, "Toggling Charger at time: %lx\n", jiffies);
 
@@ -254,7 +254,7 @@
 	else if (sharpsl_pm.charge_mode == CHRG_ON)
 		sharpsl_charge_off();
 
-	schedule_work(&sharpsl_bat);
+	schedule_delayed_work(&sharpsl_bat, 0);
 }
 
 
@@ -279,10 +279,10 @@
 			sharpsl_charge_off();
 	} else if (sharpsl_pm.full_count < 2) {
 		dev_dbg(sharpsl_pm.dev, "Charge Full: Count too low\n");
-		schedule_work(&toggle_charger);
+		schedule_delayed_work(&toggle_charger, 0);
 	} else if (time_after(jiffies, sharpsl_pm.charge_start_time + SHARPSL_CHARGE_FINISH_TIME)) {
 		dev_dbg(sharpsl_pm.dev, "Charge Full: Interrupt generated too slowly - retry.\n");
-		schedule_work(&toggle_charger);
+		schedule_delayed_work(&toggle_charger, 0);
 	} else {
 		sharpsl_charge_off();
 		sharpsl_pm.charge_mode = CHRG_DONE;
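
For orientation (illustrative, not taken from the patch): the conversion above gives the handlers the new work_func_t prototype and turns the plain work items into delayed ones, so the former schedule_work() calls become schedule_delayed_work() with a zero delay. A minimal sketch with made-up names:

#include <linux/workqueue.h>

static void example_handler(struct work_struct *work);	/* new prototype */
static DECLARE_DELAYED_WORK(example_work, example_handler);

static void example_handler(struct work_struct *work)
{
	/* old form was: static void example_handler(void *private_) */
}

static void kick_example(void)
{
	/* delay of 0 jiffies: run as soon as the workqueue gets to it */
	schedule_delayed_work(&example_work, 0);
}
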
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 48cf7ff..f38a60a 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -11,6 +11,7 @@
 #include <linux/signal.h>
 #include <linux/ptrace.h>
 #include <linux/personality.h>
+#include <linux/freezer.h>
 
 #include <asm/cacheflush.h>
 #include <asm/ucontext.h>
diff --git a/arch/arm/mach-omap1/board-h3.c b/arch/arm/mach-omap1/board-h3.c
index f225a08..9d2346f 100644
--- a/arch/arm/mach-omap1/board-h3.c
+++ b/arch/arm/mach-omap1/board-h3.c
@@ -323,7 +323,8 @@
 
 	cancel_delayed_work(&irda_config->gpio_expa);
 	PREPARE_WORK(&irda_config->gpio_expa, set_trans_mode, &mode);
-	schedule_work(&irda_config->gpio_expa);
+#error this is not permitted - mode is an argument variable
+	schedule_delayed_work(&irda_config->gpio_expa, 0);
 
 	return 0;
 }
diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c
index dbc555d..cbe909b 100644
--- a/arch/arm/mach-omap1/board-nokia770.c
+++ b/arch/arm/mach-omap1/board-nokia770.c
@@ -74,7 +74,7 @@
 	.rows		= 8,
 	.cols		= 8,
 	.keymap		= nokia770_keymap,
-	.keymapsize	= ARRAY_SIZE(nokia770_keymap)
+	.keymapsize	= ARRAY_SIZE(nokia770_keymap),
 	.delay		= 4,
 };
 
@@ -191,7 +191,7 @@
 		printk("HP connected\n");
 }
 
-static void codec_delayed_power_down(void *arg)
+static void codec_delayed_power_down(struct work_struct *work)
 {
 	down(&audio_pwr_sem);
 	if (audio_pwr_state == -1)
@@ -200,7 +200,7 @@
 	up(&audio_pwr_sem);
 }
 
-static DECLARE_WORK(codec_power_down_work, codec_delayed_power_down, NULL);
+static DECLARE_DELAYED_WORK(codec_power_down_work, codec_delayed_power_down);
 
 static void nokia770_audio_pwr_down(void)
 {
diff --git a/arch/arm/mach-omap1/devices.c b/arch/arm/mach-omap1/devices.c
index a611c3b..6dcd10a 100644
--- a/arch/arm/mach-omap1/devices.c
+++ b/arch/arm/mach-omap1/devices.c
@@ -55,7 +55,7 @@
 
 /*-------------------------------------------------------------------------*/
 
-#if	defined(CONFIG_OMAP_RTC) || defined(CONFIG_OMAP_RTC)
+#if defined(CONFIG_RTC_DRV_OMAP) || defined(CONFIG_RTC_DRV_OMAP_MODULE)
 
 #define	OMAP_RTC_BASE		0xfffb4800
 
diff --git a/arch/arm/mach-omap1/leds-osk.c b/arch/arm/mach-omap1/leds-osk.c
index 3b29e59..0cbf1b0 100644
--- a/arch/arm/mach-omap1/leds-osk.c
+++ b/arch/arm/mach-omap1/leds-osk.c
@@ -35,7 +35,7 @@
 
 static u8 tps_leds_change;
 
-static void tps_work(void *unused)
+static void tps_work(struct work_struct *unused)
 {
 	for (;;) {
 		u8	leds;
@@ -61,7 +61,7 @@
 	}
 }
 
-static DECLARE_WORK(work, tps_work, NULL);
+static DECLARE_WORK(work, tps_work);
 
 #ifdef	CONFIG_OMAP_OSK_MISTRAL
 
diff --git a/arch/arm/mach-omap2/board-h4.c b/arch/arm/mach-omap2/board-h4.c
index 26a95a6..3b1ad1d 100644
--- a/arch/arm/mach-omap2/board-h4.c
+++ b/arch/arm/mach-omap2/board-h4.c
@@ -206,7 +206,8 @@
 
 	cancel_delayed_work(&irda_config->gpio_expa);
 	PREPARE_WORK(&irda_config->gpio_expa, set_trans_mode, &mode);
-	schedule_work(&irda_config->gpio_expa);
+#error this is not permitted - mode is an argument variable
+	schedule_delayed_work(&irda_config->gpio_expa, 0);
 
 	return 0;
 }
diff --git a/arch/arm/mach-pxa/akita-ioexp.c b/arch/arm/mach-pxa/akita-ioexp.c
index 1b39874..12d2fe0 100644
--- a/arch/arm/mach-pxa/akita-ioexp.c
+++ b/arch/arm/mach-pxa/akita-ioexp.c
@@ -36,11 +36,11 @@
 
 static int max7310_write(struct i2c_client *client, int address, int data);
 static struct i2c_client max7310_template;
-static void akita_ioexp_work(void *private_);
+static void akita_ioexp_work(struct work_struct *private_);
 
 static struct device *akita_ioexp_device;
 static unsigned char ioexp_output_value = AKITA_IOEXP_IO_OUT;
-DECLARE_WORK(akita_ioexp, akita_ioexp_work, NULL);
+DECLARE_WORK(akita_ioexp, akita_ioexp_work);
 
 
 /*
@@ -158,7 +158,7 @@
 EXPORT_SYMBOL(akita_set_ioexp);
 EXPORT_SYMBOL(akita_reset_ioexp);
 
-static void akita_ioexp_work(void *private_)
+static void akita_ioexp_work(struct work_struct *private_)
 {
 	if (akita_ioexp_device)
 		max7310_set_ouputs(akita_ioexp_device, ioexp_output_value);
diff --git a/arch/arm/mach-s3c2410/dma.c b/arch/arm/mach-s3c2410/dma.c
index 3d211dc..01abb0a 100644
--- a/arch/arm/mach-s3c2410/dma.c
+++ b/arch/arm/mach-s3c2410/dma.c
@@ -40,7 +40,7 @@
 
 /* io map for dma */
 static void __iomem *dma_base;
-static kmem_cache_t *dma_kmem;
+static struct kmem_cache *dma_kmem;
 
 struct s3c24xx_dma_selection dma_sel;
 
@@ -1271,7 +1271,7 @@
 
 /* kmem cache implementation */
 
-static void s3c2410_dma_cache_ctor(void *p, kmem_cache_t *c, unsigned long f)
+static void s3c2410_dma_cache_ctor(void *p, struct kmem_cache *c, unsigned long f)
 {
 	memset(p, 0, sizeof(struct s3c2410_dma_buf));
 }
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 5e658a8..9fd6d2e 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -230,7 +230,7 @@
 	 * If we're in an interrupt or have no user
 	 * context, we must not take the fault..
 	 */
-	if (in_interrupt() || !mm)
+	if (in_atomic() || !mm)
 		goto no_context;
 
 	/*
diff --git a/arch/arm26/kernel/ecard.c b/arch/arm26/kernel/ecard.c
index 047d0a4..43dd41b 100644
--- a/arch/arm26/kernel/ecard.c
+++ b/arch/arm26/kernel/ecard.c
@@ -620,12 +620,10 @@
 	struct ex_ecid cid;
 	int i, rc = -ENOMEM;
 
-	ec = kmalloc(sizeof(ecard_t), GFP_KERNEL);
+	ec = kzalloc(sizeof(ecard_t), GFP_KERNEL);
 	if (!ec)
 		goto nomem;
 
-	memset(ec, 0, sizeof(ecard_t));
-
 	ec->slot_no	= slot;
 	ec->type        = type;
 	ec->irq		= NO_IRQ;
diff --git a/arch/arm26/mm/fault.c b/arch/arm26/mm/fault.c
index a1f6d8a..93c0cee 100644
--- a/arch/arm26/mm/fault.c
+++ b/arch/arm26/mm/fault.c
@@ -215,7 +215,7 @@
 	 * If we're in an interrupt or have no user
 	 * context, we must not take the fault..
 	 */
-	if (in_interrupt() || !mm)
+	if (in_atomic() || !mm)
 		goto no_context;
 
 	down_read(&mm->mmap_sem);
diff --git a/arch/arm26/mm/memc.c b/arch/arm26/mm/memc.c
index 34def63..f290158 100644
--- a/arch/arm26/mm/memc.c
+++ b/arch/arm26/mm/memc.c
@@ -24,7 +24,7 @@
 
 #define MEMC_TABLE_SIZE (256*sizeof(unsigned long))
 
-kmem_cache_t *pte_cache, *pgd_cache;
+struct kmem_cache *pte_cache, *pgd_cache;
 int page_nr;
 
 /*
@@ -162,12 +162,12 @@
 {
 }
 
-static void pte_cache_ctor(void *pte, kmem_cache_t *cache, unsigned long flags)
+static void pte_cache_ctor(void *pte, struct kmem_cache *cache, unsigned long flags)
 {
 	memzero(pte, sizeof(pte_t) * PTRS_PER_PTE);
 }
 
-static void pgd_cache_ctor(void *pgd, kmem_cache_t *cache, unsigned long flags)
+static void pgd_cache_ctor(void *pgd, struct kmem_cache *cache, unsigned long flags)
 {
 	memzero(pgd + MEMC_TABLE_SIZE, USER_PTRS_PER_PGD * sizeof(pgd_t));
 }
diff --git a/arch/avr32/kernel/kprobes.c b/arch/avr32/kernel/kprobes.c
index ca41fc1..d0abbca 100644
--- a/arch/avr32/kernel/kprobes.c
+++ b/arch/avr32/kernel/kprobes.c
@@ -154,6 +154,7 @@
 	return 1;
 
 no_kprobe:
+	preempt_enable_no_resched();
 	return ret;
 }
 
diff --git a/arch/avr32/kernel/signal.c b/arch/avr32/kernel/signal.c
index 3309665..0ec14854 100644
--- a/arch/avr32/kernel/signal.c
+++ b/arch/avr32/kernel/signal.c
@@ -15,7 +15,7 @@
 #include <linux/errno.h>
 #include <linux/ptrace.h>
 #include <linux/unistd.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 
 #include <asm/uaccess.h>
 #include <asm/ucontext.h>
diff --git a/arch/avr32/mm/dma-coherent.c b/arch/avr32/mm/dma-coherent.c
index 44ab8a7..b68d669 100644
--- a/arch/avr32/mm/dma-coherent.c
+++ b/arch/avr32/mm/dma-coherent.c
@@ -11,7 +11,7 @@
 #include <asm/addrspace.h>
 #include <asm/cacheflush.h>
 
-void dma_cache_sync(void *vaddr, size_t size, int direction)
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size, int direction)
 {
 	/*
 	 * No need to sync an uncached area
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
index 934c510..c73e91f 100644
--- a/arch/cris/mm/fault.c
+++ b/arch/cris/mm/fault.c
@@ -232,7 +232,7 @@
 	 * context, we must not take the fault..
 	 */
 
-	if (in_interrupt() || !mm)
+	if (in_atomic() || !mm)
 		goto no_context;
 
 	down_read(&mm->mmap_sem);
diff --git a/arch/frv/kernel/futex.c b/arch/frv/kernel/futex.c
index eae874a..14f64b0 100644
--- a/arch/frv/kernel/futex.c
+++ b/arch/frv/kernel/futex.c
@@ -10,9 +10,9 @@
  */
 
 #include <linux/futex.h>
+#include <linux/uaccess.h>
 #include <asm/futex.h>
 #include <asm/errno.h>
-#include <asm/uaccess.h>
 
 /*
  * the various futex operations; MMU fault checking is ignored under no-MMU
@@ -200,7 +200,7 @@
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
 		return -EFAULT;
 
-	inc_preempt_count();
+	pagefault_disable();
 
 	switch (op) {
 	case FUTEX_OP_SET:
@@ -223,7 +223,7 @@
 		break;
 	}
 
-	dec_preempt_count();
+	pagefault_enable();
 
 	if (!ret) {
 		switch (cmp) {
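
Illustrative sketch (not from the patch): pagefault_disable()/pagefault_enable() replace the open-coded preempt-count bumps above. Inside the pair, a faulting user access returns -EFAULT instead of sleeping in the fault handler, which is also why the arch fault paths in this merge test in_atomic() rather than in_interrupt(). A hypothetical helper showing the shape of such a region:

#include <linux/uaccess.h>

/* Hypothetical helper: fetch one user word without ever sleeping. */
static int peek_user_word(int __user *uaddr, int *val)
{
	int ret;

	pagefault_disable();
	ret = __get_user(*val, uaddr);	/* -EFAULT on fault, never blocks here */
	pagefault_enable();

	return ret;
}
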
diff --git a/arch/frv/kernel/setup.c b/arch/frv/kernel/setup.c
index a8c61da..1a5eb6c 100644
--- a/arch/frv/kernel/setup.c
+++ b/arch/frv/kernel/setup.c
@@ -947,7 +947,7 @@
 	if (LOADER_TYPE && INITRD_START) {
 		if (INITRD_START + INITRD_SIZE <= (low_top_pfn << PAGE_SHIFT)) {
 			reserve_bootmem(INITRD_START, INITRD_SIZE);
-			initrd_start = INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
+			initrd_start = INITRD_START + PAGE_OFFSET;
 			initrd_end = initrd_start + INITRD_SIZE;
 		}
 		else {
diff --git a/arch/frv/kernel/signal.c b/arch/frv/kernel/signal.c
index b8a5882..85baeae 100644
--- a/arch/frv/kernel/signal.c
+++ b/arch/frv/kernel/signal.c
@@ -21,7 +21,7 @@
 #include <linux/ptrace.h>
 #include <linux/unistd.h>
 #include <linux/personality.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <asm/ucontext.h>
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c
index 8b3eb50..3f12296 100644
--- a/arch/frv/mm/fault.c
+++ b/arch/frv/mm/fault.c
@@ -78,7 +78,7 @@
 	 * If we're in an interrupt or have no user
 	 * context, we must not take the fault..
 	 */
-	if (in_interrupt() || !mm)
+	if (in_atomic() || !mm)
 		goto no_context;
 
 	down_read(&mm->mmap_sem);
diff --git a/arch/frv/mm/pgalloc.c b/arch/frv/mm/pgalloc.c
index f76dd03..19b13be 100644
--- a/arch/frv/mm/pgalloc.c
+++ b/arch/frv/mm/pgalloc.c
@@ -18,7 +18,7 @@
 #include <asm/cacheflush.h>
 
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((aligned(PAGE_SIZE)));
-kmem_cache_t *pgd_cache;
+struct kmem_cache *pgd_cache;
 
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
@@ -100,7 +100,7 @@
 		set_page_private(next, (unsigned long) pprev);
 }
 
-void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
+void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
 {
 	unsigned long flags;
 
@@ -120,7 +120,7 @@
 }
 
 /* never called when PTRS_PER_PMD > 1 */
-void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
+void pgd_dtor(void *pgd, struct kmem_cache *cache, unsigned long unused)
 {
 	unsigned long flags; /* can be called from interrupt context */
 
diff --git a/arch/h8300/kernel/setup.c b/arch/h8300/kernel/setup.c
index 1077b71..6adf8f4 100644
--- a/arch/h8300/kernel/setup.c
+++ b/arch/h8300/kernel/setup.c
@@ -116,7 +116,7 @@
 #endif
 #else
 	if ((memory_end < CONFIG_BLKDEV_RESERVE_ADDRESS) && 
-	    (memory_end > CONFIG_BLKDEV_RESERVE_ADDRESS)
+	    (memory_end > CONFIG_BLKDEV_RESERVE_ADDRESS))
 	    /* overlap userarea */
 	    memory_end = CONFIG_BLKDEV_RESERVE_ADDRESS; 
 #endif
diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c
index 7787f70..0295560 100644
--- a/arch/h8300/kernel/signal.c
+++ b/arch/h8300/kernel/signal.c
@@ -38,7 +38,7 @@
 #include <linux/personality.h>
 #include <linux/tty.h>
 #include <linux/binfmts.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 
 #include <asm/setup.h>
 #include <asm/uaccess.h>
diff --git a/arch/i386/kernel/acpi/cstate.c b/arch/i386/kernel/acpi/cstate.c
index 4664b55..12e937c 100644
--- a/arch/i386/kernel/acpi/cstate.c
+++ b/arch/i386/kernel/acpi/cstate.c
@@ -156,10 +156,8 @@
 
 static void __exit ffh_cstate_exit(void)
 {
-	if (cpu_cstate_entry) {
-		free_percpu(cpu_cstate_entry);
-		cpu_cstate_entry = NULL;
-	}
+	free_percpu(cpu_cstate_entry);
+	cpu_cstate_entry = NULL;
 }
 
 arch_initcall(ffh_cstate_init);
diff --git a/arch/i386/kernel/cpu/mcheck/non-fatal.c b/arch/i386/kernel/cpu/mcheck/non-fatal.c
index 1f9153a..6b5d351 100644
--- a/arch/i386/kernel/cpu/mcheck/non-fatal.c
+++ b/arch/i386/kernel/cpu/mcheck/non-fatal.c
@@ -51,10 +51,10 @@
 	}
 }
 
-static void mce_work_fn(void *data);
-static DECLARE_WORK(mce_work, mce_work_fn, NULL);
+static void mce_work_fn(struct work_struct *work);
+static DECLARE_DELAYED_WORK(mce_work, mce_work_fn);
 
-static void mce_work_fn(void *data)
+static void mce_work_fn(struct work_struct *work)
 { 
 	on_each_cpu(mce_checkregs, NULL, 1, 1);
 	schedule_delayed_work(&mce_work, MCE_RATE);
diff --git a/arch/i386/kernel/cpu/mcheck/therm_throt.c b/arch/i386/kernel/cpu/mcheck/therm_throt.c
index bad8b44..065005c 100644
--- a/arch/i386/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/i386/kernel/cpu/mcheck/therm_throt.c
@@ -116,7 +116,6 @@
 	return sysfs_create_group(&sys_dev->kobj, &thermal_throttle_attr_group);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static __cpuinit void thermal_throttle_remove_dev(struct sys_device *sys_dev)
 {
 	return sysfs_remove_group(&sys_dev->kobj, &thermal_throttle_attr_group);
@@ -153,7 +152,6 @@
 {
 	.notifier_call = thermal_throttle_cpu_callback,
 };
-#endif /* CONFIG_HOTPLUG_CPU */
 
 static __init int thermal_throttle_init_device(void)
 {
diff --git a/arch/i386/kernel/cpuid.c b/arch/i386/kernel/cpuid.c
index 5c5d450..db6dd20 100644
--- a/arch/i386/kernel/cpuid.c
+++ b/arch/i386/kernel/cpuid.c
@@ -166,7 +166,6 @@
 	return err;
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int cpuid_class_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
@@ -186,7 +185,6 @@
 {
 	.notifier_call = cpuid_class_cpu_callback,
 };
-#endif /* !CONFIG_HOTPLUG_CPU */
 
 static int __init cpuid_init(void)
 {
diff --git a/arch/i386/kernel/crash.c b/arch/i386/kernel/crash.c
index 144b432..a5e0e99 100644
--- a/arch/i386/kernel/crash.c
+++ b/arch/i386/kernel/crash.c
@@ -31,68 +31,6 @@
 /* This keeps a track of which one is crashing cpu. */
 static int crashing_cpu;
 
-static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
-							       size_t data_len)
-{
-	struct elf_note note;
-
-	note.n_namesz = strlen(name) + 1;
-	note.n_descsz = data_len;
-	note.n_type   = type;
-	memcpy(buf, &note, sizeof(note));
-	buf += (sizeof(note) +3)/4;
-	memcpy(buf, name, note.n_namesz);
-	buf += (note.n_namesz + 3)/4;
-	memcpy(buf, data, note.n_descsz);
-	buf += (note.n_descsz + 3)/4;
-
-	return buf;
-}
-
-static void final_note(u32 *buf)
-{
-	struct elf_note note;
-
-	note.n_namesz = 0;
-	note.n_descsz = 0;
-	note.n_type   = 0;
-	memcpy(buf, &note, sizeof(note));
-}
-
-static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
-{
-	struct elf_prstatus prstatus;
-	u32 *buf;
-
-	if ((cpu < 0) || (cpu >= NR_CPUS))
-		return;
-
-	/* Using ELF notes here is opportunistic.
-	 * I need a well defined structure format
-	 * for the data I pass, and I need tags
-	 * on the data to indicate what information I have
-	 * squirrelled away.  ELF notes happen to provide
-	 * all of that, so there is no need to invent something new.
-	 */
-	buf = (u32*)per_cpu_ptr(crash_notes, cpu);
-	if (!buf)
-		return;
-	memset(&prstatus, 0, sizeof(prstatus));
-	prstatus.pr_pid = current->pid;
-	elf_core_copy_regs(&prstatus.pr_reg, regs);
-	buf = append_elf_note(buf, "CORE", NT_PRSTATUS, &prstatus,
-				sizeof(prstatus));
-	final_note(buf);
-}
-
-static void crash_save_self(struct pt_regs *regs)
-{
-	int cpu;
-
-	cpu = safe_smp_processor_id();
-	crash_save_this_cpu(regs, cpu);
-}
-
 #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
 static atomic_t waiting_for_crash_ipi;
 
@@ -121,7 +59,7 @@
 		crash_fixup_ss_esp(&fixed_regs, regs);
 		regs = &fixed_regs;
 	}
-	crash_save_this_cpu(regs, cpu);
+	crash_save_cpu(regs, cpu);
 	disable_local_APIC();
 	atomic_dec(&waiting_for_crash_ipi);
 	/* Assume hlt works */
@@ -195,5 +133,5 @@
 #if defined(CONFIG_X86_IO_APIC)
 	disable_IO_APIC();
 #endif
-	crash_save_self(regs);
+	crash_save_cpu(regs, safe_smp_processor_id());
 }
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index 7f015a7..e21dcde 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -34,6 +34,7 @@
 #include <linux/pci.h>
 #include <linux/msi.h>
 #include <linux/htirq.h>
+#include <linux/freezer.h>
 
 #include <asm/io.h>
 #include <asm/smp.h>
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index fc79e1e..af1d533 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -184,7 +184,7 @@
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
 	mutex_lock(&kprobe_mutex);
-	free_insn_slot(p->ainsn.insn);
+	free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
 	mutex_unlock(&kprobe_mutex);
 }
 
@@ -333,7 +333,7 @@
 		return 1;
 
 ss_probe:
-#ifndef CONFIG_PREEMPT
+#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM)
 	if (p->ainsn.boostable == 1 && !p->post_handler){
 		/* Boost up -- we can execute copied instructions directly */
 		reset_current_kprobe();
diff --git a/arch/i386/kernel/microcode.c b/arch/i386/kernel/microcode.c
index 23f5984..9723466 100644
--- a/arch/i386/kernel/microcode.c
+++ b/arch/i386/kernel/microcode.c
@@ -703,7 +703,6 @@
 	.resume = mc_sysdev_resume,
 };
 
-#ifdef CONFIG_HOTPLUG_CPU
 static __cpuinit int
 mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
 {
@@ -726,7 +725,6 @@
 static struct notifier_block mc_cpu_notifier = {
 	.notifier_call = mc_cpu_callback,
 };
-#endif
 
 static int __init microcode_init (void)
 {
diff --git a/arch/i386/kernel/msr.c b/arch/i386/kernel/msr.c
index fd45059..1d1a56c 100644
--- a/arch/i386/kernel/msr.c
+++ b/arch/i386/kernel/msr.c
@@ -249,7 +249,6 @@
 	return err;
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int msr_class_cpu_callback(struct notifier_block *nfb,
 				unsigned long action, void *hcpu)
 {
@@ -270,7 +269,6 @@
 {
 	.notifier_call = msr_class_cpu_callback,
 };
-#endif
 
 static int __init msr_init(void)
 {
diff --git a/arch/i386/kernel/reboot.c b/arch/i386/kernel/reboot.c
index 84278e0..3514b41 100644
--- a/arch/i386/kernel/reboot.c
+++ b/arch/i386/kernel/reboot.c
@@ -12,6 +12,7 @@
 #include <linux/dmi.h>
 #include <linux/ctype.h>
 #include <linux/pm.h>
+#include <linux/reboot.h>
 #include <asm/uaccess.h>
 #include <asm/apic.h>
 #include <asm/desc.h>
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 695d53f..79df6e6 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -448,8 +448,7 @@
 	if (LOADER_TYPE && INITRD_START) {
 		if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
 			reserve_bootmem(INITRD_START, INITRD_SIZE);
-			initrd_start =
-				INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
+			initrd_start = INITRD_START + PAGE_OFFSET;
 			initrd_end = initrd_start+INITRD_SIZE;
 		}
 		else {
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c
index 1b080ab..5285aff 100644
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -693,6 +693,10 @@
 		put_cpu();
 		return -EBUSY;
 	}
+
+	/* Can deadlock when called with interrupts disabled */
+	WARN_ON(irqs_disabled());
+
 	spin_lock_bh(&call_lock);
 	__smp_call_function_single(cpu, func, info, nonatomic, wait);
 	spin_unlock_bh(&call_lock);
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index b4e6f32..4bf0e3c 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -1071,13 +1071,15 @@
 
 struct warm_boot_cpu_info {
 	struct completion *complete;
+	struct work_struct task;
 	int apicid;
 	int cpu;
 };
 
-static void __cpuinit do_warm_boot_cpu(void *p)
+static void __cpuinit do_warm_boot_cpu(struct work_struct *work)
 {
-	struct warm_boot_cpu_info *info = p;
+	struct warm_boot_cpu_info *info =
+		container_of(work, struct warm_boot_cpu_info, task);
 	do_boot_cpu(info->apicid, info->cpu);
 	complete(info->complete);
 }
@@ -1086,7 +1088,6 @@
 {
 	DECLARE_COMPLETION_ONSTACK(done);
 	struct warm_boot_cpu_info info;
-	struct work_struct task;
 	int	apicid, ret;
 	struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
 
@@ -1111,7 +1112,7 @@
 	info.complete = &done;
 	info.apicid = apicid;
 	info.cpu = cpu;
-	INIT_WORK(&task, do_warm_boot_cpu, &info);
+	INIT_WORK(&info.task, do_warm_boot_cpu);
 
 	tsc_sync_disabled = 1;
 
@@ -1119,7 +1120,7 @@
 	clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
 			KERNEL_PGD_PTRS);
 	flush_tlb_all();
-	schedule_work(&task);
+	schedule_work(&info.task);
 	wait_for_completion(&done);
 
 	tsc_sync_disabled = 0;
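
The smpboot change above is the general recipe for passing context to the new-style handlers: embed the work_struct in the caller's own structure and recover it with container_of(). A self-contained sketch of the same shape, with invented names:

#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/kernel.h>

struct boot_request {
	struct work_struct task;	/* embedded work item */
	struct completion done;
	int cpu;
};

static void boot_request_fn(struct work_struct *work)
{
	struct boot_request *req = container_of(work, struct boot_request, task);

	printk(KERN_INFO "handling request for cpu %d\n", req->cpu);
	complete(&req->done);
}

static void submit_boot_request(int cpu)
{
	struct boot_request req = { .cpu = cpu };

	init_completion(&req.done);
	INIT_WORK(&req.task, boot_request_fn);
	schedule_work(&req.task);
	wait_for_completion(&req.done);	/* the work item lives on this stack */
}
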
diff --git a/arch/i386/kernel/sysenter.c b/arch/i386/kernel/sysenter.c
index 92849c7..7de9117 100644
--- a/arch/i386/kernel/sysenter.c
+++ b/arch/i386/kernel/sysenter.c
@@ -136,7 +136,7 @@
 		goto up_fail;
 	}
 
-	vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (!vma) {
 		ret = -ENOMEM;
 		goto up_fail;
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index c447807..68de48e 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -464,7 +464,7 @@
 		u32 lock_owner;
 		int lock_owner_depth;
 	} die = {
-		.lock =			SPIN_LOCK_UNLOCKED,
+		.lock =			__SPIN_LOCK_UNLOCKED(die.lock),
 		.lock_owner =		-1,
 		.lock_owner_depth =	0
 	};
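
Illustrative only: SPIN_LOCK_UNLOCKED hands every user the same anonymous initializer, which defeats lockdep's per-lock class tracking, so the hunk above switches to the named __SPIN_LOCK_UNLOCKED() form. The usual choices look like this (names invented):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(simple_lock);	/* preferred for a standalone lock */

static struct {
	spinlock_t lock;
	int owner;
} tracked = {
	.lock  = __SPIN_LOCK_UNLOCKED(tracked.lock),	/* unique lockdep class */
	.owner = -1,
};
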
diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
index 7f22e03..1bbe45d 100644
--- a/arch/i386/kernel/tsc.c
+++ b/arch/i386/kernel/tsc.c
@@ -216,7 +216,7 @@
 static unsigned int cpufreq_init = 0;
 static struct work_struct cpufreq_delayed_get_work;
 
-static void handle_cpufreq_delayed_get(void *v)
+static void handle_cpufreq_delayed_get(struct work_struct *work)
 {
 	unsigned int cpu;
 
@@ -305,7 +305,7 @@
 {
 	int ret;
 
-	INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
+	INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get);
 	ret = cpufreq_register_notifier(&time_cpufreq_notifier_block,
 					CPUFREQ_TRANSITION_NOTIFIER);
 	if (!ret)
diff --git a/arch/i386/mm/highmem.c b/arch/i386/mm/highmem.c
index f9f647c..e0fa6cb 100644
--- a/arch/i386/mm/highmem.c
+++ b/arch/i386/mm/highmem.c
@@ -32,7 +32,7 @@
 	unsigned long vaddr;
 
 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-	inc_preempt_count();
+	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
 
@@ -50,26 +50,22 @@
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
 	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
 
-#ifdef CONFIG_DEBUG_HIGHMEM
-	if (vaddr >= PAGE_OFFSET && vaddr < (unsigned long)high_memory) {
-		dec_preempt_count();
-		preempt_check_resched();
-		return;
-	}
-
-	if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
-		BUG();
-#endif
 	/*
 	 * Force other mappings to Oops if they'll try to access this pte
 	 * without first remapping it.  Keeping stale mappings around is a bad idea
 	 * also, in case the page changes cacheability attributes or becomes
 	 * a protected page in a hypervisor.
 	 */
-	kpte_clear_flush(kmap_pte-idx, vaddr);
+	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
+		kpte_clear_flush(kmap_pte-idx, vaddr);
+	else {
+#ifdef CONFIG_DEBUG_HIGHMEM
+		BUG_ON(vaddr < PAGE_OFFSET);
+		BUG_ON(vaddr >= (unsigned long)high_memory);
+#endif
+	}
 
-	dec_preempt_count();
-	preempt_check_resched();
+	pagefault_enable();
 }
 
 /* This is the same as kmap_atomic() but can map memory that doesn't
@@ -80,7 +76,7 @@
 	enum fixed_addresses idx;
 	unsigned long vaddr;
 
-	inc_preempt_count();
+	pagefault_disable();
 
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
diff --git a/arch/i386/mm/hugetlbpage.c b/arch/i386/mm/hugetlbpage.c
index 1719a81..34728e4 100644
--- a/arch/i386/mm/hugetlbpage.c
+++ b/arch/i386/mm/hugetlbpage.c
@@ -17,6 +17,113 @@
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
 
+static unsigned long page_table_shareable(struct vm_area_struct *svma,
+				struct vm_area_struct *vma,
+				unsigned long addr, pgoff_t idx)
+{
+	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
+				svma->vm_start;
+	unsigned long sbase = saddr & PUD_MASK;
+	unsigned long s_end = sbase + PUD_SIZE;
+
+	/*
+	 * match the virtual addresses, permission and the alignment of the
+	 * page table page.
+	 */
+	if (pmd_index(addr) != pmd_index(saddr) ||
+	    vma->vm_flags != svma->vm_flags ||
+	    sbase < svma->vm_start || svma->vm_end < s_end)
+		return 0;
+
+	return saddr;
+}
+
+static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
+{
+	unsigned long base = addr & PUD_MASK;
+	unsigned long end = base + PUD_SIZE;
+
+	/*
+	 * check on proper vm_flags and page table alignment
+	 */
+	if (vma->vm_flags & VM_MAYSHARE &&
+	    vma->vm_start <= base && end <= vma->vm_end)
+		return 1;
+	return 0;
+}
+
+/*
+ * search for a shareable pmd page for hugetlb.
+ */
+static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
+{
+	struct vm_area_struct *vma = find_vma(mm, addr);
+	struct address_space *mapping = vma->vm_file->f_mapping;
+	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
+			vma->vm_pgoff;
+	struct prio_tree_iter iter;
+	struct vm_area_struct *svma;
+	unsigned long saddr;
+	pte_t *spte = NULL;
+
+	if (!vma_shareable(vma, addr))
+		return;
+
+	spin_lock(&mapping->i_mmap_lock);
+	vma_prio_tree_foreach(svma, &iter, &mapping->i_mmap, idx, idx) {
+		if (svma == vma)
+			continue;
+
+		saddr = page_table_shareable(svma, vma, addr, idx);
+		if (saddr) {
+			spte = huge_pte_offset(svma->vm_mm, saddr);
+			if (spte) {
+				get_page(virt_to_page(spte));
+				break;
+			}
+		}
+	}
+
+	if (!spte)
+		goto out;
+
+	spin_lock(&mm->page_table_lock);
+	if (pud_none(*pud))
+		pud_populate(mm, pud, (unsigned long) spte & PAGE_MASK);
+	else
+		put_page(virt_to_page(spte));
+	spin_unlock(&mm->page_table_lock);
+out:
+	spin_unlock(&mapping->i_mmap_lock);
+}
+
+/*
+ * unmap huge page backed by shared pte.
+ *
+ * Hugetlb pte page is ref counted at the time of mapping.  If pte is shared
+ * indicated by page_count > 1, unmap is achieved by clearing pud and
+ * decrementing the ref count. If count == 1, the pte page is not shared.
+ *
+ * called with vma->vm_mm->page_table_lock held.
+ *
+ * returns: 1 successfully unmapped a shared pte page
+ *	    0 the underlying pte page is not shared, or it is the last user
+ */
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+	pgd_t *pgd = pgd_offset(mm, *addr);
+	pud_t *pud = pud_offset(pgd, *addr);
+
+	BUG_ON(page_count(virt_to_page(ptep)) == 0);
+	if (page_count(virt_to_page(ptep)) == 1)
+		return 0;
+
+	pud_clear(pud);
+	put_page(virt_to_page(ptep));
+	*addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
+	return 1;
+}
+
 pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
 {
 	pgd_t *pgd;
@@ -25,8 +132,11 @@
 
 	pgd = pgd_offset(mm, addr);
 	pud = pud_alloc(mm, pgd, addr);
-	if (pud)
+	if (pud) {
+		if (pud_none(*pud))
+			huge_pmd_share(mm, addr, pud);
 		pte = (pte_t *) pmd_alloc(mm, pud, addr);
+	}
 	BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
 
 	return pte;
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index f4dd048..84697df 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -697,8 +697,8 @@
 #endif
 #endif
 
-kmem_cache_t *pgd_cache;
-kmem_cache_t *pmd_cache;
+struct kmem_cache *pgd_cache;
+struct kmem_cache *pmd_cache;
 
 void __init pgtable_cache_init(void)
 {
diff --git a/arch/i386/mm/pgtable.c b/arch/i386/mm/pgtable.c
index 65b5c09..f349eaf 100644
--- a/arch/i386/mm/pgtable.c
+++ b/arch/i386/mm/pgtable.c
@@ -196,7 +196,7 @@
 	return pte;
 }
 
-void pmd_ctor(void *pmd, kmem_cache_t *cache, unsigned long flags)
+void pmd_ctor(void *pmd, struct kmem_cache *cache, unsigned long flags)
 {
 	memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
 }
@@ -236,7 +236,7 @@
 		set_page_private(next, (unsigned long)pprev);
 }
 
-void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
+void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
 {
 	unsigned long flags;
 
@@ -256,7 +256,7 @@
 }
 
 /* never called when PTRS_PER_PMD > 1 */
-void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
+void pgd_dtor(void *pgd, struct kmem_cache *cache, unsigned long unused)
 {
 	unsigned long flags; /* can be called from interrupt context */
 
diff --git a/arch/i386/power/Makefile b/arch/i386/power/Makefile
index 8cfa4e8..2de7bbf 100644
--- a/arch/i386/power/Makefile
+++ b/arch/i386/power/Makefile
@@ -1,2 +1,2 @@
 obj-$(CONFIG_PM)		+= cpu.o
-obj-$(CONFIG_SOFTWARE_SUSPEND)	+= swsusp.o
+obj-$(CONFIG_SOFTWARE_SUSPEND)	+= swsusp.o suspend.o
diff --git a/arch/i386/power/suspend.c b/arch/i386/power/suspend.c
new file mode 100644
index 0000000..db5e98d
--- /dev/null
+++ b/arch/i386/power/suspend.c
@@ -0,0 +1,158 @@
+/*
+ * Suspend support specific for i386 - temporary page tables
+ *
+ * Distribute under GPLv2
+ *
+ * Copyright (c) 2006 Rafael J. Wysocki <rjw@sisk.pl>
+ */
+
+#include <linux/suspend.h>
+#include <linux/bootmem.h>
+
+#include <asm/system.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+/* Defined in arch/i386/power/swsusp.S */
+extern int restore_image(void);
+
+/* Pointer to the temporary resume page tables */
+pgd_t *resume_pg_dir;
+
+/* The following three functions are based on the analogous code in
+ * arch/i386/mm/init.c
+ */
+
+/*
+ * Create a middle page table on a resume-safe page and put a pointer to it in
+ * the given global directory entry.  This only returns the gd entry
+ * in non-PAE compilation mode, since the middle layer is folded.
+ */
+static pmd_t *resume_one_md_table_init(pgd_t *pgd)
+{
+	pud_t *pud;
+	pmd_t *pmd_table;
+
+#ifdef CONFIG_X86_PAE
+	pmd_table = (pmd_t *)get_safe_page(GFP_ATOMIC);
+	if (!pmd_table)
+		return NULL;
+
+	set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
+	pud = pud_offset(pgd, 0);
+
+	BUG_ON(pmd_table != pmd_offset(pud, 0));
+#else
+	pud = pud_offset(pgd, 0);
+	pmd_table = pmd_offset(pud, 0);
+#endif
+
+	return pmd_table;
+}
+
+/*
+ * Create a page table on a resume-safe page and place a pointer to it in
+ * a middle page directory entry.
+ */
+static pte_t *resume_one_page_table_init(pmd_t *pmd)
+{
+	if (pmd_none(*pmd)) {
+		pte_t *page_table = (pte_t *)get_safe_page(GFP_ATOMIC);
+		if (!page_table)
+			return NULL;
+
+		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
+
+		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
+
+		return page_table;
+	}
+
+	return pte_offset_kernel(pmd, 0);
+}
+
+/*
+ * This maps the physical memory to kernel virtual address space, a total
+ * of max_low_pfn pages, by creating page tables starting from address
+ * PAGE_OFFSET.  The page tables are allocated out of resume-safe pages.
+ */
+static int resume_physical_mapping_init(pgd_t *pgd_base)
+{
+	unsigned long pfn;
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *pte;
+	int pgd_idx, pmd_idx;
+
+	pgd_idx = pgd_index(PAGE_OFFSET);
+	pgd = pgd_base + pgd_idx;
+	pfn = 0;
+
+	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
+		pmd = resume_one_md_table_init(pgd);
+		if (!pmd)
+			return -ENOMEM;
+
+		if (pfn >= max_low_pfn)
+			continue;
+
+		for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD; pmd++, pmd_idx++) {
+			if (pfn >= max_low_pfn)
+				break;
+
+			/* Map with big pages if possible, otherwise create
+			 * normal page tables.
+			 * NOTE: We can mark everything as executable here
+			 */
+			if (cpu_has_pse) {
+				set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
+				pfn += PTRS_PER_PTE;
+			} else {
+				pte_t *max_pte;
+
+				pte = resume_one_page_table_init(pmd);
+				if (!pte)
+					return -ENOMEM;
+
+				max_pte = pte + PTRS_PER_PTE;
+				for (; pte < max_pte; pte++, pfn++) {
+					if (pfn >= max_low_pfn)
+						break;
+
+					set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
+				}
+			}
+		}
+	}
+	return 0;
+}
+
+static inline void resume_init_first_level_page_table(pgd_t *pg_dir)
+{
+#ifdef CONFIG_X86_PAE
+	int i;
+
+	/* Init entries of the first-level page table to the zero page */
+	for (i = 0; i < PTRS_PER_PGD; i++)
+		set_pgd(pg_dir + i,
+			__pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
+#endif
+}
+
+int swsusp_arch_resume(void)
+{
+	int error;
+
+	resume_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
+	if (!resume_pg_dir)
+		return -ENOMEM;
+
+	resume_init_first_level_page_table(resume_pg_dir);
+	error = resume_physical_mapping_init(resume_pg_dir);
+	if (error)
+		return error;
+
+	/* We have got enough memory and from now on we cannot recover */
+	restore_image();
+	return 0;
+}
diff --git a/arch/i386/power/swsusp.S b/arch/i386/power/swsusp.S
index 8a2b50a..53662e0 100644
--- a/arch/i386/power/swsusp.S
+++ b/arch/i386/power/swsusp.S
@@ -28,8 +28,9 @@
 	call swsusp_save
 	ret
 
-ENTRY(swsusp_arch_resume)
-	movl	$swsusp_pg_dir-__PAGE_OFFSET, %ecx
+ENTRY(restore_image)
+	movl	resume_pg_dir, %ecx
+	subl	$__PAGE_OFFSET, %ecx
 	movl	%ecx, %cr3
 
 	movl	restore_pblist, %edx
@@ -51,6 +52,10 @@
 	.p2align 4,,7
 
 done:
+	/* go back to the original page tables */
+	movl	$swapper_pg_dir, %ecx
+	subl	$__PAGE_OFFSET, %ecx
+	movl	%ecx, %cr3
 	/* Flush TLB, including "global" things (vmalloc) */
 	movl	mmu_cr4_features, %eax
 	movl	%eax, %edx
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index caab986..b62f0c4 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -209,7 +209,7 @@
 }
 #endif
 
-static void do_softint(void *private_)
+static void do_softint(struct work_struct *private_)
 {
 	printk(KERN_ERR "simserial: do_softint called\n");
 }
@@ -698,7 +698,7 @@
 	info->flags = sstate->flags;
 	info->xmit_fifo_size = sstate->xmit_fifo_size;
 	info->line = line;
-	INIT_WORK(&info->work, do_softint, info);
+	INIT_WORK(&info->work, do_softint);
 	info->state = sstate;
 	if (sstate->info) {
 		kfree(info);
diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
index daa6b91..578737e 100644
--- a/arch/ia64/ia32/binfmt_elf32.c
+++ b/arch/ia64/ia32/binfmt_elf32.c
@@ -91,7 +91,7 @@
 	 * it with privilege level 3 because the IVE uses non-privileged accesses to these
 	 * tables.  IA-32 segmentation is used to protect against IA-32 accesses to them.
 	 */
-	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (vma) {
 		memset(vma, 0, sizeof(*vma));
 		vma->vm_mm = current->mm;
@@ -117,7 +117,7 @@
 	 * code is locked in specific gate page, which is pointed by pretcode
 	 * when setup_frame_ia32
 	 */
-	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (vma) {
 		memset(vma, 0, sizeof(*vma));
 		vma->vm_mm = current->mm;
@@ -142,7 +142,7 @@
 	 * Install LDT as anonymous memory.  This gives us all-zero segment descriptors
 	 * until a task modifies them via modify_ldt().
 	 */
-	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (vma) {
 		memset(vma, 0, sizeof(*vma));
 		vma->vm_mm = current->mm;
@@ -214,7 +214,7 @@
 		bprm->loader += stack_base;
 	bprm->exec += stack_base;
 
-	mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (!mpnt)
 		return -ENOMEM;
 
diff --git a/arch/ia64/ia32/ia32_support.c b/arch/ia64/ia32/ia32_support.c
index c187743..6af400a 100644
--- a/arch/ia64/ia32/ia32_support.c
+++ b/arch/ia64/ia32/ia32_support.c
@@ -249,7 +249,7 @@
 
 #if PAGE_SHIFT > IA32_PAGE_SHIFT
 	{
-		extern kmem_cache_t *partial_page_cachep;
+		extern struct kmem_cache *partial_page_cachep;
 
 		partial_page_cachep = kmem_cache_create("partial_page_cache",
 							sizeof(struct partial_page), 0, 0,
diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
index 703a67c..cfa0bc0 100644
--- a/arch/ia64/ia32/ia32priv.h
+++ b/arch/ia64/ia32/ia32priv.h
@@ -330,8 +330,6 @@
 void ia64_elf32_init(struct pt_regs *regs);
 #define ELF_PLAT_INIT(_r, load_addr)	ia64_elf32_init(_r)
 
-#define elf_addr_t	u32
-
 /* This macro yields a bitmask that programs can use to figure out
    what instruction set this CPU supports.  */
 #define ELF_HWCAP	0
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index 9d6a3f2..a4a6e14 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -254,7 +254,7 @@
 }
 
 /* SLAB cache for partial_page structures */
-kmem_cache_t *partial_page_cachep;
+struct kmem_cache *partial_page_cachep;
 
 /*
  * init partial_page_list.
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 51217d6..4d592ee 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -481,7 +481,7 @@
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
 	mutex_lock(&kprobe_mutex);
-	free_insn_slot(p->ainsn.insn);
+	free_insn_slot(p->ainsn.insn, 0);
 	mutex_unlock(&kprobe_mutex);
 }
 /*
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 7cfa63a..6bedd97 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -678,7 +678,7 @@
  * disable the cmc interrupt vector.
  */
 static void
-ia64_mca_cmc_vector_disable_keventd(void *unused)
+ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused)
 {
 	on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0);
 }
@@ -690,7 +690,7 @@
  * enable the cmc interrupt vector.
  */
 static void
-ia64_mca_cmc_vector_enable_keventd(void *unused)
+ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused)
 {
 	on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
 }
@@ -1247,8 +1247,8 @@
 	monarch_cpu = -1;
 }
 
-static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL);
-static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd, NULL);
+static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd);
+static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd);
 
 /*
  * ia64_mca_cmc_int_handler
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 0b546e2..c4c10a0 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -952,7 +952,6 @@
 	}
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int palinfo_cpu_callback(struct notifier_block *nfb,
 					unsigned long action, void *hcpu)
 {
@@ -974,7 +973,6 @@
 	.notifier_call = palinfo_cpu_callback,
 	.priority = 0,
 };
-#endif
 
 static int __init
 palinfo_init(void)
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 3aaede0..e232153 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2302,7 +2302,7 @@
 	DPRINT(("smpl_buf @%p\n", smpl_buf));
 
 	/* allocate vma */
-	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (!vma) {
 		DPRINT(("Cannot allocate vma\n"));
 		goto error_kmem;
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index e63b8ca..fd607ca 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -575,7 +575,6 @@
 	.write   = salinfo_log_write,
 };
 
-#ifdef	CONFIG_HOTPLUG_CPU
 static int __devinit
 salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
 {
@@ -620,7 +619,6 @@
 	.notifier_call = salinfo_cpu_callback,
 	.priority = 0,
 };
-#endif	/* CONFIG_HOTPLUG_CPU */
 
 static int __init
 salinfo_init(void)
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index f7d7f56..b21ddec 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -463,15 +463,17 @@
 }
 
 struct create_idle {
+	struct work_struct work;
 	struct task_struct *idle;
 	struct completion done;
 	int cpu;
 };
 
 void
-do_fork_idle(void *_c_idle)
+do_fork_idle(struct work_struct *work)
 {
-	struct create_idle *c_idle = _c_idle;
+	struct create_idle *c_idle =
+		container_of(work, struct create_idle, work);
 
 	c_idle->idle = fork_idle(c_idle->cpu);
 	complete(&c_idle->done);
@@ -482,10 +484,10 @@
 {
 	int timeout;
 	struct create_idle c_idle = {
+		.work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
 		.cpu	= cpu,
 		.done	= COMPLETION_INITIALIZER(c_idle.done),
 	};
-	DECLARE_WORK(work, do_fork_idle, &c_idle);
 
  	c_idle.idle = get_idle_for_cpu(cpu);
  	if (c_idle.idle) {
@@ -497,9 +499,9 @@
 	 * We can't use kernel_thread since we must avoid to reschedule the child.
 	 */
 	if (!keventd_up() || current_is_keventd())
-		work.func(work.data);
+		c_idle.work.func(&c_idle.work);
 	else {
-		schedule_work(&work);
+		schedule_work(&c_idle.work);
 		wait_for_completion(&c_idle.done);
 	}
 
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index f3a9585..0c7e94e 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -64,6 +64,11 @@
 	return pte;
 }
 
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+	return 0;
+}
+
 #define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }
 
 /*
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index ff87a5c..56dc2024 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -156,7 +156,7 @@
 	 * the problem.  When the process attempts to write to the register backing store
 	 * for the first time, it will get a SEGFAULT in this case.
 	 */
-	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (vma) {
 		memset(vma, 0, sizeof(*vma));
 		vma->vm_mm = current->mm;
@@ -175,7 +175,7 @@
 
 	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
 	if (!(current->personality & MMAP_PAGE_ZERO)) {
-		vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+		vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 		if (vma) {
 			memset(vma, 0, sizeof(*vma));
 			vma->vm_mm = current->mm;
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index f4edfbf..eb92cef 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -564,8 +564,8 @@
 void
 pcibios_disable_device (struct pci_dev *dev)
 {
-	if (dev->is_enabled)
-		acpi_pci_irq_disable(dev);
+	BUG_ON(atomic_read(&dev->enable_cnt));
+	acpi_pci_irq_disable(dev);
 }
 
 void
diff --git a/arch/m32r/kernel/setup.c b/arch/m32r/kernel/setup.c
index 0e7778b..936205f 100644
--- a/arch/m32r/kernel/setup.c
+++ b/arch/m32r/kernel/setup.c
@@ -196,9 +196,7 @@
 	if (LOADER_TYPE && INITRD_START) {
 		if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
 			reserve_bootmem(INITRD_START, INITRD_SIZE);
-			initrd_start = INITRD_START ?
-				INITRD_START + PAGE_OFFSET : 0;
-
+			initrd_start = INITRD_START + PAGE_OFFSET;
 			initrd_end = initrd_start + INITRD_SIZE;
 			printk("initrd:start[%08lx],size[%08lx]\n",
 				initrd_start, INITRD_SIZE);
diff --git a/arch/m32r/kernel/signal.c b/arch/m32r/kernel/signal.c
index b60cea4..092ea86 100644
--- a/arch/m32r/kernel/signal.c
+++ b/arch/m32r/kernel/signal.c
@@ -21,7 +21,7 @@
 #include <linux/unistd.h>
 #include <linux/stddef.h>
 #include <linux/personality.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <asm/cacheflush.h>
 #include <asm/ucontext.h>
 #include <asm/uaccess.h>
diff --git a/arch/m32r/mm/discontig.c b/arch/m32r/mm/discontig.c
index abb34cc..c7efdb0 100644
--- a/arch/m32r/mm/discontig.c
+++ b/arch/m32r/mm/discontig.c
@@ -105,9 +105,7 @@
 		if (INITRD_START + INITRD_SIZE <= PFN_PHYS(max_low_pfn)) {
 			reserve_bootmem_node(NODE_DATA(0), INITRD_START,
 				INITRD_SIZE);
-			initrd_start = INITRD_START ?
-				INITRD_START + PAGE_OFFSET : 0;
-
+			initrd_start = INITRD_START + PAGE_OFFSET;
 			initrd_end = initrd_start + INITRD_SIZE;
 			printk("initrd:start[%08lx],size[%08lx]\n",
 				initrd_start, INITRD_SIZE);
diff --git a/arch/m68k/amiga/chipram.c b/arch/m68k/amiga/chipram.c
index de1304c..fa015d8 100644
--- a/arch/m68k/amiga/chipram.c
+++ b/arch/m68k/amiga/chipram.c
@@ -52,10 +52,9 @@
 #ifdef DEBUG
     printk("amiga_chip_alloc: allocate %ld bytes\n", size);
 #endif
-    res = kmalloc(sizeof(struct resource), GFP_KERNEL);
+    res = kzalloc(sizeof(struct resource), GFP_KERNEL);
     if (!res)
 	return NULL;
-    memset(res, 0, sizeof(struct resource));
     res->name = name;
 
     if (allocate_resource(&chipram_res, res, size, 0, UINT_MAX, PAGE_SIZE, NULL, NULL) < 0) {
diff --git a/arch/m68k/atari/hades-pci.c b/arch/m68k/atari/hades-pci.c
index 6ca57b6..bee2b14 100644
--- a/arch/m68k/atari/hades-pci.c
+++ b/arch/m68k/atari/hades-pci.c
@@ -375,10 +375,9 @@
 	 * Allocate memory for bus info structure.
 	 */
 
-	bus = kmalloc(sizeof(struct pci_bus_info), GFP_KERNEL);
+	bus = kzalloc(sizeof(struct pci_bus_info), GFP_KERNEL);
 	if (!bus)
 		return NULL;
-	memset(bus, 0, sizeof(struct pci_bus_info));
 
 	/*
 	 * Claim resources. The m68k has no separate I/O space, both
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index 911f2ce..2adbeb1 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -99,7 +99,7 @@
 	 * If we're in an interrupt or have no user
 	 * context, we must not take the fault..
 	 */
-	if (in_interrupt() || !mm)
+	if (in_atomic() || !mm)
 		goto no_context;
 
 	down_read(&mm->mmap_sem);
diff --git a/arch/m68knommu/platform/5307/timers.c b/arch/m68knommu/platform/5307/timers.c
index 24781f0..e5668af 100644
--- a/arch/m68knommu/platform/5307/timers.c
+++ b/arch/m68knommu/platform/5307/timers.c
@@ -3,7 +3,7 @@
 /*
  *	timers.c -- generic ColdFire hardware timer support.
  *
- *	Copyright (C) 1999-2003, Greg Ungerer (gerg@snapgear.com)
+ *	Copyright (C) 1999-2006, Greg Ungerer (gerg@snapgear.com)
  */
 
 /***************************************************************************/
@@ -44,6 +44,14 @@
 extern void mcf_settimericr(int timer, int level);
 extern int mcf_timerirqpending(int timer);
 
+#if defined(CONFIG_M532x)
+#define	__raw_readtrr	__raw_readl
+#define	__raw_writetrr	__raw_writel
+#else
+#define	__raw_readtrr	__raw_readw
+#define	__raw_writetrr	__raw_writew
+#endif
+
 /***************************************************************************/
 
 void coldfire_tick(void)
@@ -57,7 +65,7 @@
 void coldfire_timer_init(irqreturn_t (*handler)(int, void *, struct pt_regs *))
 {
 	__raw_writew(MCFTIMER_TMR_DISABLE, TA(MCFTIMER_TMR));
-	__raw_writew(((MCF_BUSCLK / 16) / HZ), TA(MCFTIMER_TRR));
+	__raw_writetrr(((MCF_BUSCLK / 16) / HZ), TA(MCFTIMER_TRR));
 	__raw_writew(MCFTIMER_TMR_ENORI | MCFTIMER_TMR_CLK16 |
 		MCFTIMER_TMR_RESTART | MCFTIMER_TMR_ENABLE, TA(MCFTIMER_TMR));
 
@@ -76,7 +84,7 @@
 	unsigned long trr, tcn, offset;
 
 	tcn = __raw_readw(TA(MCFTIMER_TCN));
-	trr = __raw_readw(TA(MCFTIMER_TRR));
+	trr = __raw_readtrr(TA(MCFTIMER_TRR));
 	offset = (tcn * (1000000 / HZ)) / trr;
 
 	/* Check if we just wrapped the counters and maybe missed a tick */
@@ -120,7 +128,7 @@
 	/* Set up TIMER 2 as high speed profile clock */
 	__raw_writew(MCFTIMER_TMR_DISABLE, PA(MCFTIMER_TMR));
 
-	__raw_writew(((MCF_CLK / 16) / PROFILEHZ), PA(MCFTIMER_TRR));
+	__raw_writetrr(((MCF_CLK / 16) / PROFILEHZ), PA(MCFTIMER_TRR));
 	__raw_writew(MCFTIMER_TMR_ENORI | MCFTIMER_TMR_CLK16 |
 		MCFTIMER_TMR_RESTART | MCFTIMER_TMR_ENABLE, PA(MCFTIMER_TMR));
 
diff --git a/arch/m68knommu/platform/68360/config.c b/arch/m68knommu/platform/68360/config.c
index c5482e3..1b36f62 100644
--- a/arch/m68knommu/platform/68360/config.c
+++ b/arch/m68knommu/platform/68360/config.c
@@ -114,7 +114,7 @@
 {
 }
 
-int BSP_hwclk(int op, struct hwclk_time *t)
+int BSP_hwclk(int op, struct rtc_time *t)
 {
   if (!op) {
     /* read */
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 4d64960..d8af858f 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -242,6 +242,7 @@
 	select SYS_SUPPORTS_32BIT_KERNEL
 	select SYS_SUPPORTS_64BIT_KERNEL if EXPERIMENTAL
 	select SYS_SUPPORTS_LITTLE_ENDIAN
+	select GENERIC_HARDIRQS_NO__DO_IRQ
 
 config MIPS_ATLAS
 	bool "MIPS Atlas board"
@@ -265,6 +266,7 @@
 	select SYS_SUPPORTS_BIG_ENDIAN
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 	select SYS_SUPPORTS_MULTITHREADING if EXPERIMENTAL
+	select GENERIC_HARDIRQS_NO__DO_IRQ
 	help
 	  This enables support for the MIPS Technologies Atlas evaluation
 	  board.
@@ -419,6 +421,7 @@
 	select SYS_SUPPORTS_32BIT_KERNEL
 	select SYS_SUPPORTS_64BIT_KERNEL
 	select SYS_SUPPORTS_BIG_ENDIAN
+	select GENERIC_HARDIRQS_NO__DO_IRQ
 	help
 	  The Ocelot is a MIPS-based Single Board Computer (SBC) made by
 	  Momentum Computer <http://www.momenco.com/>.
@@ -569,6 +572,7 @@
 	select SYS_SUPPORTS_BIG_ENDIAN
 	select SYS_SUPPORTS_NUMA
 	select SYS_SUPPORTS_SMP
+	select GENERIC_HARDIRQS_NO__DO_IRQ
 	help
 	  These are the SGI Origin 200, Origin 2000 and Onyx 2 Graphics
 	  workstations.  To compile a Linux kernel that runs on these, say Y
@@ -835,6 +839,10 @@
 	bool
 	default y
 
+config GENERIC_HARDIRQS_NO__DO_IRQ
+	bool
+	default n
+
 #
 # Select some configuration options automatically based on user selections.
 #
@@ -996,6 +1004,7 @@
 	select HW_HAS_PCI
 	select SYS_HAS_CPU_MIPS32_R1
 	select SYS_SUPPORTS_32BIT_KERNEL
+	select GENERIC_HARDIRQS_NO__DO_IRQ
 
 config SWAP_IO_SPACE
 	bool
diff --git a/arch/mips/dec/ecc-berr.c b/arch/mips/dec/ecc-berr.c
index c8430c0..6d55e8a 100644
--- a/arch/mips/dec/ecc-berr.c
+++ b/arch/mips/dec/ecc-berr.c
@@ -25,6 +25,7 @@
 #include <asm/cpu.h>
 #include <asm/irq_regs.h>
 #include <asm/processor.h>
+#include <asm/ptrace.h>
 #include <asm/system.h>
 #include <asm/traps.h>
 
diff --git a/arch/mips/dec/ioasic-irq.c b/arch/mips/dec/ioasic-irq.c
index 269b22b..4c7cb40 100644
--- a/arch/mips/dec/ioasic-irq.c
+++ b/arch/mips/dec/ioasic-irq.c
@@ -67,7 +67,6 @@
 	.mask = mask_ioasic_irq,
 	.mask_ack = ack_ioasic_irq,
 	.unmask = unmask_ioasic_irq,
-	.end = end_ioasic_irq,
 };
 
 
@@ -106,8 +105,7 @@
 		set_irq_chip_and_handler(i, &ioasic_irq_type,
 					 handle_level_irq);
 	for (; i < base + IO_IRQ_LINES; i++)
-		set_irq_chip_and_handler(i, &ioasic_dma_irq_type,
-					 handle_level_irq);
+		set_irq_chip(i, &ioasic_dma_irq_type);
 
 	ioasic_irq_base = base;
 }
diff --git a/arch/mips/dec/kn01-berr.c b/arch/mips/dec/kn01-berr.c
index f19b461..d3b8002 100644
--- a/arch/mips/dec/kn01-berr.c
+++ b/arch/mips/dec/kn01-berr.c
@@ -20,8 +20,10 @@
 #include <linux/types.h>
 
 #include <asm/inst.h>
+#include <asm/irq_regs.h>
 #include <asm/mipsregs.h>
 #include <asm/page.h>
+#include <asm/ptrace.h>
 #include <asm/system.h>
 #include <asm/traps.h>
 #include <asm/uaccess.h>
diff --git a/arch/mips/dec/kn02-irq.c b/arch/mips/dec/kn02-irq.c
index 5a9be4c..916e46b 100644
--- a/arch/mips/dec/kn02-irq.c
+++ b/arch/mips/dec/kn02-irq.c
@@ -57,19 +57,12 @@
 	iob();
 }
 
-static void end_kn02_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
-		unmask_kn02_irq(irq);
-}
-
 static struct irq_chip kn02_irq_type = {
 	.typename = "KN02-CSR",
 	.ack = ack_kn02_irq,
 	.mask = mask_kn02_irq,
 	.mask_ack = ack_kn02_irq,
 	.unmask = unmask_kn02_irq,
-	.end = end_kn02_irq,
 };
 
 
diff --git a/arch/mips/emma2rh/common/irq_emma2rh.c b/arch/mips/emma2rh/common/irq_emma2rh.c
index 59b9829..8d880f0 100644
--- a/arch/mips/emma2rh/common/irq_emma2rh.c
+++ b/arch/mips/emma2rh/common/irq_emma2rh.c
@@ -56,19 +56,12 @@
 	ll_emma2rh_irq_disable(irq - emma2rh_irq_base);
 }
 
-static void emma2rh_irq_end(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
-		ll_emma2rh_irq_enable(irq - emma2rh_irq_base);
-}
-
 struct irq_chip emma2rh_irq_controller = {
 	.typename = "emma2rh_irq",
 	.ack = emma2rh_irq_disable,
 	.mask = emma2rh_irq_disable,
 	.mask_ack = emma2rh_irq_disable,
 	.unmask = emma2rh_irq_enable,
-	.end = emma2rh_irq_end,
 };
 
 void emma2rh_irq_init(u32 irq_base)
diff --git a/arch/mips/emma2rh/markeins/irq_markeins.c b/arch/mips/emma2rh/markeins/irq_markeins.c
index 3ac4e40..2116d9b 100644
--- a/arch/mips/emma2rh/markeins/irq_markeins.c
+++ b/arch/mips/emma2rh/markeins/irq_markeins.c
@@ -48,19 +48,12 @@
 	ll_emma2rh_sw_irq_disable(irq - emma2rh_sw_irq_base);
 }
 
-static void emma2rh_sw_irq_end(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
-		ll_emma2rh_sw_irq_enable(irq - emma2rh_sw_irq_base);
-}
-
 struct irq_chip emma2rh_sw_irq_controller = {
 	.typename = "emma2rh_sw_irq",
 	.ack = emma2rh_sw_irq_disable,
 	.mask = emma2rh_sw_irq_disable,
 	.mask_ack = emma2rh_sw_irq_disable,
 	.unmask = emma2rh_sw_irq_enable,
-	.end = emma2rh_sw_irq_end,
 };
 
 void emma2rh_sw_irq_init(u32 irq_base)
diff --git a/arch/mips/jazz/irq.c b/arch/mips/jazz/irq.c
index 5c4f50c..f8d417b 100644
--- a/arch/mips/jazz/irq.c
+++ b/arch/mips/jazz/irq.c
@@ -39,19 +39,12 @@
 	spin_unlock_irqrestore(&r4030_lock, flags);
 }
 
-static void end_r4030_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		enable_r4030_irq(irq);
-}
-
 static struct irq_chip r4030_irq_type = {
 	.typename = "R4030",
 	.ack = disable_r4030_irq,
 	.mask = disable_r4030_irq,
 	.mask_ack = disable_r4030_irq,
 	.unmask = enable_r4030_irq,
-	.end = end_r4030_irq,
 };
 
 void __init init_r4030_ints(void)
diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
index 4a9f1ec..9b34238 100644
--- a/arch/mips/kernel/binfmt_elfn32.c
+++ b/arch/mips/kernel/binfmt_elfn32.c
@@ -90,7 +90,6 @@
 	char	pr_psargs[ELF_PRARGSZ];	/* initial part of arg list */
 };
 
-#define elf_addr_t	u32
 #define elf_caddr_t	u32
 #define init_elf_binfmt init_elfn32_binfmt
 
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
index e318137..993f7ec 100644
--- a/arch/mips/kernel/binfmt_elfo32.c
+++ b/arch/mips/kernel/binfmt_elfo32.c
@@ -92,7 +92,6 @@
 	char	pr_psargs[ELF_PRARGSZ];	/* initial part of arg list */
 };
 
-#define elf_addr_t	u32
 #define elf_caddr_t	u32
 #define init_elf_binfmt init_elf32_binfmt
 
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index 2526c0c..b59a676 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -19,9 +19,6 @@
 #include <asm/i8259.h>
 #include <asm/io.h>
 
-void enable_8259A_irq(unsigned int irq);
-void disable_8259A_irq(unsigned int irq);
-
 /*
  * This is the 'legacy' 8259A Programmable Interrupt Controller,
  * present in the majority of PC/AT boxes.
@@ -31,23 +28,16 @@
  * moves to arch independent land
  */
 
+static int i8259A_auto_eoi;
 DEFINE_SPINLOCK(i8259A_lock);
-
-static void end_8259A_irq (unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)) &&
-	    irq_desc[irq].action)
-		enable_8259A_irq(irq);
-}
-
+/* some platforms call this... */
 void mask_and_ack_8259A(unsigned int);
 
-static struct irq_chip i8259A_irq_type = {
-	.typename = "XT-PIC",
-	.enable = enable_8259A_irq,
-	.disable = disable_8259A_irq,
-	.ack = mask_and_ack_8259A,
-	.end = end_8259A_irq,
+static struct irq_chip i8259A_chip = {
+	.name		= "XT-PIC",
+	.mask		= disable_8259A_irq,
+	.unmask		= enable_8259A_irq,
+	.mask_ack	= mask_and_ack_8259A,
 };
 
 /*
@@ -59,8 +49,8 @@
  */
 static unsigned int cached_irq_mask = 0xffff;
 
-#define cached_21	(cached_irq_mask)
-#define cached_A1	(cached_irq_mask >> 8)
+#define cached_master_mask	(cached_irq_mask)
+#define cached_slave_mask	(cached_irq_mask >> 8)
 
 void disable_8259A_irq(unsigned int irq)
 {
@@ -70,9 +60,9 @@
 	spin_lock_irqsave(&i8259A_lock, flags);
 	cached_irq_mask |= mask;
 	if (irq & 8)
-		outb(cached_A1,0xA1);
+		outb(cached_slave_mask, PIC_SLAVE_IMR);
 	else
-		outb(cached_21,0x21);
+		outb(cached_master_mask, PIC_MASTER_IMR);
 	spin_unlock_irqrestore(&i8259A_lock, flags);
 }
 
@@ -84,9 +74,9 @@
 	spin_lock_irqsave(&i8259A_lock, flags);
 	cached_irq_mask &= mask;
 	if (irq & 8)
-		outb(cached_A1,0xA1);
+		outb(cached_slave_mask, PIC_SLAVE_IMR);
 	else
-		outb(cached_21,0x21);
+		outb(cached_master_mask, PIC_MASTER_IMR);
 	spin_unlock_irqrestore(&i8259A_lock, flags);
 }
 
@@ -98,9 +88,9 @@
 
 	spin_lock_irqsave(&i8259A_lock, flags);
 	if (irq < 8)
-		ret = inb(0x20) & mask;
+		ret = inb(PIC_MASTER_CMD) & mask;
 	else
-		ret = inb(0xA0) & (mask >> 8);
+		ret = inb(PIC_SLAVE_CMD) & (mask >> 8);
 	spin_unlock_irqrestore(&i8259A_lock, flags);
 
 	return ret;
@@ -109,7 +99,7 @@
 void make_8259A_irq(unsigned int irq)
 {
 	disable_irq_nosync(irq);
-	set_irq_chip(irq, &i8259A_irq_type);
+	set_irq_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
 	enable_irq(irq);
 }
 
@@ -125,14 +115,14 @@
 	int irqmask = 1 << irq;
 
 	if (irq < 8) {
-		outb(0x0B,0x20);		/* ISR register */
-		value = inb(0x20) & irqmask;
-		outb(0x0A,0x20);		/* back to the IRR register */
+		outb(0x0B,PIC_MASTER_CMD);	/* ISR register */
+		value = inb(PIC_MASTER_CMD) & irqmask;
+		outb(0x0A,PIC_MASTER_CMD);	/* back to the IRR register */
 		return value;
 	}
-	outb(0x0B,0xA0);		/* ISR register */
-	value = inb(0xA0) & (irqmask >> 8);
-	outb(0x0A,0xA0);		/* back to the IRR register */
+	outb(0x0B,PIC_SLAVE_CMD);	/* ISR register */
+	value = inb(PIC_SLAVE_CMD) & (irqmask >> 8);
+	outb(0x0A,PIC_SLAVE_CMD);	/* back to the IRR register */
 	return value;
 }
 
@@ -149,17 +139,19 @@
 
 	spin_lock_irqsave(&i8259A_lock, flags);
 	/*
-	 * Lightweight spurious IRQ detection. We do not want to overdo
-	 * spurious IRQ handling - it's usually a sign of hardware problems, so
-	 * we only do the checks we can do without slowing down good hardware
-	 * nnecesserily.
+	 * Lightweight spurious IRQ detection. We do not want
+	 * to overdo spurious IRQ handling - it's usually a sign
+	 * of hardware problems, so we only do the checks we can
+	 * do without slowing down good hardware unnecessarily.
 	 *
-	 * Note that IRQ7 and IRQ15 (the two spurious IRQs usually resulting
-	 * rom the 8259A-1|2 PICs) occur even if the IRQ is masked in the 8259A.
-	 * Thus we can check spurious 8259A IRQs without doing the quite slow
-	 * i8259A_irq_real() call for every IRQ.  This does not cover 100% of
-	 * spurious interrupts, but should be enough to warn the user that
-	 * there is something bad going on ...
+	 * Note that IRQ7 and IRQ15 (the two spurious IRQs
+	 * usually resulting from the 8259A-1|2 PICs) occur
+	 * even if the IRQ is masked in the 8259A. Thus we
+	 * can check spurious 8259A IRQs without doing the
+	 * quite slow i8259A_irq_real() call for every IRQ.
+	 * This does not cover 100% of spurious interrupts,
+	 * but should be enough to warn the user that there
+	 * is something bad going on ...
 	 */
 	if (cached_irq_mask & irqmask)
 		goto spurious_8259A_irq;
@@ -167,14 +159,14 @@
 
 handle_real_irq:
 	if (irq & 8) {
-		inb(0xA1);		/* DUMMY - (do we need this?) */
-		outb(cached_A1,0xA1);
-		outb(0x60+(irq&7),0xA0);/* 'Specific EOI' to slave */
-		outb(0x62,0x20);	/* 'Specific EOI' to master-IRQ2 */
+		inb(PIC_SLAVE_IMR);	/* DUMMY - (do we need this?) */
+		outb(cached_slave_mask, PIC_SLAVE_IMR);
+		outb(0x60+(irq&7),PIC_SLAVE_CMD);/* 'Specific EOI' to slave */
+		outb(0x60+PIC_CASCADE_IR,PIC_MASTER_CMD); /* 'Specific EOI' to master-IRQ2 */
 	} else {
-		inb(0x21);		/* DUMMY - (do we need this?) */
-		outb(cached_21,0x21);
-		outb(0x60+irq,0x20);	/* 'Specific EOI' to master */
+		inb(PIC_MASTER_IMR);	/* DUMMY - (do we need this?) */
+		outb(cached_master_mask, PIC_MASTER_IMR);
+		outb(0x60+irq,PIC_MASTER_CMD);	/* 'Specific EOI' to master */
 	}
 #ifdef CONFIG_MIPS_MT_SMTC
         if (irq_hwmask[irq] & ST0_IM)
@@ -195,7 +187,7 @@
 		goto handle_real_irq;
 
 	{
-		static int spurious_irq_mask = 0;
+		static int spurious_irq_mask;
 		/*
 		 * At this point we can be sure the IRQ is spurious,
 		 * lets ACK and report it. [once per IRQ]
@@ -216,13 +208,25 @@
 
 static int i8259A_resume(struct sys_device *dev)
 {
-	init_8259A(0);
+	init_8259A(i8259A_auto_eoi);
+	return 0;
+}
+
+static int i8259A_shutdown(struct sys_device *dev)
+{
+	/* Put the i8259A into a quiescent state that
+	 * the kernel initialization code can get it
+	 * out of.
+	 */
+	outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
+	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */
 	return 0;
 }
 
 static struct sysdev_class i8259_sysdev_class = {
 	set_kset_name("i8259"),
 	.resume = i8259A_resume,
+	.shutdown = i8259A_shutdown,
 };
 
 static struct sys_device device_i8259A = {
@@ -244,41 +248,41 @@
 {
 	unsigned long flags;
 
+	i8259A_auto_eoi = auto_eoi;
+
 	spin_lock_irqsave(&i8259A_lock, flags);
 
-	outb(0xff, 0x21);	/* mask all of 8259A-1 */
-	outb(0xff, 0xA1);	/* mask all of 8259A-2 */
+	outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
+	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */
 
 	/*
 	 * outb_p - this has to work on a wide range of PC hardware.
 	 */
-	outb_p(0x11, 0x20);	/* ICW1: select 8259A-1 init */
-	outb_p(0x00, 0x21);	/* ICW2: 8259A-1 IR0-7 mapped to 0x00-0x07 */
-	outb_p(0x04, 0x21);	/* 8259A-1 (the master) has a slave on IR2 */
-	if (auto_eoi)
-		outb_p(0x03, 0x21);	/* master does Auto EOI */
-	else
-		outb_p(0x01, 0x21);	/* master expects normal EOI */
+	outb_p(0x11, PIC_MASTER_CMD);	/* ICW1: select 8259A-1 init */
+	outb_p(I8259A_IRQ_BASE + 0, PIC_MASTER_IMR);	/* ICW2: 8259A-1 IR0 mapped to I8259A_IRQ_BASE + 0x00 */
+	outb_p(1U << PIC_CASCADE_IR, PIC_MASTER_IMR);	/* 8259A-1 (the master) has a slave on IR2 */
+	if (auto_eoi)	/* master does Auto EOI */
+		outb_p(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR);
+	else		/* master expects normal EOI */
+		outb_p(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR);
 
-	outb_p(0x11, 0xA0);	/* ICW1: select 8259A-2 init */
-	outb_p(0x08, 0xA1);	/* ICW2: 8259A-2 IR0-7 mapped to 0x08-0x0f */
-	outb_p(0x02, 0xA1);	/* 8259A-2 is a slave on master's IR2 */
-	outb_p(0x01, 0xA1);	/* (slave's support for AEOI in flat mode
-				    is to be investigated) */
-
+	outb_p(0x11, PIC_SLAVE_CMD);	/* ICW1: select 8259A-2 init */
+	outb_p(I8259A_IRQ_BASE + 8, PIC_SLAVE_IMR);	/* ICW2: 8259A-2 IR0 mapped to I8259A_IRQ_BASE + 0x08 */
+	outb_p(PIC_CASCADE_IR, PIC_SLAVE_IMR);	/* 8259A-2 is a slave on master's IR2 */
+	outb_p(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR); /* (slave's support for AEOI in flat mode is to be investigated) */
 	if (auto_eoi)
 		/*
-		 * in AEOI mode we just have to mask the interrupt
+		 * In AEOI mode we just have to mask the interrupt
 		 * when acking.
 		 */
-		i8259A_irq_type.ack = disable_8259A_irq;
+		i8259A_chip.mask_ack = disable_8259A_irq;
 	else
-		i8259A_irq_type.ack = mask_and_ack_8259A;
+		i8259A_chip.mask_ack = mask_and_ack_8259A;
 
 	udelay(100);		/* wait for 8259A to initialize */
 
-	outb(cached_21, 0x21);	/* restore master IRQ mask */
-	outb(cached_A1, 0xA1);	/* restore slave IRQ mask */
+	outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
+	outb(cached_slave_mask, PIC_SLAVE_IMR);	  /* restore slave IRQ mask */
 
 	spin_unlock_irqrestore(&i8259A_lock, flags);
 }
@@ -291,11 +295,17 @@
 };
 
 static struct resource pic1_io_resource = {
-	.name = "pic1", .start = 0x20, .end = 0x21, .flags = IORESOURCE_BUSY
+	.name = "pic1",
+	.start = PIC_MASTER_CMD,
+	.end = PIC_MASTER_IMR,
+	.flags = IORESOURCE_BUSY
 };
 
 static struct resource pic2_io_resource = {
-	.name = "pic2", .start = 0xa0, .end = 0xa1, .flags = IORESOURCE_BUSY
+	.name = "pic2",
+	.start = PIC_SLAVE_CMD,
+	.end = PIC_SLAVE_IMR,
+	.flags = IORESOURCE_BUSY
 };
 
 /*
@@ -313,7 +323,7 @@
 	init_8259A(0);
 
 	for (i = 0; i < 16; i++)
-		set_irq_chip(i, &i8259A_irq_type);
+		set_irq_chip_and_handler(i, &i8259A_chip, handle_level_irq);
 
-	setup_irq(2, &irq2);
+	setup_irq(PIC_CASCADE_IR, &irq2);
 }
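The conversion above also replaces the raw 0x20/0x21/0xA0/0xA1 port numbers with symbolic names. For reference, on a classic PC/AT layout these names typically expand to the values sketched below; this is illustrative and not copied from the patch itself:

	/* usual PC/AT i8259A port assignments (illustrative) */
	#define PIC_MASTER_CMD		0x20	/* master command/status port */
	#define PIC_MASTER_IMR		0x21	/* master interrupt mask register */
	#define PIC_SLAVE_CMD		0xa0	/* slave command/status port */
	#define PIC_SLAVE_IMR		0xa1	/* slave interrupt mask register */

	#define PIC_CASCADE_IR		2	/* slave cascades into master IR2 */
	#define MASTER_ICW4_DEFAULT	0x01	/* 8086 mode */
	#define SLAVE_ICW4_DEFAULT	0x01
	#define PIC_ICW4_AEOI		0x02	/* automatic EOI */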
diff --git a/arch/mips/kernel/irixelf.c b/arch/mips/kernel/irixelf.c
index ab12c8f..1bbefbf 100644
--- a/arch/mips/kernel/irixelf.c
+++ b/arch/mips/kernel/irixelf.c
@@ -52,10 +52,6 @@
 	irix_core_dump, PAGE_SIZE
 };
 
-#ifndef elf_addr_t
-#define elf_addr_t unsigned long
-#endif
-
 #ifdef DEBUG
 /* Debugging routines. */
 static char *get_elf_p_type(Elf32_Word p_type)
@@ -1013,7 +1009,7 @@
 	int sz;
 
 	sz = sizeof(struct elf_note);
-	sz += roundup(strlen(en->name), 4);
+	sz += roundup(strlen(en->name) + 1, 4);
 	sz += roundup(en->datasz, 4);
 
 	return sz;
@@ -1032,7 +1028,7 @@
 {
 	struct elf_note en;
 
-	en.n_namesz = strlen(men->name);
+	en.n_namesz = strlen(men->name) + 1;
 	en.n_descsz = men->datasz;
 	en.n_type = men->type;
 
diff --git a/arch/mips/kernel/irq-mv6434x.c b/arch/mips/kernel/irq-mv6434x.c
index 6cfb31c..efbd219 100644
--- a/arch/mips/kernel/irq-mv6434x.c
+++ b/arch/mips/kernel/irq-mv6434x.c
@@ -67,15 +67,6 @@
 }
 
 /*
- * End IRQ processing
- */
-static void end_mv64340_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		unmask_mv64340_irq(irq);
-}
-
-/*
  * Interrupt handler for interrupts coming from the Marvell chip.
  * It could be built in ethernet ports etc...
  */
@@ -106,7 +97,6 @@
 	.mask = mask_mv64340_irq,
 	.mask_ack = mask_mv64340_irq,
 	.unmask = unmask_mv64340_irq,
-	.end = end_mv64340_irq,
 };
 
 void __init mv64340_irq_init(unsigned int base)
diff --git a/arch/mips/kernel/irq-rm7000.c b/arch/mips/kernel/irq-rm7000.c
index ddcc2a5..123324b 100644
--- a/arch/mips/kernel/irq-rm7000.c
+++ b/arch/mips/kernel/irq-rm7000.c
@@ -29,19 +29,12 @@
 	clear_c0_intcontrol(0x100 << (irq - irq_base));
 }
 
-static void rm7k_cpu_irq_end(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
-		unmask_rm7k_irq(irq);
-}
-
 static struct irq_chip rm7k_irq_controller = {
 	.typename = "RM7000",
 	.ack = mask_rm7k_irq,
 	.mask = mask_rm7k_irq,
 	.mask_ack = mask_rm7k_irq,
 	.unmask = unmask_rm7k_irq,
-	.end = rm7k_cpu_irq_end,
 };
 
 void __init rm7k_cpu_irq_init(int base)
diff --git a/arch/mips/kernel/irq-rm9000.c b/arch/mips/kernel/irq-rm9000.c
index ba6440c..0e6f4c5 100644
--- a/arch/mips/kernel/irq-rm9000.c
+++ b/arch/mips/kernel/irq-rm9000.c
@@ -80,19 +80,12 @@
 	on_each_cpu(local_rm9k_perfcounter_irq_shutdown, (void *) irq, 0, 1);
 }
 
-static void rm9k_cpu_irq_end(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
-		unmask_rm9k_irq(irq);
-}
-
 static struct irq_chip rm9k_irq_controller = {
 	.typename = "RM9000",
 	.ack = mask_rm9k_irq,
 	.mask = mask_rm9k_irq,
 	.mask_ack = mask_rm9k_irq,
 	.unmask = unmask_rm9k_irq,
-	.end = rm9k_cpu_irq_end,
 };
 
 static struct irq_chip rm9k_perfcounter_irq = {
@@ -103,7 +96,6 @@
 	.mask = mask_rm9k_irq,
 	.mask_ack = mask_rm9k_irq,
 	.unmask = unmask_rm9k_irq,
-	.end = rm9k_cpu_irq_end,
 };
 
 unsigned int rm9000_perfcount_irq;
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index b339798..2fe4c86 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -117,7 +117,7 @@
 		for_each_online_cpu(j)
 			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 #endif
-		seq_printf(p, " %14s", irq_desc[i].chip->typename);
+		seq_printf(p, " %14s", irq_desc[i].chip->name);
 		seq_printf(p, "  %s", action->name);
 
 		for (action=action->next; action; action = action->next)
diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c
index be5ac23..fcc86b9 100644
--- a/arch/mips/kernel/irq_cpu.c
+++ b/arch/mips/kernel/irq_cpu.c
@@ -50,12 +50,6 @@
 	irq_disable_hazard();
 }
 
-static void mips_cpu_irq_end(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
-		unmask_mips_irq(irq);
-}
-
 static struct irq_chip mips_cpu_irq_controller = {
 	.typename	= "MIPS",
 	.ack		= mask_mips_irq,
@@ -63,7 +57,6 @@
 	.mask_ack	= mask_mips_irq,
 	.unmask		= unmask_mips_irq,
 	.eoi		= unmask_mips_irq,
-	.end		= mips_cpu_irq_end,
 };
 
 /*
@@ -96,8 +89,6 @@
 	mask_mips_mt_irq(irq);
 }
 
-#define mips_mt_cpu_irq_end mips_cpu_irq_end
-
 static struct irq_chip mips_mt_cpu_irq_controller = {
 	.typename	= "MIPS",
 	.startup	= mips_mt_cpu_irq_startup,
@@ -106,7 +97,6 @@
 	.mask_ack	= mips_mt_cpu_irq_ack,
 	.unmask		= unmask_mips_mt_irq,
 	.eoi		= unmask_mips_mt_irq,
-	.end		= mips_mt_cpu_irq_end,
 };
 
 void __init mips_cpu_irq_init(int irq_base)
diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c
index f06a144..2c82412 100644
--- a/arch/mips/kernel/kspd.c
+++ b/arch/mips/kernel/kspd.c
@@ -319,7 +319,7 @@
 static int channel_open = 0;
 
 /* the work handler */
-static void sp_work(void *data)
+static void sp_work(struct work_struct *unused)
 {
 	if (!channel_open) {
 		if( rtlx_open(RTLX_CHANNEL_SYSIO, 1) != 0) {
@@ -354,7 +354,7 @@
 			return;
 		}
 
-		INIT_WORK(&work, sp_work, NULL);
+		INIT_WORK(&work, sp_work);
 		queue_work(workqueue, &work);
 	} else
 		queue_work(workqueue, &work);
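sp_work() loses its data pointer because the reworked workqueue API passes the struct work_struct itself to the handler. When no per-work data is needed, the conversion is mechanical; a minimal sketch with hypothetical names:

	#include <linux/workqueue.h>

	static struct work_struct example_work;

	static void example_work_fn(struct work_struct *unused)
	{
		/* do the deferred processing */
	}

	static void example_kick(void)
	{
		INIT_WORK(&example_work, example_work_fn);	/* no data argument any more */
		schedule_work(&example_work);
	}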
diff --git a/arch/mips/lasat/interrupt.c b/arch/mips/lasat/interrupt.c
index 4a84a7b..2affa5f 100644
--- a/arch/mips/lasat/interrupt.c
+++ b/arch/mips/lasat/interrupt.c
@@ -44,19 +44,12 @@
 	*lasat_int_mask |= (1 << irq_nr) << lasat_int_mask_shift;
 }
 
-static void end_lasat_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		enable_lasat_irq(irq);
-}
-
 static struct irq_chip lasat_irq_type = {
 	.typename = "Lasat",
 	.ack = disable_lasat_irq,
 	.mask = disable_lasat_irq,
 	.mask_ack = disable_lasat_irq,
 	.unmask = enable_lasat_irq,
-	.end = end_lasat_irq,
 };
 
 static inline int ls1bit32(unsigned int x)
diff --git a/arch/mips/mm/dma-coherent.c b/arch/mips/mm/dma-coherent.c
index 7fa5fd1..5697c6e 100644
--- a/arch/mips/mm/dma-coherent.c
+++ b/arch/mips/mm/dma-coherent.c
@@ -190,14 +190,14 @@
 
 EXPORT_SYMBOL(dma_supported);
 
-int dma_is_consistent(dma_addr_t dma_addr)
+int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
 {
 	return 1;
 }
 
 EXPORT_SYMBOL(dma_is_consistent);
 
-void dma_cache_sync(void *vaddr, size_t size,
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
 {
 	BUG_ON(direction == DMA_NONE);
diff --git a/arch/mips/mm/dma-ip27.c b/arch/mips/mm/dma-ip27.c
index 8da19fd..f088344 100644
--- a/arch/mips/mm/dma-ip27.c
+++ b/arch/mips/mm/dma-ip27.c
@@ -197,14 +197,14 @@
 
 EXPORT_SYMBOL(dma_supported);
 
-int dma_is_consistent(dma_addr_t dma_addr)
+int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
 {
 	return 1;
 }
 
 EXPORT_SYMBOL(dma_is_consistent);
 
-void dma_cache_sync(void *vaddr, size_t size,
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
 {
 	BUG_ON(direction == DMA_NONE);
diff --git a/arch/mips/mm/dma-ip32.c b/arch/mips/mm/dma-ip32.c
index ec54ed0..b42b6f7 100644
--- a/arch/mips/mm/dma-ip32.c
+++ b/arch/mips/mm/dma-ip32.c
@@ -363,14 +363,15 @@
 
 EXPORT_SYMBOL(dma_supported);
 
-int dma_is_consistent(dma_addr_t dma_addr)
+int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
 {
 	return 1;
 }
 
 EXPORT_SYMBOL(dma_is_consistent);
 
-void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction)
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+	enum dma_data_direction direction)
 {
 	if (direction == DMA_NONE)
 		return;
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c
index 2eeffe5..8cecef0 100644
--- a/arch/mips/mm/dma-noncoherent.c
+++ b/arch/mips/mm/dma-noncoherent.c
@@ -299,14 +299,15 @@
 
 EXPORT_SYMBOL(dma_supported);
 
-int dma_is_consistent(dma_addr_t dma_addr)
+int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
 {
 	return 1;
 }
 
 EXPORT_SYMBOL(dma_is_consistent);
 
-void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction)
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+	enum dma_data_direction direction)
 {
 	if (direction == DMA_NONE)
 		return;
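All of the MIPS DMA backends grow a struct device * argument so that dma_is_consistent() and dma_cache_sync() identify the device being operated on. A hedged sketch of the new calling convention on the driver side (names hypothetical):

	#include <linux/dma-mapping.h>

	/* hypothetical caller: sync part of a dma_alloc_noncoherent() buffer */
	static void example_partial_sync(struct device *dev, void *vaddr, size_t len)
	{
		if (!dma_is_consistent(dev, 0))
			dma_cache_sync(dev, vaddr, len, DMA_TO_DEVICE);
	}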
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index 99ebf3c..675502a 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -39,7 +39,7 @@
 	unsigned long vaddr;
 
 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-	inc_preempt_count();
+	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
 
@@ -62,8 +62,7 @@
 	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
 
 	if (vaddr < FIXADDR_START) { // FIXME
-		dec_preempt_count();
-		preempt_check_resched();
+		pagefault_enable();
 		return;
 	}
 
@@ -78,8 +77,7 @@
 	local_flush_tlb_one(vaddr);
 #endif
 
-	dec_preempt_count();
-	preempt_check_resched();
+	pagefault_enable();
 }
 
 #ifndef CONFIG_LIMITED_DMA
@@ -92,7 +90,7 @@
 	enum fixed_addresses idx;
 	unsigned long vaddr;
 
-	inc_preempt_count();
+	pagefault_disable();
 
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
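kmap_atomic()/kunmap_atomic() switch from open-coded inc_preempt_count()/dec_preempt_count() to the pagefault_disable()/pagefault_enable() helpers, which disable preemption and mark the section so the fault handler knows it must not sleep. A minimal sketch of the pattern, with the fixmap handling elided and all names hypothetical:

	#include <linux/mm.h>
	#include <linux/highmem.h>
	#include <linux/uaccess.h>	/* pagefault_disable()/pagefault_enable() */

	void *example_kmap_atomic(struct page *page)
	{
		pagefault_disable();		/* also disables preemption */
		if (!PageHighMem(page))
			return page_address(page);
		/* ... set up the per-CPU fixmap slot here ... */
		return NULL;			/* placeholder in this sketch */
	}

	void example_kunmap_atomic(void *kvaddr)
	{
		/* ... tear down the fixmap slot here ... */
		pagefault_enable();		/* re-enables preemption, may reschedule */
	}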
diff --git a/arch/mips/momentum/ocelot_c/cpci-irq.c b/arch/mips/momentum/ocelot_c/cpci-irq.c
index e5a4a0a..bb11fef 100644
--- a/arch/mips/momentum/ocelot_c/cpci-irq.c
+++ b/arch/mips/momentum/ocelot_c/cpci-irq.c
@@ -66,15 +66,6 @@
 }
 
 /*
- * End IRQ processing
- */
-static void end_cpci_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		unmask_cpci_irq(irq);
-}
-
-/*
  * Interrupt handler for interrupts coming from the FPGA chip.
  * It could be built in ethernet ports etc...
  */
@@ -98,7 +89,6 @@
 	.mask = mask_cpci_irq,
 	.mask_ack = mask_cpci_irq,
 	.unmask = unmask_cpci_irq,
-	.end = end_cpci_irq,
 };
 
 void cpci_irq_init(void)
diff --git a/arch/mips/momentum/ocelot_c/uart-irq.c b/arch/mips/momentum/ocelot_c/uart-irq.c
index 0029f00..a7a80c0 100644
--- a/arch/mips/momentum/ocelot_c/uart-irq.c
+++ b/arch/mips/momentum/ocelot_c/uart-irq.c
@@ -60,15 +60,6 @@
 }
 
 /*
- * End IRQ processing
- */
-static void end_uart_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		unmask_uart_irq(irq);
-}
-
-/*
  * Interrupt handler for interrupts coming from the FPGA chip.
  */
 void ll_uart_irq(void)
@@ -91,7 +82,6 @@
 	.mask = mask_uart_irq,
 	.mask_ack = mask_uart_irq,
 	.unmask = unmask_uart_irq,
-	.end = end_uart_irq,
 };
 
 void uart_irq_init(void)
diff --git a/arch/mips/philips/pnx8550/common/int.c b/arch/mips/philips/pnx8550/common/int.c
index 0dc2393..2c36c10 100644
--- a/arch/mips/philips/pnx8550/common/int.c
+++ b/arch/mips/philips/pnx8550/common/int.c
@@ -158,20 +158,12 @@
 	return prev_priority;
 }
 
-static void end_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) {
-		unmask_irq(irq);
-	}
-}
-
 static struct irq_chip level_irq_type = {
 	.typename =	"PNX Level IRQ",
 	.ack =		mask_irq,
 	.mask =		mask_irq,
 	.mask_ack =	mask_irq,
 	.unmask =	unmask_irq,
-	.end =		end_irq,
 };
 
 static struct irqaction gic_action = {
diff --git a/arch/mips/sgi-ip22/ip22-int.c b/arch/mips/sgi-ip22/ip22-int.c
index c7b1380..c44f8be 100644
--- a/arch/mips/sgi-ip22/ip22-int.c
+++ b/arch/mips/sgi-ip22/ip22-int.c
@@ -51,19 +51,12 @@
 	sgint->imask0 &= ~(1 << (irq - SGINT_LOCAL0));
 }
 
-static void end_local0_irq (unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		enable_local0_irq(irq);
-}
-
 static struct irq_chip ip22_local0_irq_type = {
 	.typename	= "IP22 local 0",
 	.ack		= disable_local0_irq,
 	.mask		= disable_local0_irq,
 	.mask_ack	= disable_local0_irq,
 	.unmask		= enable_local0_irq,
-	.end		= end_local0_irq,
 };
 
 static void enable_local1_irq(unsigned int irq)
@@ -79,19 +72,12 @@
 	sgint->imask1 &= ~(1 << (irq - SGINT_LOCAL1));
 }
 
-static void end_local1_irq (unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		enable_local1_irq(irq);
-}
-
 static struct irq_chip ip22_local1_irq_type = {
 	.typename	= "IP22 local 1",
 	.ack		= disable_local1_irq,
 	.mask		= disable_local1_irq,
 	.mask_ack	= disable_local1_irq,
 	.unmask		= enable_local1_irq,
-	.end		= end_local1_irq,
 };
 
 static void enable_local2_irq(unsigned int irq)
@@ -107,19 +93,12 @@
 		sgint->imask0 &= ~(1 << (SGI_MAP_0_IRQ - SGINT_LOCAL0));
 }
 
-static void end_local2_irq (unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		enable_local2_irq(irq);
-}
-
 static struct irq_chip ip22_local2_irq_type = {
 	.typename	= "IP22 local 2",
 	.ack		= disable_local2_irq,
 	.mask		= disable_local2_irq,
 	.mask_ack	= disable_local2_irq,
 	.unmask		= enable_local2_irq,
-	.end		= end_local2_irq,
 };
 
 static void enable_local3_irq(unsigned int irq)
@@ -135,19 +114,12 @@
 		sgint->imask1 &= ~(1 << (SGI_MAP_1_IRQ - SGINT_LOCAL1));
 }
 
-static void end_local3_irq (unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		enable_local3_irq(irq);
-}
-
 static struct irq_chip ip22_local3_irq_type = {
 	.typename	= "IP22 local 3",
 	.ack		= disable_local3_irq,
 	.mask		= disable_local3_irq,
 	.mask_ack	= disable_local3_irq,
 	.unmask		= enable_local3_irq,
-	.end		= end_local3_irq,
 };
 
 static void indy_local0_irqdispatch(void)
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
index 5f8835b..319f880 100644
--- a/arch/mips/sgi-ip27/ip27-irq.c
+++ b/arch/mips/sgi-ip27/ip27-irq.c
@@ -332,13 +332,6 @@
 	intr_disconnect_level(cpu, swlevel);
 }
 
-static void end_bridge_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)) &&
-	    irq_desc[irq].action)
-		enable_bridge_irq(irq);
-}
-
 static struct irq_chip bridge_irq_type = {
 	.typename	= "bridge",
 	.startup	= startup_bridge_irq,
@@ -347,7 +340,6 @@
 	.mask		= disable_bridge_irq,
 	.mask_ack	= disable_bridge_irq,
 	.unmask		= enable_bridge_irq,
-	.end		= end_bridge_irq,
 };
 
 void __devinit register_bridge_irq(unsigned int irq)
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
index 7d36172..c20e989 100644
--- a/arch/mips/sgi-ip27/ip27-timer.c
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -180,10 +180,6 @@
 {
 }
 
-static void end_rt_irq(unsigned int irq)
-{
-}
-
 static struct irq_chip rt_irq_type = {
 	.typename	= "SN HUB RT timer",
 	.ack		= disable_rt_irq,
@@ -191,7 +187,6 @@
 	.mask_ack	= disable_rt_irq,
 	.unmask		= enable_rt_irq,
 	.eoi		= enable_rt_irq,
-	.end		= end_rt_irq,
 };
 
 static struct irqaction rt_irqaction = {
diff --git a/arch/mips/tx4927/common/tx4927_irq.c b/arch/mips/tx4927/common/tx4927_irq.c
index 21873de..ed4a19a 100644
--- a/arch/mips/tx4927/common/tx4927_irq.c
+++ b/arch/mips/tx4927/common/tx4927_irq.c
@@ -66,12 +66,10 @@
 #define TX4927_IRQ_CP0_INIT     ( 1 << 10 )
 #define TX4927_IRQ_CP0_ENABLE   ( 1 << 13 )
 #define TX4927_IRQ_CP0_DISABLE  ( 1 << 14 )
-#define TX4927_IRQ_CP0_ENDIRQ   ( 1 << 16 )
 
 #define TX4927_IRQ_PIC_INIT     ( 1 << 20 )
 #define TX4927_IRQ_PIC_ENABLE   ( 1 << 23 )
 #define TX4927_IRQ_PIC_DISABLE  ( 1 << 24 )
-#define TX4927_IRQ_PIC_ENDIRQ   ( 1 << 26 )
 
 #define TX4927_IRQ_ALL         0xffffffff
 #endif
@@ -82,12 +80,10 @@
 					  | TX4927_IRQ_WARN | TX4927_IRQ_EROR
 //                                       | TX4927_IRQ_CP0_INIT
 //                                       | TX4927_IRQ_CP0_ENABLE
-//                                       | TX4927_IRQ_CP0_DISABLE
 //                                       | TX4927_IRQ_CP0_ENDIRQ
 //                                       | TX4927_IRQ_PIC_INIT
 //                                       | TX4927_IRQ_PIC_ENABLE
 //                                       | TX4927_IRQ_PIC_DISABLE
-//                                       | TX4927_IRQ_PIC_ENDIRQ
 //                                       | TX4927_IRQ_INIT
 //                                       | TX4927_IRQ_NEST1
 //                                       | TX4927_IRQ_NEST2
@@ -114,11 +110,9 @@
 
 static void tx4927_irq_cp0_enable(unsigned int irq);
 static void tx4927_irq_cp0_disable(unsigned int irq);
-static void tx4927_irq_cp0_end(unsigned int irq);
 
 static void tx4927_irq_pic_enable(unsigned int irq);
 static void tx4927_irq_pic_disable(unsigned int irq);
-static void tx4927_irq_pic_end(unsigned int irq);
 
 /*
  * Kernel structs for all pic's
@@ -131,7 +125,6 @@
 	.mask		= tx4927_irq_cp0_disable,
 	.mask_ack	= tx4927_irq_cp0_disable,
 	.unmask		= tx4927_irq_cp0_enable,
-	.end		= tx4927_irq_cp0_end,
 };
 
 #define TX4927_PIC_NAME "TX4927-PIC"
@@ -141,7 +134,6 @@
 	.mask		= tx4927_irq_pic_disable,
 	.mask_ack	= tx4927_irq_pic_disable,
 	.unmask		= tx4927_irq_pic_enable,
-	.end		= tx4927_irq_pic_end,
 };
 
 #define TX4927_PIC_ACTION(s) { no_action, 0, CPU_MASK_NONE, s, NULL, NULL }
@@ -214,15 +206,6 @@
 	tx4927_irq_cp0_modify(CCP0_STATUS, tx4927_irq_cp0_mask(irq), 0);
 }
 
-static void tx4927_irq_cp0_end(unsigned int irq)
-{
-	TX4927_IRQ_DPRINTK(TX4927_IRQ_CP0_ENDIRQ, "irq=%d \n", irq);
-
-	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
-		tx4927_irq_cp0_enable(irq);
-	}
-}
-
 /*
  * Functions for pic
  */
@@ -376,15 +359,6 @@
 			      tx4927_irq_pic_mask(irq), 0);
 }
 
-static void tx4927_irq_pic_end(unsigned int irq)
-{
-	TX4927_IRQ_DPRINTK(TX4927_IRQ_PIC_ENDIRQ, "irq=%d\n", irq);
-
-	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
-		tx4927_irq_pic_enable(irq);
-	}
-}
-
 /*
  * Main init functions
  */
diff --git a/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c b/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c
index 34cdb2a..5a5ea6c 100644
--- a/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c
+++ b/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c
@@ -153,7 +153,6 @@
 #define TOSHIBA_RBTX4927_IRQ_IOC_INIT      ( 1 << 10 )
 #define TOSHIBA_RBTX4927_IRQ_IOC_ENABLE    ( 1 << 13 )
 #define TOSHIBA_RBTX4927_IRQ_IOC_DISABLE   ( 1 << 14 )
-#define TOSHIBA_RBTX4927_IRQ_IOC_ENDIRQ    ( 1 << 16 )
 
 #define TOSHIBA_RBTX4927_IRQ_ISA_INIT      ( 1 << 20 )
 #define TOSHIBA_RBTX4927_IRQ_ISA_ENABLE    ( 1 << 23 )
@@ -172,7 +171,6 @@
 //                                                 | TOSHIBA_RBTX4927_IRQ_IOC_INIT
 //                                                 | TOSHIBA_RBTX4927_IRQ_IOC_ENABLE
 //                                                 | TOSHIBA_RBTX4927_IRQ_IOC_DISABLE
-//                                                 | TOSHIBA_RBTX4927_IRQ_IOC_ENDIRQ
 //                                                 | TOSHIBA_RBTX4927_IRQ_ISA_INIT
 //                                                 | TOSHIBA_RBTX4927_IRQ_ISA_ENABLE
 //                                                 | TOSHIBA_RBTX4927_IRQ_ISA_DISABLE
@@ -223,7 +221,6 @@
 
 static void toshiba_rbtx4927_irq_ioc_enable(unsigned int irq);
 static void toshiba_rbtx4927_irq_ioc_disable(unsigned int irq);
-static void toshiba_rbtx4927_irq_ioc_end(unsigned int irq);
 
 #ifdef CONFIG_TOSHIBA_FPCIB0
 static void toshiba_rbtx4927_irq_isa_enable(unsigned int irq);
@@ -239,7 +236,6 @@
 	.mask = toshiba_rbtx4927_irq_ioc_disable,
 	.mask_ack = toshiba_rbtx4927_irq_ioc_disable,
 	.unmask = toshiba_rbtx4927_irq_ioc_enable,
-	.end = toshiba_rbtx4927_irq_ioc_end,
 };
 #define TOSHIBA_RBTX4927_IOC_INTR_ENAB 0xbc002000
 #define TOSHIBA_RBTX4927_IOC_INTR_STAT 0xbc002006
@@ -388,23 +384,6 @@
 	TOSHIBA_RBTX4927_WR08(TOSHIBA_RBTX4927_IOC_INTR_ENAB, v);
 }
 
-static void toshiba_rbtx4927_irq_ioc_end(unsigned int irq)
-{
-	TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_IOC_ENDIRQ,
-				     "irq=%d\n", irq);
-
-	if (irq < TOSHIBA_RBTX4927_IRQ_IOC_BEG
-	    || irq > TOSHIBA_RBTX4927_IRQ_IOC_END) {
-		TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_EROR,
-					     "bad irq=%d\n", irq);
-		panic("\n");
-	}
-
-	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
-		toshiba_rbtx4927_irq_ioc_enable(irq);
-	}
-}
-
 
 /**********************************************************************************/
 /* Functions for isa                                                              */
diff --git a/arch/mips/tx4938/common/irq.c b/arch/mips/tx4938/common/irq.c
index 42e1276..a347b42 100644
--- a/arch/mips/tx4938/common/irq.c
+++ b/arch/mips/tx4938/common/irq.c
@@ -39,11 +39,9 @@
 
 static void tx4938_irq_cp0_enable(unsigned int irq);
 static void tx4938_irq_cp0_disable(unsigned int irq);
-static void tx4938_irq_cp0_end(unsigned int irq);
 
 static void tx4938_irq_pic_enable(unsigned int irq);
 static void tx4938_irq_pic_disable(unsigned int irq);
-static void tx4938_irq_pic_end(unsigned int irq);
 
 /**********************************************************************************/
 /* Kernel structs for all pic's                                                   */
@@ -56,7 +54,6 @@
 	.mask = tx4938_irq_cp0_disable,
 	.mask_ack = tx4938_irq_cp0_disable,
 	.unmask = tx4938_irq_cp0_enable,
-	.end = tx4938_irq_cp0_end,
 };
 
 #define TX4938_PIC_NAME "TX4938-PIC"
@@ -66,7 +63,6 @@
 	.mask = tx4938_irq_pic_disable,
 	.mask_ack = tx4938_irq_pic_disable,
 	.unmask = tx4938_irq_pic_enable,
-	.end = tx4938_irq_pic_end,
 };
 
 static struct irqaction tx4938_irq_pic_action = {
@@ -104,14 +100,6 @@
 	clear_c0_status(tx4938_irq_cp0_mask(irq));
 }
 
-static void
-tx4938_irq_cp0_end(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
-		tx4938_irq_cp0_enable(irq);
-	}
-}
-
 /**********************************************************************************/
 /* Functions for pic                                                              */
 /**********************************************************************************/
@@ -269,14 +257,6 @@
 			      tx4938_irq_pic_mask(irq), 0);
 }
 
-static void
-tx4938_irq_pic_end(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
-		tx4938_irq_pic_enable(irq);
-	}
-}
-
 /**********************************************************************************/
 /* Main init functions                                                            */
 /**********************************************************************************/
diff --git a/arch/mips/tx4938/toshiba_rbtx4938/irq.c b/arch/mips/tx4938/toshiba_rbtx4938/irq.c
index 8c87a35..b6f363d 100644
--- a/arch/mips/tx4938/toshiba_rbtx4938/irq.c
+++ b/arch/mips/tx4938/toshiba_rbtx4938/irq.c
@@ -89,7 +89,6 @@
 
 static void toshiba_rbtx4938_irq_ioc_enable(unsigned int irq);
 static void toshiba_rbtx4938_irq_ioc_disable(unsigned int irq);
-static void toshiba_rbtx4938_irq_ioc_end(unsigned int irq);
 
 #define TOSHIBA_RBTX4938_IOC_NAME "RBTX4938-IOC"
 static struct irq_chip toshiba_rbtx4938_irq_ioc_type = {
@@ -98,7 +97,6 @@
 	.mask = toshiba_rbtx4938_irq_ioc_disable,
 	.mask_ack = toshiba_rbtx4938_irq_ioc_disable,
 	.unmask = toshiba_rbtx4938_irq_ioc_enable,
-	.end = toshiba_rbtx4938_irq_ioc_end,
 };
 
 #define TOSHIBA_RBTX4938_IOC_INTR_ENAB 0xb7f02000
@@ -167,14 +165,6 @@
 	TX4938_RD08(TOSHIBA_RBTX4938_IOC_INTR_ENAB);
 }
 
-static void
-toshiba_rbtx4938_irq_ioc_end(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
-		toshiba_rbtx4938_irq_ioc_enable(irq);
-	}
-}
-
 extern void __init txx9_spi_irqinit(int irc_irq);
 
 void __init arch_init_irq(void)
diff --git a/arch/mips/vr41xx/Kconfig b/arch/mips/vr41xx/Kconfig
index 92f41f6..c8dfd80 100644
--- a/arch/mips/vr41xx/Kconfig
+++ b/arch/mips/vr41xx/Kconfig
@@ -6,6 +6,7 @@
 	select ISA
 	select SYS_SUPPORTS_32BIT_KERNEL
 	select SYS_SUPPORTS_LITTLE_ENDIAN
+	select GENERIC_HARDIRQS_NO__DO_IRQ
 
 config IBM_WORKPAD
 	bool "Support for IBM WorkPad z50"
@@ -15,6 +16,7 @@
 	select ISA
 	select SYS_SUPPORTS_32BIT_KERNEL
 	select SYS_SUPPORTS_LITTLE_ENDIAN
+	select GENERIC_HARDIRQS_NO__DO_IRQ
 
 config NEC_CMBVR4133
 	bool "Support for NEC CMB-VR4133"
@@ -39,6 +41,7 @@
 	select IRQ_CPU
 	select SYS_SUPPORTS_32BIT_KERNEL
 	select SYS_SUPPORTS_LITTLE_ENDIAN
+	select GENERIC_HARDIRQS_NO__DO_IRQ
 	help
 	  The TANBAC VR4131 multichip module(TB0225) and
 	  the TANBAC VR4131DIMM(TB0229) are MIPS-based platforms
@@ -71,6 +74,7 @@
 	select IRQ_CPU
 	select SYS_SUPPORTS_32BIT_KERNEL
 	select SYS_SUPPORTS_LITTLE_ENDIAN
+	select GENERIC_HARDIRQS_NO__DO_IRQ
 
 config ZAO_CAPCELLA
 	bool "Support for ZAO Networks Capcella"
@@ -80,6 +84,7 @@
 	select IRQ_CPU
 	select SYS_SUPPORTS_32BIT_KERNEL
 	select SYS_SUPPORTS_LITTLE_ENDIAN
+	select GENERIC_HARDIRQS_NO__DO_IRQ
 
 config PCI_VR41XX
 	bool "Add PCI control unit support of NEC VR4100 series"
diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
index 54b92a7..c075261 100644
--- a/arch/mips/vr41xx/common/icu.c
+++ b/arch/mips/vr41xx/common/icu.c
@@ -427,19 +427,12 @@
 	icu1_set(MSYSINT1REG, 1 << SYSINT1_IRQ_TO_PIN(irq));
 }
 
-static void end_sysint1_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
-		icu1_set(MSYSINT1REG, 1 << SYSINT1_IRQ_TO_PIN(irq));
-}
-
 static struct irq_chip sysint1_irq_type = {
 	.typename	= "SYSINT1",
 	.ack		= disable_sysint1_irq,
 	.mask		= disable_sysint1_irq,
 	.mask_ack	= disable_sysint1_irq,
 	.unmask		= enable_sysint1_irq,
-	.end		= end_sysint1_irq,
 };
 
 static void disable_sysint2_irq(unsigned int irq)
@@ -452,19 +445,12 @@
 	icu2_set(MSYSINT2REG, 1 << SYSINT2_IRQ_TO_PIN(irq));
 }
 
-static void end_sysint2_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
-		icu2_set(MSYSINT2REG, 1 << SYSINT2_IRQ_TO_PIN(irq));
-}
-
 static struct irq_chip sysint2_irq_type = {
 	.typename	= "SYSINT2",
 	.ack		= disable_sysint2_irq,
 	.mask		= disable_sysint2_irq,
 	.mask_ack	= disable_sysint2_irq,
 	.unmask		= enable_sysint2_irq,
-	.end		= end_sysint2_irq,
 };
 
 static inline int set_sysint1_assign(unsigned int irq, unsigned char assign)
diff --git a/arch/parisc/kernel/binfmt_elf32.c b/arch/parisc/kernel/binfmt_elf32.c
index 1e64e7b..ecb10a4 100644
--- a/arch/parisc/kernel/binfmt_elf32.c
+++ b/arch/parisc/kernel/binfmt_elf32.c
@@ -75,7 +75,6 @@
 	char	pr_psargs[ELF_PRARGSZ];	/* initial part of arg list */
 };
 
-#define elf_addr_t	unsigned int
 #define init_elf_binfmt init_elf32_binfmt
 
 #define ELF_PLATFORM  ("PARISC32\0")
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 64785e4..641f9c9 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -152,7 +152,7 @@
 	const struct exception_table_entry *fix;
 	unsigned long acc_type;
 
-	if (in_interrupt() || !mm)
+	if (in_atomic() || !mm)
 		goto no_context;
 
 	down_read(&mm->mmap_sem);
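The guard in the parisc fault handler changes from in_interrupt() to in_atomic(): because pagefault_disable() raises the preempt count, any atomic user access (not only interrupt context) must take the no_context/fixup path rather than sleeping on mmap_sem. A hedged sketch of the kind of caller this protects, with hypothetical names:

	#include <linux/types.h>
	#include <linux/uaccess.h>

	/* hypothetical: a non-sleeping peek at user memory */
	static int example_peek_user(const void __user *uaddr, u32 *val)
	{
		int ret;

		pagefault_disable();	/* fault handler now sees in_atomic() */
		ret = __copy_from_user_inatomic(val, uaddr, sizeof(*val));
		pagefault_enable();

		return ret ? -EFAULT : 0;
	}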
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 89b03c8..d3f2080 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -46,61 +46,6 @@
 static cpumask_t cpus_in_crash = CPU_MASK_NONE;
 cpumask_t cpus_in_sr = CPU_MASK_NONE;
 
-static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
-							       size_t data_len)
-{
-	struct elf_note note;
-
-	note.n_namesz = strlen(name) + 1;
-	note.n_descsz = data_len;
-	note.n_type   = type;
-	memcpy(buf, &note, sizeof(note));
-	buf += (sizeof(note) +3)/4;
-	memcpy(buf, name, note.n_namesz);
-	buf += (note.n_namesz + 3)/4;
-	memcpy(buf, data, note.n_descsz);
-	buf += (note.n_descsz + 3)/4;
-
-	return buf;
-}
-
-static void final_note(u32 *buf)
-{
-	struct elf_note note;
-
-	note.n_namesz = 0;
-	note.n_descsz = 0;
-	note.n_type   = 0;
-	memcpy(buf, &note, sizeof(note));
-}
-
-static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
-{
-	struct elf_prstatus prstatus;
-	u32 *buf;
-
-	if ((cpu < 0) || (cpu >= NR_CPUS))
-		return;
-
-	/* Using ELF notes here is opportunistic.
-	 * I need a well defined structure format
-	 * for the data I pass, and I need tags
-	 * on the data to indicate what information I have
-	 * squirrelled away.  ELF notes happen to provide
-	 * all of that that no need to invent something new.
-	 */
-	buf = (u32*)per_cpu_ptr(crash_notes, cpu);
-	if (!buf) 
-		return;
-
-	memset(&prstatus, 0, sizeof(prstatus));
-	prstatus.pr_pid = current->pid;
-	elf_core_copy_regs(&prstatus.pr_reg, regs);
-	buf = append_elf_note(buf, "CORE", NT_PRSTATUS, &prstatus,
-			sizeof(prstatus));
-	final_note(buf);
-}
-
 #ifdef CONFIG_SMP
 static atomic_t enter_on_soft_reset = ATOMIC_INIT(0);
 
@@ -113,7 +58,7 @@
 
 	hard_irq_disable();
 	if (!cpu_isset(cpu, cpus_in_crash))
-		crash_save_this_cpu(regs, cpu);
+		crash_save_cpu(regs, cpu);
 	cpu_set(cpu, cpus_in_crash);
 
 	/*
@@ -306,7 +251,7 @@
 	 * such that another IPI will not be sent.
 	 */
 	crashing_cpu = smp_processor_id();
-	crash_save_this_cpu(regs, crashing_cpu);
+	crash_save_cpu(regs, crashing_cpu);
 	crash_kexec_prepare_cpus(crashing_cpu);
 	cpu_set(crashing_cpu, cpus_in_crash);
 	if (ppc_md.kexec_cpu_down)
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 7b8d12b..4657563 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -85,7 +85,7 @@
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
 	mutex_lock(&kprobe_mutex);
-	free_insn_slot(p->ainsn.insn);
+	free_insn_slot(p->ainsn.insn, 0);
 	mutex_unlock(&kprobe_mutex);
 }
 
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index b9561d3..7d0f13f 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -101,7 +101,7 @@
 static struct flash_block_list_header rtas_firmware_flash_list = {0, NULL};
 
 /* Use slab cache to guarantee 4k alignment */
-static kmem_cache_t *flash_block_cache = NULL;
+static struct kmem_cache *flash_block_cache = NULL;
 
 #define FLASH_BLOCK_LIST_VERSION (1UL)
 
@@ -286,7 +286,7 @@
 }
 
 /* constructor for flash_block_cache */
-void rtas_block_ctor(void *ptr, kmem_cache_t *cache, unsigned long flags)
+void rtas_block_ctor(void *ptr, struct kmem_cache *cache, unsigned long flags)
 {
 	memset(ptr, 0, RTAS_BLK_SIZE);
 }
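kmem_cache_t was only a typedef for struct kmem_cache and is being removed tree-wide, so cache declarations and constructor prototypes now spell the struct type out. A minimal sketch of the updated declaration and creation, under the slab API of this kernel generation and with hypothetical names:

	#include <linux/init.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	static struct kmem_cache *example_cache;

	/* constructor signature after the typedef removal */
	static void example_ctor(void *obj, struct kmem_cache *cache, unsigned long flags)
	{
		memset(obj, 0, kmem_cache_size(cache));
	}

	static int __init example_cache_init(void)
	{
		example_cache = kmem_cache_create("example_cache", 256, 0,
						  SLAB_HWCACHE_ALIGN,
						  example_ctor, NULL);
		return example_cache ? 0 : -ENOMEM;
	}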
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 320353f..e4ebe1a 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -36,7 +36,7 @@
 #include <linux/stddef.h>
 #include <linux/tty.h>
 #include <linux/binfmts.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #endif
 
 #include <asm/uaccess.h>
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index c913ad5..a4b28c7 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -264,7 +264,7 @@
 
 
 	/* Allocate a VMA structure and fill it up */
-	vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (vma == NULL) {
 		rc = -ENOMEM;
 		goto fail_mmapsem;
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 506d897..89c836d 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -146,6 +146,11 @@
 	return hugepte_offset(hpdp, addr);
 }
 
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+	return 0;
+}
+
 static void free_hugepte_range(struct mmu_gather *tlb, hugepd_t *hpdp)
 {
 	pte_t *hugepte = hugepd_page(*hpdp);
@@ -1042,7 +1047,7 @@
 	return err;
 }
 
-static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
+static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
 {
 	memset(addr, 0, kmem_cache_size(cache));
 }
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 9a17854..d12a87e 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -141,7 +141,7 @@
 }
 module_init(setup_kcore);
 
-static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
+static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
 {
 	memset(addr, 0, kmem_cache_size(cache));
 }
@@ -166,9 +166,9 @@
 /* Hugepages need one extra cache, initialized in hugetlbpage.c.  We
  * can't put into the tables above, because HPAGE_SHIFT is not compile
  * time constant. */
-kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)+1];
+struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)+1];
 #else
-kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
+struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
 #endif
 
 void pgtable_cache_init(void)
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index c7d0107..e3af911 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -40,7 +40,7 @@
 
 #include "spufs.h"
 
-static kmem_cache_t *spufs_inode_cache;
+static struct kmem_cache *spufs_inode_cache;
 char *isolated_loader;
 
 static struct inode *
@@ -48,7 +48,7 @@
 {
 	struct spufs_inode_info *ei;
 
-	ei = kmem_cache_alloc(spufs_inode_cache, SLAB_KERNEL);
+	ei = kmem_cache_alloc(spufs_inode_cache, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 
@@ -65,7 +65,7 @@
 }
 
 static void
-spufs_init_once(void *p, kmem_cache_t * cachep, unsigned long flags)
+spufs_init_once(void *p, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct spufs_inode_info *ei = p;
 
diff --git a/arch/powerpc/platforms/embedded6xx/ls_uart.c b/arch/powerpc/platforms/embedded6xx/ls_uart.c
index 31bcdae..0e83776 100644
--- a/arch/powerpc/platforms/embedded6xx/ls_uart.c
+++ b/arch/powerpc/platforms/embedded6xx/ls_uart.c
@@ -14,7 +14,7 @@
 
 static struct work_struct wd_work;
 
-static void wd_stop(void *unused)
+static void wd_stop(struct work_struct *unused)
 {
 	const char string[] = "AAAAFFFFJJJJ>>>>VVVV>>>>ZZZZVVVVKKKK";
 	int i = 0, rescue = 8;
@@ -122,7 +122,7 @@
 
 	ls_uart_init();
 
-	INIT_WORK(&wd_work, wd_stop, NULL);
+	INIT_WORK(&wd_work, wd_stop);
 	schedule_work(&wd_work);
 
 	return 0;
diff --git a/arch/powerpc/platforms/powermac/backlight.c b/arch/powerpc/platforms/powermac/backlight.c
index afa593a..c3a8941 100644
--- a/arch/powerpc/platforms/powermac/backlight.c
+++ b/arch/powerpc/platforms/powermac/backlight.c
@@ -18,11 +18,11 @@
 
 #define OLD_BACKLIGHT_MAX 15
 
-static void pmac_backlight_key_worker(void *data);
-static void pmac_backlight_set_legacy_worker(void *data);
+static void pmac_backlight_key_worker(struct work_struct *work);
+static void pmac_backlight_set_legacy_worker(struct work_struct *work);
 
-static DECLARE_WORK(pmac_backlight_key_work, pmac_backlight_key_worker, NULL);
-static DECLARE_WORK(pmac_backlight_set_legacy_work, pmac_backlight_set_legacy_worker, NULL);
+static DECLARE_WORK(pmac_backlight_key_work, pmac_backlight_key_worker);
+static DECLARE_WORK(pmac_backlight_set_legacy_work, pmac_backlight_set_legacy_worker);
 
 /* Although these variables are used in interrupt context, it makes no sense to
  * protect them. No user is able to produce enough key events per second and
@@ -94,7 +94,7 @@
 	return level;
 }
 
-static void pmac_backlight_key_worker(void *data)
+static void pmac_backlight_key_worker(struct work_struct *work)
 {
 	if (atomic_read(&kernel_backlight_disabled))
 		return;
@@ -166,7 +166,7 @@
 	return error;
 }
 
-static void pmac_backlight_set_legacy_worker(void *data)
+static void pmac_backlight_set_legacy_worker(struct work_struct *work)
 {
 	if (atomic_read(&kernel_backlight_disabled))
 		return;
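DECLARE_WORK drops its data argument along with INIT_WORK; statically declared work items now name only the handler, which receives the work_struct pointer. A minimal sketch (names hypothetical):

	#include <linux/workqueue.h>

	static void example_key_worker(struct work_struct *work);
	static DECLARE_WORK(example_key_work, example_key_worker);

	static void example_key_worker(struct work_struct *work)
	{
		/* shared state is reached through globals or container_of(work, ...) */
	}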
diff --git a/arch/powerpc/platforms/pseries/eeh_event.c b/arch/powerpc/platforms/pseries/eeh_event.c
index 1370774..49037ed 100644
--- a/arch/powerpc/platforms/pseries/eeh_event.c
+++ b/arch/powerpc/platforms/pseries/eeh_event.c
@@ -37,8 +37,8 @@
 /* EEH event workqueue setup. */
 static DEFINE_SPINLOCK(eeh_eventlist_lock);
 LIST_HEAD(eeh_eventlist);
-static void eeh_thread_launcher(void *);
-DECLARE_WORK(eeh_event_wq, eeh_thread_launcher, NULL);
+static void eeh_thread_launcher(struct work_struct *);
+DECLARE_WORK(eeh_event_wq, eeh_thread_launcher);
 
 /* Serialize reset sequences for a given pci device */
 DEFINE_MUTEX(eeh_event_mutex);
@@ -103,7 +103,7 @@
  * eeh_thread_launcher
  * @dummy - unused
  */
-static void eeh_thread_launcher(void *dummy)
+static void eeh_thread_launcher(struct work_struct *dummy)
 {
 	if (kernel_thread(eeh_event_handler, NULL, CLONE_KERNEL) < 0)
 		printk(KERN_ERR "Failed to start EEH daemon\n");
diff --git a/arch/ppc/8260_io/fcc_enet.c b/arch/ppc/8260_io/fcc_enet.c
index 2e1943e..709952c 100644
--- a/arch/ppc/8260_io/fcc_enet.c
+++ b/arch/ppc/8260_io/fcc_enet.c
@@ -385,6 +385,7 @@
 	phy_info_t	*phy;
 	struct work_struct phy_relink;
 	struct work_struct phy_display_config;
+	struct net_device *dev;
 
 	uint	sequence_done;
 
@@ -1391,10 +1392,11 @@
 	NULL
 };
 
-static void mii_display_status(void *data)
+static void mii_display_status(struct work_struct *work)
 {
-	struct net_device *dev = data;
-	volatile struct fcc_enet_private *fep = dev->priv;
+	volatile struct fcc_enet_private *fep =
+		container_of(work, struct fcc_enet_private, phy_relink);
+	struct net_device *dev = fep->dev;
 	uint s = fep->phy_status;
 
 	if (!fep->link && !fep->old_link) {
@@ -1428,10 +1430,12 @@
 	printk(".\n");
 }
 
-static void mii_display_config(void *data)
+static void mii_display_config(struct work_struct *work)
 {
-	struct net_device *dev = data;
-	volatile struct fcc_enet_private *fep = dev->priv;
+	volatile struct fcc_enet_private *fep =
+		container_of(work, struct fcc_enet_private,
+			     phy_display_config);
+	struct net_device *dev = fep->dev;
 	uint s = fep->phy_status;
 
 	printk("%s: config: auto-negotiation ", dev->name);
@@ -1758,8 +1762,9 @@
 		cep->phy_id_done = 0;
 		cep->phy_addr = fip->fc_phyaddr;
 		mii_queue(dev, mk_mii_read(MII_PHYSID1), mii_discover_phy);
-		INIT_WORK(&cep->phy_relink, mii_display_status, dev);
-		INIT_WORK(&cep->phy_display_config, mii_display_config, dev);
+		INIT_WORK(&cep->phy_relink, mii_display_status);
+		INIT_WORK(&cep->phy_display_config, mii_display_config);
+		cep->dev = dev;
 #endif	/* CONFIG_USE_MDIO */
 
 		fip++;
diff --git a/arch/ppc/8xx_io/fec.c b/arch/ppc/8xx_io/fec.c
index 2f9fa9e..e6c28fb 100644
--- a/arch/ppc/8xx_io/fec.c
+++ b/arch/ppc/8xx_io/fec.c
@@ -173,6 +173,7 @@
 	uint	phy_speed;
 	phy_info_t	*phy;
 	struct work_struct phy_task;
+	struct net_device *dev;
 
 	uint	sequence_done;
 
@@ -1263,10 +1264,11 @@
 	printk(".\n");
 }
 
-static void mii_display_config(void *priv)
+static void mii_display_config(struct work_struct *work)
 {
-	struct net_device *dev = (struct net_device *)priv;
-	struct fec_enet_private *fep = dev->priv;
+	struct fec_enet_private *fep =
+		container_of(work, struct fec_enet_private, phy_task);
+	struct net_device *dev = fep->dev;
 	volatile uint *s = &(fep->phy_status);
 
 	printk("%s: config: auto-negotiation ", dev->name);
@@ -1295,10 +1297,11 @@
 	fep->sequence_done = 1;
 }
 
-static void mii_relink(void *priv)
+static void mii_relink(struct work_struct *work)
 {
-	struct net_device *dev = (struct net_device *)priv;
-	struct fec_enet_private *fep = dev->priv;
+	struct fec_enet_private *fep =
+		container_of(work, struct fec_enet_private, phy_task);
+	struct net_device *dev = fep->dev;
 	int duplex;
 
 	fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0;
@@ -1325,7 +1328,8 @@
 {
 	struct fec_enet_private *fep = dev->priv;
 
-	INIT_WORK(&fep->phy_task, mii_relink, (void *)dev);
+	fep->dev = dev;
+	INIT_WORK(&fep->phy_task, mii_relink);
 	schedule_work(&fep->phy_task);
 }
 
@@ -1333,7 +1337,8 @@
 {
 	struct fec_enet_private *fep = dev->priv;
 
-	INIT_WORK(&fep->phy_task, mii_display_config, (void *)dev);
+	fep->dev = dev;
+	INIT_WORK(&fep->phy_task, mii_display_config);
 	schedule_work(&fep->phy_task);
 }
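Where the old handlers received a driver-private pointer, the converted ones recover it with container_of() from the embedded work_struct, which is why the private structures above gain a back-pointer to the net_device. The pattern, sketched with hypothetical names:

	#include <linux/kernel.h>
	#include <linux/netdevice.h>
	#include <linux/workqueue.h>

	struct example_priv {
		struct work_struct phy_task;
		struct net_device *dev;		/* back-pointer, filled in before INIT_WORK */
	};

	static void example_phy_task(struct work_struct *work)
	{
		struct example_priv *priv =
			container_of(work, struct example_priv, phy_task);
		struct net_device *dev = priv->dev;

		printk(KERN_DEBUG "%s: PHY event\n", dev->name);
	}

	static void example_schedule(struct net_device *dev, struct example_priv *priv)
	{
		priv->dev = dev;
		INIT_WORK(&priv->phy_task, example_phy_task);
		schedule_work(&priv->phy_task);
	}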
 
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index af1e8fc..b8c2372 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -92,8 +92,8 @@
  * Work queue
  */
 static struct workqueue_struct *appldata_wq;
-static void appldata_work_fn(void *data);
-static DECLARE_WORK(appldata_work, appldata_work_fn, NULL);
+static void appldata_work_fn(struct work_struct *work);
+static DECLARE_WORK(appldata_work, appldata_work_fn);
 
 
 /*
@@ -125,7 +125,7 @@
  *
  * call data gathering function for each (active) module
  */
-static void appldata_work_fn(void *data)
+static void appldata_work_fn(struct work_struct *work)
 {
 	struct list_head *lh;
 	struct appldata_ops *ops;
@@ -561,7 +561,6 @@
 	spin_unlock(&appldata_timer_lock);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int __cpuinit
 appldata_cpu_notify(struct notifier_block *self,
 		    unsigned long action, void *hcpu)
@@ -582,7 +581,6 @@
 static struct notifier_block appldata_nb = {
 	.notifier_call = appldata_cpu_notify,
 };
-#endif
 
 /*
  * appldata_init()
diff --git a/arch/s390/kernel/binfmt_elf32.c b/arch/s390/kernel/binfmt_elf32.c
index 9565a2d..5c46054 100644
--- a/arch/s390/kernel/binfmt_elf32.c
+++ b/arch/s390/kernel/binfmt_elf32.c
@@ -176,7 +176,6 @@
 
 #include <linux/highuid.h>
 
-#define elf_addr_t	u32
 /*
 #define init_elf_binfmt init_elf32_binfmt
 */
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 67914fe..576368c 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -200,7 +200,7 @@
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
 	mutex_lock(&kprobe_mutex);
-	free_insn_slot(p->ainsn.insn);
+	free_insn_slot(p->ainsn.insn, 0);
 	mutex_unlock(&kprobe_mutex);
 }
 
diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c
index 2d549ed..bbaca66 100644
--- a/arch/s390/lib/uaccess_std.c
+++ b/arch/s390/lib/uaccess_std.c
@@ -11,7 +11,7 @@
 
 #include <linux/errno.h>
 #include <linux/mm.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #include <asm/futex.h>
 
 #ifndef __s390x__
@@ -258,7 +258,7 @@
 {
 	int oldval = 0, newval, ret;
 
-	inc_preempt_count();
+	pagefault_disable();
 
 	switch (op) {
 	case FUTEX_OP_SET:
@@ -284,7 +284,7 @@
 	default:
 		ret = -ENOSYS;
 	}
-	dec_preempt_count();
+	pagefault_enable();
 	*old = oldval;
 	return ret;
 }
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index bffc7e1..d83d64a 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -51,6 +51,14 @@
 config ARCH_MAY_HAVE_PC_FDC
 	bool
 
+config STACKTRACE_SUPPORT
+	bool
+	default y
+
+config LOCKDEP_SUPPORT
+	bool
+	default y
+
 source "init/Kconfig"
 
 menu "System type"
@@ -219,6 +227,20 @@
 	help
 	  Select SHMIN if configuring for the SHMIN board.
 
+config SH_7206_SOLUTION_ENGINE
+	bool "SolutionEngine7206"
+	select CPU_SUBTYPE_SH7206
+	help
+	  Select 7206 SolutionEngine if configuring for a Hitachi SH7206
+	  evaluation board.
+
+config SH_7619_SOLUTION_ENGINE
+	bool "SolutionEngine7619"
+	select CPU_SUBTYPE_SH7619
+	help
+	  Select 7619 SolutionEngine if configuring for a Hitachi SH7619
+	  evaluation board.
+
 config SH_UNKNOWN
 	bool "BareCPU"
 	help
@@ -280,12 +302,20 @@
 
 menu "Processor features"
 
-config CPU_LITTLE_ENDIAN
-	bool "Little Endian"
+choice
+	prompt "Endianess selection" 
+	default CPU_LITTLE_ENDIAN
 	help
 	  Some SuperH machines can be configured for either little or big
-	  endian byte order. These modes require different kernels. Say Y if
-	  your machine is little endian, N if it's a big endian machine.
+	  endian byte order. These modes require different kernels.
+
+config CPU_LITTLE_ENDIAN
+	bool "Little Endian"
+
+config CPU_BIG_ENDIAN
+	bool "Big Endian"
+
+endchoice
 
 config SH_FPU
 	bool "FPU support"
@@ -345,6 +375,9 @@
 config CPU_HAS_INTC2_IRQ
 	bool
 
+config CPU_HAS_IPR_IRQ
+	bool
+
 config CPU_HAS_SR_RB
 	bool "CPU has SR.RB"
 	depends on CPU_SH3 || CPU_SH4
@@ -357,6 +390,9 @@
 	  See <file:Documentation/sh/register-banks.txt> for further
 	  information on SR.RB and register banking in the kernel in general.
 
+config CPU_HAS_PTEA
+	bool
+
 endmenu
 
 menu "Timer support"
@@ -364,10 +400,25 @@
 
 config SH_TMU
 	bool "TMU timer support"
+	depends on CPU_SH3 || CPU_SH4
 	default y
 	help
 	  This enables the use of the TMU as the system timer.
 
+config SH_CMT
+	bool "CMT timer support"
+	depends on CPU_SH2
+	default y
+	help
+	  This enables the use of the CMT as the system timer.
+
+config SH_MTU2
+	bool "MTU2 timer support"
+	depends on CPU_SH2A
+	default n
+	help
+	  This enables the use of the MTU2 as the system timer.
+
 endmenu
 
 source "arch/sh/boards/renesas/hs7751rvoip/Kconfig"
@@ -376,19 +427,52 @@
 
 source "arch/sh/boards/renesas/r7780rp/Kconfig"
 
+config SH_TIMER_IRQ
+	int
+	default "28" if CPU_SUBTYPE_SH7780
+	default "86" if CPU_SUBTYPE_SH7619
+	default "140" if CPU_SUBTYPE_SH7206
+	default "16"
+
+config NO_IDLE_HZ
+	bool "Dynamic tick timer"
+	help
+	  Select this option if you want to disable continuous timer ticks
+	  and have them programmed to occur as required. This option saves
+	  power as the system can remain in idle state for longer.
+
+	  By default dynamic tick is disabled during the boot, and can be
+	  manually enabled with:
+
+	    echo 1 > /sys/devices/system/timer/timer0/dyn_tick
+
+	  Alternatively, if you want dynamic tick automatically enabled
+	  during boot, pass "dyntick=enable" via the kernel command string.
+
+	  Please note that dynamic tick may affect the accuracy of
+	  timekeeping on some platforms depending on the implementation.
+
 config SH_PCLK_FREQ
 	int "Peripheral clock frequency (in Hz)"
+	default "27000000" if CPU_SUBTYPE_SH73180 || CPU_SUBTYPE_SH7343
+	default "31250000" if CPU_SUBTYPE_SH7619
+	default "33333333" if CPU_SUBTYPE_SH7300 || CPU_SUBTYPE_SH7770 || \
+			      CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7705 || \
+			      CPU_SUBTYPE_SH7206
 	default "50000000" if CPU_SUBTYPE_SH7750 || CPU_SUBTYPE_SH7780
 	default "60000000" if CPU_SUBTYPE_SH7751
-	default "33333333" if CPU_SUBTYPE_SH7300 || CPU_SUBTYPE_SH7770 || \
-			      CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7705
-	default "27000000" if CPU_SUBTYPE_SH73180 || CPU_SUBTYPE_SH7343
 	default "66000000" if CPU_SUBTYPE_SH4_202
 	help
 	  This option is used to specify the peripheral clock frequency.
 	  This is necessary for determining the reference clock value on
 	  platforms lacking an RTC.
 
+config SH_CLK_MD
+	int "CPU Mode Pin Setting"
+	depends on CPU_SUBTYPE_SH7619 || CPU_SUBTYPE_SH7206
+	help
+	  MD2 - MD0 Setting.
+
 menu "CPU Frequency scaling"
 
 source "drivers/cpufreq/Kconfig"
@@ -421,6 +505,8 @@
 	  behavior is platform-dependent, but normally the flash frequency is
 	  a hyperbolic function of the 5-minute load average.
 
+source "arch/sh/drivers/Kconfig"
+
 endmenu
 
 config ISA_DMA_API
diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug
index 48479e0..66a25ef 100644
--- a/arch/sh/Kconfig.debug
+++ b/arch/sh/Kconfig.debug
@@ -1,5 +1,9 @@
 menu "Kernel hacking"
 
+config TRACE_IRQFLAGS_SUPPORT
+	bool
+	default y
+
 source "lib/Kconfig.debug"
 
 config SH_STANDARD_BIOS
@@ -17,7 +21,18 @@
 
 config EARLY_SCIF_CONSOLE
 	bool "Use early SCIF console"
-	depends on CPU_SH4 || CPU_SH2A && !SH_STANDARD_BIOS
+	help
+	  This enables an early console using a fixed SCIF port. This can
+	  be used by platforms that are either not running the SH
+	  standard BIOS, or do not wish to use the BIOS callbacks for the
+	  serial I/O.
+
+config EARLY_SCIF_CONSOLE_PORT
+	hex "SCIF port for early console"
+	depends on EARLY_SCIF_CONSOLE
+	default "0xffe00000" if CPU_SUBTYPE_SH7780
+	default "0xfffe9800" if CPU_SUBTYPE_SH72060
+	default "0xffe80000" if CPU_SH4
 
 config EARLY_PRINTK
 	bool "Early printk support"
@@ -30,6 +45,11 @@
 	  when the kernel may crash or hang before the serial console is
 	  initialised. If unsure, say N.
 
+	  On devices that are running SH-IPL and want to keep the port
+	  initialization consistent while not using the BIOS callbacks,
+	  select both the EARLY_SCIF_CONSOLE and SH_STANDARD_BIOS, using
+	  the kernel command line option to toggle back and forth.
+
 config DEBUG_STACKOVERFLOW
 	bool "Check for stack overflows"
 	depends on DEBUG_KERNEL
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index 26d62ff..d10bba5 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -13,10 +13,6 @@
 # for "archclean" and "archdep" for cleaning up and making dependencies for
 # this architecture
 #
-
-cflags-y				:= -mb
-cflags-$(CONFIG_CPU_LITTLE_ENDIAN)	:= -ml
-
 isa-y					:= any
 isa-$(CONFIG_SH_DSP)			:= sh
 isa-$(CONFIG_CPU_SH2)			:= sh2
@@ -38,13 +34,16 @@
 endif
 endif
 
-cflags-y	+= $(call as-option,-Wa$(comma)-isa=$(isa-y),)
-
-cflags-$(CONFIG_CPU_SH2)		+= -m2
-cflags-$(CONFIG_CPU_SH3)		+= -m3
-cflags-$(CONFIG_CPU_SH4)		+= -m4 \
+cflags-$(CONFIG_CPU_SH2)		:= -m2
+cflags-$(CONFIG_CPU_SH3)		:= -m3
+cflags-$(CONFIG_CPU_SH4)		:= -m4 \
 	$(call cc-option,-mno-implicit-fp,-m4-nofpu)
-cflags-$(CONFIG_CPU_SH4A)		+= $(call cc-option,-m4a-nofpu,)
+cflags-$(CONFIG_CPU_SH4A)		:= -m4a $(call cc-option,-m4a-nofpu,)
+
+cflags-$(CONFIG_CPU_BIG_ENDIAN)		+= -mb
+cflags-$(CONFIG_CPU_LITTLE_ENDIAN)	+= -ml
+
+cflags-y	+= $(call as-option,-Wa$(comma)-isa=$(isa-y),) -ffreestanding
 
 cflags-$(CONFIG_SH_DSP)			+= -Wa,-dsp
 cflags-$(CONFIG_SH_KGDB)		+= -g
@@ -59,7 +58,9 @@
 # never be used by anyone. Use a board-specific defconfig that has a
 # reasonable chance of being current instead.
 #
-KBUILD_DEFCONFIG := rts7751r2d_defconfig
+KBUILD_DEFCONFIG := r7780rp_defconfig
+
+KBUILD_IMAGE	:= arch/sh/boot/zImage
 
 #
 # Choosing incompatible machines durings configuration will result in
@@ -109,6 +110,8 @@
 machdir-$(CONFIG_SH_LANDISK)			:= landisk
 machdir-$(CONFIG_SH_TITAN)			:= titan
 machdir-$(CONFIG_SH_SHMIN)			:= shmin
+machdir-$(CONFIG_SH_7206_SOLUTION_ENGINE)	:= se/7206
+machdir-$(CONFIG_SH_7619_SOLUTION_ENGINE)	:= se/7619
 machdir-$(CONFIG_SH_UNKNOWN)			:= unknown
 
 incdir-y			:= $(notdir $(machdir-y))
@@ -124,6 +127,7 @@
 core-$(CONFIG_VOYAGERGX)	+= arch/sh/cchips/voyagergx/
 
 cpuincdir-$(CONFIG_CPU_SH2)	:= cpu-sh2
+cpuincdir-$(CONFIG_CPU_SH2A)	:= cpu-sh2a
 cpuincdir-$(CONFIG_CPU_SH3)	:= cpu-sh3
 cpuincdir-$(CONFIG_CPU_SH4)	:= cpu-sh4
 
diff --git a/arch/sh/boards/renesas/r7780rp/Makefile b/arch/sh/boards/renesas/r7780rp/Makefile
index f1776d0..574b031 100644
--- a/arch/sh/boards/renesas/r7780rp/Makefile
+++ b/arch/sh/boards/renesas/r7780rp/Makefile
@@ -3,4 +3,6 @@
 #
 
 obj-y	 := setup.o io.o irq.o
-obj-$(CONFIG_HEARTBEAT)	+= led.o
+
+obj-$(CONFIG_HEARTBEAT)		+= led.o
+obj-$(CONFIG_PUSH_SWITCH)	+= psw.o
diff --git a/arch/sh/boards/renesas/r7780rp/irq.c b/arch/sh/boards/renesas/r7780rp/irq.c
index aa15ec5..cc381e1 100644
--- a/arch/sh/boards/renesas/r7780rp/irq.c
+++ b/arch/sh/boards/renesas/r7780rp/irq.c
@@ -10,6 +10,7 @@
  */
 #include <linux/init.h>
 #include <linux/irq.h>
+#include <linux/interrupt.h>
 #include <linux/io.h>
 #include <asm/r7780rp.h>
 
diff --git a/arch/sh/boards/renesas/r7780rp/psw.c b/arch/sh/boards/renesas/r7780rp/psw.c
new file mode 100644
index 0000000..c844dfa
--- /dev/null
+++ b/arch/sh/boards/renesas/r7780rp/psw.c
@@ -0,0 +1,122 @@
+/*
+ * arch/sh/boards/renesas/r7780rp/psw.c
+ *
+ * push switch support for RDBRP-1/RDBREVRP-1 debug boards.
+ *
+ * Copyright (C) 2006  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <asm/mach/r7780rp.h>
+#include <asm/push-switch.h>
+
+static irqreturn_t psw_irq_handler(int irq, void *arg)
+{
+	struct platform_device *pdev = arg;
+	struct push_switch *psw = platform_get_drvdata(pdev);
+	struct push_switch_platform_info *psw_info = pdev->dev.platform_data;
+	unsigned int l, mask;
+	int ret = 0;
+
+	l = ctrl_inw(PA_DBSW);
+
+	/* Nothing to do if there's no state change */
+	if (psw->state) {
+		ret = 1;
+		goto out;
+	}
+
+	mask = l & 0x70;
+	/* Figure out who raised it */
+	if (mask & (1 << psw_info->bit)) {
+		psw->state = !!(mask & (1 << psw_info->bit));
+		if (psw->state)	/* debounce */
+			mod_timer(&psw->debounce, jiffies + 50);
+
+		ret = 1;
+	}
+
+out:
+	/* Clear the switch IRQs */
+	l |= (0x7 << 12);
+	ctrl_outw(l, PA_DBSW);
+
+	return IRQ_RETVAL(ret);
+}
+
+static struct resource psw_resources[] = {
+	[0] = {
+		.start	= IRQ_PSW,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct push_switch_platform_info s2_platform_data = {
+	.name		= "s2",
+	.bit		= 6,
+	.irq_flags	= IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+			  IRQF_SHARED,
+	.irq_handler	= psw_irq_handler,
+};
+
+static struct platform_device s2_switch_device = {
+	.name		= "push-switch",
+	.id		= 0,
+	.num_resources	= ARRAY_SIZE(psw_resources),
+	.resource	= psw_resources,
+	.dev		= {
+		.platform_data = &s2_platform_data,
+	},
+};
+
+static struct push_switch_platform_info s3_platform_data = {
+	.name		= "s3",
+	.bit		= 5,
+	.irq_flags	= IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+			  IRQF_SHARED,
+	.irq_handler	= psw_irq_handler,
+};
+
+static struct platform_device s3_switch_device = {
+	.name		= "push-switch",
+	.id		= 1,
+	.num_resources	= ARRAY_SIZE(psw_resources),
+	.resource	= psw_resources,
+	.dev		= {
+		.platform_data = &s3_platform_data,
+	},
+};
+
+static struct push_switch_platform_info s4_platform_data = {
+	.name		= "s4",
+	.bit		= 4,
+	.irq_flags	= IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+			  IRQF_SHARED,
+	.irq_handler	= psw_irq_handler,
+};
+
+static struct platform_device s4_switch_device = {
+	.name		= "push-switch",
+	.id		= 2,
+	.num_resources	= ARRAY_SIZE(psw_resources),
+	.resource	= psw_resources,
+	.dev		= {
+		.platform_data = &s4_platform_data,
+	},
+};
+
+static struct platform_device *psw_devices[] = {
+	&s2_switch_device, &s3_switch_device, &s4_switch_device,
+};
+
+static int __init psw_init(void)
+{
+	return platform_add_devices(psw_devices, ARRAY_SIZE(psw_devices));
+}
+module_init(psw_init);
diff --git a/arch/sh/boards/renesas/r7780rp/setup.c b/arch/sh/boards/renesas/r7780rp/setup.c
index c331cae..9f89c8d 100644
--- a/arch/sh/boards/renesas/r7780rp/setup.c
+++ b/arch/sh/boards/renesas/r7780rp/setup.c
@@ -44,8 +44,37 @@
 	.resource	= m66596_usb_host_resources,
 };
 
+static struct resource cf_ide_resources[] = {
+	[0] = {
+		.start	= 0x1f0,
+		.end	= 0x1f0 + 8,
+		.flags	= IORESOURCE_IO,
+	},
+	[1] = {
+		.start	= 0x1f0 + 0x206,
+		.end	= 0x1f0 + 8 + 0x206 + 8,
+		.flags	= IORESOURCE_IO,
+	},
+	[2] = {
+#ifdef CONFIG_SH_R7780MP
+		.start	= 1,
+#else
+		.start	= 4,
+#endif
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device cf_ide_device  = {
+	.name		= "pata_platform",
+	.id		= -1,
+	.num_resources	= ARRAY_SIZE(cf_ide_resources),
+	.resource	= cf_ide_resources,
+};
+
 static struct platform_device *r7780rp_devices[] __initdata = {
 	&m66596_usb_host_device,
+	&cf_ide_device,
 };
 
 static int __init r7780rp_devices_setup(void)
diff --git a/arch/sh/boards/se/7206/Makefile b/arch/sh/boards/se/7206/Makefile
new file mode 100644
index 0000000..63950f4f
--- /dev/null
+++ b/arch/sh/boards/se/7206/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the 7206 SolutionEngine specific parts of the kernel
+#
+
+obj-y	 := setup.o io.o irq.o
+obj-$(CONFIG_HEARTBEAT) += led.o
+
diff --git a/arch/sh/boards/se/7206/io.c b/arch/sh/boards/se/7206/io.c
new file mode 100644
index 0000000..b557273
--- /dev/null
+++ b/arch/sh/boards/se/7206/io.c
@@ -0,0 +1,123 @@
+/* $Id: io.c,v 1.5 2004/02/22 23:08:43 kkojima Exp $
+ *
+ * linux/arch/sh/boards/se/7206/io.c
+ *
+ * Copyright (C) 2006 Yoshinori Sato
+ *
+ * I/O routine for Hitachi 7206 SolutionEngine.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <asm/io.h>
+#include <asm/se7206.h>
+
+
+static inline void delay(void)
+{
+	ctrl_inw(0x20000000);  /* P2 ROM Area */
+}
+
+/* MS7750 requires special versions of in*, out* routines, since
+   PC-like I/O ports are located in the upper half of a 16-bit word and
+   can only be accessed with 16-bit-wide operations.  */
+
+static inline volatile __u16 *
+port2adr(unsigned int port)
+{
+	if (port >= 0x2000)
+		return (volatile __u16 *) (PA_MRSHPC + (port - 0x2000));
+	else if (port >= 0x300 && port < 0x310)
+		return (volatile __u16 *) (PA_SMSC + (port - 0x300));
+}
+
+unsigned char se7206_inb(unsigned long port)
+{
+	return (*port2adr(port))&0xff; 
+}
+
+unsigned char se7206_inb_p(unsigned long port)
+{
+	unsigned long v;
+
+	v = (*port2adr(port))&0xff; 
+	delay();
+	return v;
+}
+
+unsigned short se7206_inw(unsigned long port)
+{
+	return *port2adr(port);
+}
+
+unsigned int se7206_inl(unsigned long port)
+{
+	maybebadio(port);
+	return 0;
+}
+
+void se7206_outb(unsigned char value, unsigned long port)
+{
+	*(port2adr(port)) = value;
+}
+
+void se7206_outb_p(unsigned char value, unsigned long port)
+{
+	*(port2adr(port)) = value;
+	delay();
+}
+
+void se7206_outw(unsigned short value, unsigned long port)
+{
+	*port2adr(port) = value;
+}
+
+void se7206_outl(unsigned int value, unsigned long port)
+{
+	maybebadio(port);
+}
+
+void se7206_insb(unsigned long port, void *addr, unsigned long count)
+{
+	volatile __u16 *p = port2adr(port);
+	__u8 *ap = addr;
+
+	while (count--)
+		*ap++ = *p;
+}
+
+void se7206_insw(unsigned long port, void *addr, unsigned long count)
+{
+	volatile __u16 *p = port2adr(port);
+	__u16 *ap = addr;
+	while (count--)
+		*ap++ = *p;
+}
+
+void se7206_insl(unsigned long port, void *addr, unsigned long count)
+{
+	maybebadio(port);
+}
+
+void se7206_outsb(unsigned long port, const void *addr, unsigned long count)
+{
+	volatile __u16 *p = port2adr(port);
+	const __u8 *ap = addr;
+
+	while (count--)
+		*p = *ap++;
+}
+
+void se7206_outsw(unsigned long port, const void *addr, unsigned long count)
+{
+	volatile __u16 *p = port2adr(port);
+	const __u16 *ap = addr;
+	while (count--)
+		*p = *ap++;
+}
+
+void se7206_outsl(unsigned long port, const void *addr, unsigned long count)
+{
+	maybebadio(port);
+}
diff --git a/arch/sh/boards/se/7206/irq.c b/arch/sh/boards/se/7206/irq.c
new file mode 100644
index 0000000..3fb0c5f
--- /dev/null
+++ b/arch/sh/boards/se/7206/irq.c
@@ -0,0 +1,139 @@
+/*
+ * linux/arch/sh/boards/se/7206/irq.c
+ *
+ * Copyright (C) 2005,2006 Yoshinori Sato
+ *
+ * Hitachi SolutionEngine Support.
+ *
+ */
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <asm/se7206.h>
+
+#define INTSTS0 0x31800000
+#define INTSTS1 0x31800002
+#define INTMSK0 0x31800004
+#define INTMSK1 0x31800006
+#define INTSEL  0x31800008
+
+static void disable_se7206_irq(unsigned int irq)
+{
+	unsigned short val;
+	unsigned short mask = 0xffff ^ (0x0f << 4 * (3 - (IRQ0_IRQ - irq)));
+	unsigned short msk0,msk1;
+
+	/* Set the priority in IPR to 0 */
+	val = ctrl_inw(INTC_IPR01);
+	val &= mask;
+	ctrl_outw(val, INTC_IPR01);
+	/* FPGA mask set */
+	msk0 = ctrl_inw(INTMSK0);
+	msk1 = ctrl_inw(INTMSK1);
+
+	switch (irq) {
+	case IRQ0_IRQ:
+		msk0 |= 0x0010;
+		break;
+	case IRQ1_IRQ:
+		msk0 |= 0x000f;
+		break;
+	case IRQ2_IRQ:
+		msk0 |= 0x0f00;
+		msk1 |= 0x00ff;
+		break;
+	}
+	ctrl_outw(msk0, INTMSK0);
+	ctrl_outw(msk1, INTMSK1);
+}
+
+static void enable_se7206_irq(unsigned int irq)
+{
+	unsigned short val;
+	unsigned short value = (0x0001 << 4 * (3 - (IRQ0_IRQ - irq)));
+	unsigned short msk0,msk1;
+
+	/* Set priority in IPR back to original value */
+	val = ctrl_inw(INTC_IPR01);
+	val |= value;
+	ctrl_outw(val, INTC_IPR01);
+
+	/* FPGA mask reset */
+	msk0 = ctrl_inw(INTMSK0);
+	msk1 = ctrl_inw(INTMSK1);
+
+	switch (irq) {
+	case IRQ0_IRQ:
+		msk0 &= ~0x0010;
+		break;
+	case IRQ1_IRQ:
+		msk0 &= ~0x000f;
+		break;
+	case IRQ2_IRQ:
+		msk0 &= ~0x0f00;
+		msk1 &= ~0x00ff;
+		break;
+	}
+	ctrl_outw(msk0, INTMSK0);
+	ctrl_outw(msk1, INTMSK1);
+}
+
+static void eoi_se7206_irq(unsigned int irq)
+{
+	unsigned short sts0,sts1;
+
+	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
+		enable_se7206_irq(irq);
+	/* FPGA isr clear */
+	sts0 = ctrl_inw(INTSTS0);
+	sts1 = ctrl_inw(INTSTS1);
+
+	switch (irq) {
+	case IRQ0_IRQ:
+		sts0 &= ~0x0010;
+		break;
+	case IRQ1_IRQ:
+		sts0 &= ~0x000f;
+		break;
+	case IRQ2_IRQ:
+		sts0 &= ~0x0f00;
+		sts1 &= ~0x00ff;
+		break;
+	}
+	ctrl_outw(sts0, INTSTS0);
+	ctrl_outw(sts1, INTSTS1);
+}
+
+static struct irq_chip se7206_irq_chip __read_mostly = {
+	.name		= "SE7206-FPGA-IRQ",
+	.mask		= disable_se7206_irq,
+	.unmask		= enable_se7206_irq,
+	.mask_ack	= disable_se7206_irq,
+	.eoi		= eoi_se7206_irq,
+};
+
+static void make_se7206_irq(unsigned int irq)
+{
+	disable_irq_nosync(irq);
+	set_irq_chip_and_handler_name(irq, &se7206_irq_chip,
+				      handle_level_irq, "level");
+	disable_se7206_irq(irq);
+}
+
+/*
+ * Initialize IRQ setting
+ */
+void __init init_se7206_IRQ(void)
+{
+	make_se7206_irq(IRQ0_IRQ); /* SMC91C111 */
+	make_se7206_irq(IRQ1_IRQ); /* ATA */
+	make_se7206_irq(IRQ3_IRQ); /* SLOT / PCM */
+	ctrl_outw(ctrl_inw(INTC_ICR1) | 0x000b, INTC_ICR1); /* ICR1 */
+
+	/* FPGA system register setup */
+	ctrl_outw(0x0000, INTSTS0); /* Clear INTSTS0 */
+	ctrl_outw(0x0000, INTSTS1); /* Clear INTSTS1 */
+	/* IRQ0=LAN, IRQ1=ATA, IRQ3=SLT,PCM */
+	ctrl_outw(0x0001, INTSEL);
+}
diff --git a/arch/sh/boards/se/7206/led.c b/arch/sh/boards/se/7206/led.c
new file mode 100644
index 0000000..ef79460
--- /dev/null
+++ b/arch/sh/boards/se/7206/led.c
@@ -0,0 +1,57 @@
+/*
+ * linux/arch/sh/boards/se/7206/led.c
+ *
+ * Copyright (C) 2000 Stuart Menefy <stuart.menefy@st.com>
+ *
+ * May be copied or modified under the terms of the GNU General Public
+ * License.  See linux/COPYING for more information.
+ *
+ * This file contains Solution Engine specific LED code.
+ */
+
+#include <linux/config.h>
+#include <asm/se7206.h>
+
+#ifdef CONFIG_HEARTBEAT
+
+#include <linux/sched.h>
+
+/* Cycle the LEDs in the classic Knight Rider/Sun pattern */
+void heartbeat_se(void)
+{
+	static unsigned int cnt = 0, period = 0;
+	volatile unsigned short* p = (volatile unsigned short*)PA_LED;
+	static unsigned bit = 0, up = 1;
+
+	cnt += 1;
+	if (cnt < period) {
+		return;
+	}
+
+	cnt = 0;
+
+	/* Go through the points (roughly!):
+	 * f(0)=10, f(1)=16, f(2)=20, f(5)=35,f(inf)->110
+	 */
+	period = 110 - ( (300<<FSHIFT)/
+			 ((avenrun[0]/5) + (3<<FSHIFT)) );
+
+	if (up) {
+		if (bit == 7) {
+			bit--;
+			up = 0;
+		} else {
+			bit++;
+		}
+	} else {
+		if (bit == 0) {
+			bit++;
+			up = 1;
+		} else {
+			bit--;
+		}
+	}
+	*p = 1 << (bit + 8);
+
+}
+#endif /* CONFIG_HEARTBEAT */
diff --git a/arch/sh/boards/se/7206/setup.c b/arch/sh/boards/se/7206/setup.c
new file mode 100644
index 0000000..0f42e91
--- /dev/null
+++ b/arch/sh/boards/se/7206/setup.c
@@ -0,0 +1,79 @@
+/*
+ *
+ * linux/arch/sh/boards/se/7206/setup.c
+ *
+ * Copyright (C) 2006  Yoshinori Sato
+ *
+ * Hitachi 7206 SolutionEngine Support.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <asm/se7206.h>
+#include <asm/io.h>
+#include <asm/machvec.h>
+
+static struct resource smc91x_resources[] = {
+	[0] = {
+		.start		= 0x300,
+		.end		= 0x300 + 0x020 - 1,
+		.flags		= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start		= 64,
+		.end		= 64,
+		.flags		= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device smc91x_device = {
+	.name		= "smc91x",
+	.id		= -1,
+	.num_resources	= ARRAY_SIZE(smc91x_resources),
+	.resource	= smc91x_resources,
+};
+
+static int __init se7206_devices_setup(void)
+{
+	return platform_device_register(&smc91x_device);
+}
+
+__initcall(se7206_devices_setup);
+
+void heartbeat_se(void);
+
+/*
+ * The Machine Vector
+ */
+
+struct sh_machine_vector mv_se __initmv = {
+	.mv_name		= "SolutionEngine",
+	.mv_nr_irqs		= 256,
+	.mv_inb			= se7206_inb,
+	.mv_inw			= se7206_inw,
+	.mv_inl			= se7206_inl,
+	.mv_outb		= se7206_outb,
+	.mv_outw		= se7206_outw,
+	.mv_outl		= se7206_outl,
+
+	.mv_inb_p		= se7206_inb_p,
+	.mv_inw_p		= se7206_inw,
+	.mv_inl_p		= se7206_inl,
+	.mv_outb_p		= se7206_outb_p,
+	.mv_outw_p		= se7206_outw,
+	.mv_outl_p		= se7206_outl,
+
+	.mv_insb		= se7206_insb,
+	.mv_insw		= se7206_insw,
+	.mv_insl		= se7206_insl,
+	.mv_outsb		= se7206_outsb,
+	.mv_outsw		= se7206_outsw,
+	.mv_outsl		= se7206_outsl,
+
+	.mv_init_irq		= init_se7206_IRQ,
+#ifdef CONFIG_HEARTBEAT
+	.mv_heartbeat		= heartbeat_se,
+#endif
+};
+ALIAS_MV(se)
diff --git a/arch/sh/boards/se/7619/Makefile b/arch/sh/boards/se/7619/Makefile
new file mode 100644
index 0000000..3666eca
--- /dev/null
+++ b/arch/sh/boards/se/7619/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the 7619 SolutionEngine specific parts of the kernel
+#
+
+obj-y	 := setup.o io.o
diff --git a/arch/sh/boards/se/7619/io.c b/arch/sh/boards/se/7619/io.c
new file mode 100644
index 0000000..176f1f39c
--- /dev/null
+++ b/arch/sh/boards/se/7619/io.c
@@ -0,0 +1,102 @@
+/*
+ *
+ * linux/arch/sh/boards/se/7619/io.c
+ *
+ * Copyright (C) 2006  Yoshinori Sato
+ *
+ * I/O routine for Hitachi 7619 SolutionEngine.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <asm/io.h>
+#include <asm/se7619.h>
+#include <asm/irq.h>
+
+/* FIXME: M3A-ZAB7 Compact Flash Slot support */
+
+static inline void delay(void)
+{
+	ctrl_inw(0xa0000000);	/* Uncached ROM area (P2) */
+}
+
+#define badio(name,port) \
+  printk("bad I/O operation (%s) for port 0x%lx at 0x%08x\n", \
+	 #name, (port), (__u32) __builtin_return_address(0))
+
+unsigned char se7619___inb(unsigned long port)
+{
+	badio(inb, port);
+	return 0;
+}
+
+unsigned char se7619___inb_p(unsigned long port)
+{
+	badio(inb_p, port);
+	delay();
+	return 0;
+}
+
+unsigned short se7619___inw(unsigned long port)
+{
+	badio(inw, port);
+	return 0;
+}
+
+unsigned int se7619___inl(unsigned long port)
+{
+	badio(inl, port);
+	return 0;
+}
+
+void se7619___outb(unsigned char value, unsigned long port)
+{
+	badio(outb, port);
+}
+
+void se7619___outb_p(unsigned char value, unsigned long port)
+{
+	badio(outb_p, port);
+	delay();
+}
+
+void se7619___outw(unsigned short value, unsigned long port)
+{
+	badio(outw, port);
+}
+
+void se7619___outl(unsigned int value, unsigned long port)
+{
+	badio(outl, port);
+}
+
+void se7619___insb(unsigned long port, void *addr, unsigned long count)
+{
+	badio(insb, port);
+}
+
+void se7619___insw(unsigned long port, void *addr, unsigned long count)
+{
+	badio(insw, port);
+}
+
+void se7619___insl(unsigned long port, void *addr, unsigned long count)
+{
+	badio(insl, port);
+}
+
+void se7619___outsb(unsigned long port, const void *addr, unsigned long count)
+{
+	badio(outsb, port);
+}
+
+void se7619___outsw(unsigned long port, const void *addr, unsigned long count)
+{
+	badio(outsw, port);
+}
+
+void se7619___outsl(unsigned long port, const void *addr, unsigned long count)
+{
+	badio(outsl, port);
+}
diff --git a/arch/sh/boards/se/7619/setup.c b/arch/sh/boards/se/7619/setup.c
new file mode 100644
index 0000000..e627b26
--- /dev/null
+++ b/arch/sh/boards/se/7619/setup.c
@@ -0,0 +1,43 @@
+/*
+ * arch/sh/boards/se/7619/setup.c
+ *
+ * Copyright (C) 2006 Yoshinori Sato
+ *
+ * Hitachi SH7619 SolutionEngine Support.
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <asm/io.h>
+#include <asm/se7619.h>
+#include <asm/machvec.h>
+
+/*
+ * The Machine Vector
+ */
+
+struct sh_machine_vector mv_se __initmv = {
+	.mv_name		= "SolutionEngine",
+	.mv_nr_irqs		= 108,
+	.mv_inb			= se7619___inb,
+	.mv_inw			= se7619___inw,
+	.mv_inl			= se7619___inl,
+	.mv_outb		= se7619___outb,
+	.mv_outw		= se7619___outw,
+	.mv_outl		= se7619___outl,
+
+	.mv_inb_p		= se7619___inb_p,
+	.mv_inw_p		= se7619___inw,
+	.mv_inl_p		= se7619___inl,
+	.mv_outb_p		= se7619___outb_p,
+	.mv_outw_p		= se7619___outw,
+	.mv_outl_p		= se7619___outl,
+
+	.mv_insb		= se7619___insb,
+	.mv_insw		= se7619___insw,
+	.mv_insl		= se7619___insl,
+	.mv_outsb		= se7619___outsb,
+	.mv_outsw		= se7619___outsw,
+	.mv_outsl		= se7619___outsl,
+};
+ALIAS_MV(se)
diff --git a/arch/sh/boards/titan/setup.c b/arch/sh/boards/titan/setup.c
index a6046d9..6bcd939 100644
--- a/arch/sh/boards/titan/setup.c
+++ b/arch/sh/boards/titan/setup.c
@@ -1,26 +1,30 @@
 /*
- *	Setup for Titan
+ * arch/sh/boards/titan/setup.c - Setup for Titan
+ *
+ *  Copyright (C) 2006  Jamie Lenehan
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
  */
-
 #include <linux/init.h>
-#include <asm/irq.h>
+#include <linux/irq.h>
 #include <asm/titan.h>
 #include <asm/io.h>
 
-extern void __init pcibios_init_platform(void);
-
 static struct ipr_data titan_ipr_map[] = {
-	{ TITAN_IRQ_WAN,	IRL0_IPR_ADDR,	IRL0_IPR_POS,	IRL0_PRIORITY },
-	{ TITAN_IRQ_LAN,	IRL1_IPR_ADDR,	IRL1_IPR_POS,	IRL1_PRIORITY },
-	{ TITAN_IRQ_MPCIA,	IRL2_IPR_ADDR,	IRL2_IPR_POS,	IRL2_PRIORITY },
-	{ TITAN_IRQ_USB,	IRL3_IPR_ADDR,	IRL3_IPR_POS,	IRL3_PRIORITY },
+	/* IRQ, IPR idx, shift, prio */
+	{ TITAN_IRQ_WAN,   3, 12, 8 },	/* eth0 (WAN) */
+	{ TITAN_IRQ_LAN,   3,  8, 8 },	/* eth1 (LAN) */
+	{ TITAN_IRQ_MPCIA, 3,  4, 8 },	/* mPCI A (top) */
+	{ TITAN_IRQ_USB,   3,  0, 8 },	/* mPCI B (bottom), USB */
 };
 
 static void __init init_titan_irq(void)
 {
 	/* enable individual interrupt mode for externals */
-	ctrl_outw(ctrl_inw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR);
-
+	ipr_irq_enable_irlm();
+	/* register ipr irqs */
 	make_ipr_irq(titan_ipr_map, ARRAY_SIZE(titan_ipr_map));
 }
 
@@ -47,6 +51,5 @@
 	.mv_ioport_map = titan_ioport_map,
 
 	.mv_init_irq =	init_titan_irq,
-	.mv_init_pci =	pcibios_init_platform,
 };
 ALIAS_MV(titan)
diff --git a/arch/sh/boot/compressed/misc.c b/arch/sh/boot/compressed/misc.c
index f2fed5c..35452d8 100644
--- a/arch/sh/boot/compressed/misc.c
+++ b/arch/sh/boot/compressed/misc.c
@@ -12,6 +12,7 @@
  */
 
 #include <asm/uaccess.h>
+#include <asm/addrspace.h>
 #ifdef CONFIG_SH_STANDARD_BIOS
 #include <asm/sh_bios.h>
 #endif
@@ -228,7 +229,7 @@
 void decompress_kernel(void)
 {
 	output_data = 0;
-	output_ptr = (unsigned long)&_text+0x20001000;
+	output_ptr = P2SEGADDR((unsigned long)&_text+0x1000);
 	free_mem_ptr = (unsigned long)&_end;
 	free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
 
diff --git a/arch/sh/configs/r7780rp_defconfig b/arch/sh/configs/r7780rp_defconfig
index 34e2046..2b75b48 100644
--- a/arch/sh/configs/r7780rp_defconfig
+++ b/arch/sh/configs/r7780rp_defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.19-rc3
-# Tue Oct 31 12:32:06 2006
+# Linux kernel version: 2.6.19
+# Wed Dec  6 11:59:38 2006
 #
 CONFIG_SUPERH=y
 CONFIG_RWSEM_GENERIC_SPINLOCK=y
@@ -11,6 +11,8 @@
 CONFIG_GENERIC_IRQ_PROBE=y
 CONFIG_GENERIC_CALIBRATE_DELAY=y
 # CONFIG_GENERIC_TIME is not set
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
 CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
 
 #
@@ -37,6 +39,7 @@
 # CONFIG_AUDIT is not set
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
+# CONFIG_SYSFS_DEPRECATED is not set
 # CONFIG_RELAY is not set
 CONFIG_INITRAMFS_SOURCE=""
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
@@ -118,6 +121,8 @@
 # CONFIG_SH_LANDISK is not set
 # CONFIG_SH_TITAN is not set
 # CONFIG_SH_SHMIN is not set
+# CONFIG_SH_7206_SOLUTION_ENGINE is not set
+# CONFIG_SH_7619_SOLUTION_ENGINE is not set
 # CONFIG_SH_UNKNOWN is not set
 
 #
@@ -130,6 +135,12 @@
 # SH-2 Processor Support
 #
 # CONFIG_CPU_SUBTYPE_SH7604 is not set
+# CONFIG_CPU_SUBTYPE_SH7619 is not set
+
+#
+# SH-2A Processor Support
+#
+# CONFIG_CPU_SUBTYPE_SH7206 is not set
 
 #
 # SH-3 Processor Support
@@ -165,6 +176,7 @@
 #
 # CONFIG_CPU_SUBTYPE_SH7770 is not set
 CONFIG_CPU_SUBTYPE_SH7780=y
+# CONFIG_CPU_SUBTYPE_SH7785 is not set
 
 #
 # SH4AL-DSP Processor Support
@@ -181,8 +193,14 @@
 CONFIG_MEMORY_SIZE=0x08000000
 # CONFIG_32BIT is not set
 CONFIG_VSYSCALL=y
+CONFIG_PAGE_SIZE_4KB=y
+# CONFIG_PAGE_SIZE_8KB is not set
+# CONFIG_PAGE_SIZE_64KB is not set
 CONFIG_HUGETLB_PAGE_SIZE_64K=y
+# CONFIG_HUGETLB_PAGE_SIZE_256K is not set
 # CONFIG_HUGETLB_PAGE_SIZE_1MB is not set
+# CONFIG_HUGETLB_PAGE_SIZE_4MB is not set
+# CONFIG_HUGETLB_PAGE_SIZE_64MB is not set
 CONFIG_SELECT_MEMORY_MODEL=y
 CONFIG_FLATMEM_MANUAL=y
 # CONFIG_DISCONTIGMEM_MANUAL is not set
@@ -204,12 +222,14 @@
 # Processor features
 #
 CONFIG_CPU_LITTLE_ENDIAN=y
+# CONFIG_CPU_BIG_ENDIAN is not set
 CONFIG_SH_FPU=y
 # CONFIG_SH_DSP is not set
 CONFIG_SH_STORE_QUEUES=y
 CONFIG_CPU_HAS_INTEVT=y
 CONFIG_CPU_HAS_INTC2_IRQ=y
 CONFIG_CPU_HAS_SR_RB=y
+CONFIG_CPU_HAS_PTEA=y
 
 #
 # Timer support
@@ -220,6 +240,8 @@
 # R7780RP options
 #
 CONFIG_SH_R7780MP=y
+CONFIG_SH_TIMER_IRQ=28
+CONFIG_NO_IDLE_HZ=y
 CONFIG_SH_PCLK_FREQ=32000000
 
 #
@@ -238,13 +260,18 @@
 # CONFIG_HD6446X_SERIES is not set
 
 #
+# Additional SuperH Device Drivers
+#
+CONFIG_PUSH_SWITCH=y
+
+#
 # Kernel features
 #
 # CONFIG_HZ_100 is not set
 CONFIG_HZ_250=y
 # CONFIG_HZ_1000 is not set
 CONFIG_HZ=250
-# CONFIG_KEXEC is not set
+CONFIG_KEXEC=y
 # CONFIG_SMP is not set
 # CONFIG_PREEMPT_NONE is not set
 # CONFIG_PREEMPT_VOLUNTARY is not set
@@ -278,10 +305,7 @@
 #
 # PCI Hotplug Support
 #
-CONFIG_HOTPLUG_PCI=y
-# CONFIG_HOTPLUG_PCI_FAKE is not set
-# CONFIG_HOTPLUG_PCI_CPCI is not set
-# CONFIG_HOTPLUG_PCI_SHPC is not set
+# CONFIG_HOTPLUG_PCI is not set
 
 #
 # Executable file formats
@@ -341,6 +365,7 @@
 # CONFIG_TCP_CONG_ADVANCED is not set
 CONFIG_TCP_CONG_CUBIC=y
 CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
 # CONFIG_IPV6 is not set
 # CONFIG_INET6_XFRM_TUNNEL is not set
 # CONFIG_INET6_TUNNEL is not set
@@ -556,6 +581,7 @@
 # CONFIG_PATA_IT821X is not set
 # CONFIG_PATA_JMICRON is not set
 # CONFIG_PATA_TRIFLEX is not set
+# CONFIG_PATA_MARVELL is not set
 # CONFIG_PATA_MPIIX is not set
 # CONFIG_PATA_OLDPIIX is not set
 # CONFIG_PATA_NETCELL is not set
@@ -572,6 +598,7 @@
 # CONFIG_PATA_SIS is not set
 # CONFIG_PATA_VIA is not set
 # CONFIG_PATA_WINBOND is not set
+CONFIG_PATA_PLATFORM=y
 
 #
 # Multi-device support (RAID and LVM)
@@ -688,6 +715,7 @@
 # CONFIG_IXGB is not set
 # CONFIG_S2IO is not set
 # CONFIG_MYRI10GE is not set
+# CONFIG_NETXEN_NIC is not set
 
 #
 # Token Ring devices
@@ -830,10 +858,6 @@
 # CONFIG_DTLK is not set
 # CONFIG_R3964 is not set
 # CONFIG_APPLICOM is not set
-
-#
-# Ftape, the floppy tape device driver
-#
 # CONFIG_DRM is not set
 # CONFIG_RAW_DRIVER is not set
 
@@ -1020,7 +1044,7 @@
 CONFIG_DNOTIFY=y
 # CONFIG_AUTOFS_FS is not set
 # CONFIG_AUTOFS4_FS is not set
-# CONFIG_FUSE_FS is not set
+CONFIG_FUSE_FS=m
 
 #
 # CD-ROM/DVD Filesystems
@@ -1052,7 +1076,7 @@
 CONFIG_HUGETLBFS=y
 CONFIG_HUGETLB_PAGE=y
 CONFIG_RAMFS=y
-# CONFIG_CONFIGFS_FS is not set
+CONFIG_CONFIGFS_FS=m
 
 #
 # Miscellaneous filesystems
@@ -1153,28 +1177,33 @@
 #
 # Profiling support
 #
-# CONFIG_PROFILING is not set
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=m
 
 #
 # Kernel hacking
 #
-# CONFIG_PRINTK_TIME is not set
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_PRINTK_TIME=y
 CONFIG_ENABLE_MUST_CHECK=y
-# CONFIG_MAGIC_SYSRQ is not set
+CONFIG_MAGIC_SYSRQ=y
 # CONFIG_UNUSED_SYMBOLS is not set
 CONFIG_DEBUG_KERNEL=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_DETECT_SOFTLOCKUP=y
 # CONFIG_SCHEDSTATS is not set
 # CONFIG_DEBUG_SLAB is not set
-CONFIG_DEBUG_SPINLOCK=y
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_DEBUG_SPINLOCK is not set
 # CONFIG_DEBUG_MUTEXES is not set
 # CONFIG_DEBUG_RWSEMS is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
 # CONFIG_DEBUG_SPINLOCK_SLEEP is not set
 # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
 # CONFIG_DEBUG_KOBJECT is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
-# CONFIG_DEBUG_INFO is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_FS=y
 # CONFIG_DEBUG_VM is not set
 # CONFIG_DEBUG_LIST is not set
@@ -1184,7 +1213,7 @@
 # CONFIG_RCU_TORTURE_TEST is not set
 # CONFIG_SH_STANDARD_BIOS is not set
 # CONFIG_EARLY_SCIF_CONSOLE is not set
-# CONFIG_DEBUG_STACKOVERFLOW is not set
+CONFIG_DEBUG_STACKOVERFLOW=y
 # CONFIG_DEBUG_STACK_USAGE is not set
 # CONFIG_4KSTACKS is not set
 # CONFIG_KGDB is not set
diff --git a/arch/sh/configs/se7206_defconfig b/arch/sh/configs/se7206_defconfig
new file mode 100644
index 0000000..36cec0b
--- /dev/null
+++ b/arch/sh/configs/se7206_defconfig
@@ -0,0 +1,826 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.19-rc4
+# Sun Nov  5 16:20:10 2006
+#
+CONFIG_SUPERH=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+# CONFIG_GENERIC_TIME is not set
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_SYSVIPC is not set
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_UTS_NS is not set
+# CONFIG_AUDIT is not set
+# CONFIG_IKCONFIG is not set
+# CONFIG_RELAY is not set
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_EMBEDDED=y
+CONFIG_UID16=y
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+# CONFIG_HOTPLUG is not set
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+# CONFIG_FUTEX is not set
+# CONFIG_EPOLL is not set
+CONFIG_SLAB=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_TINY_SHMEM=y
+CONFIG_BASE_SMALL=0
+# CONFIG_SLOB is not set
+
+#
+# Loadable module support
+#
+# CONFIG_MODULES is not set
+
+#
+# Block layer
+#
+CONFIG_BLOCK=y
+# CONFIG_LBD is not set
+# CONFIG_LSF is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+# CONFIG_IOSCHED_AS is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+CONFIG_DEFAULT_NOOP=y
+CONFIG_DEFAULT_IOSCHED="noop"
+
+#
+# System type
+#
+# CONFIG_SH_SOLUTION_ENGINE is not set
+# CONFIG_SH_7751_SOLUTION_ENGINE is not set
+# CONFIG_SH_7300_SOLUTION_ENGINE is not set
+# CONFIG_SH_7343_SOLUTION_ENGINE is not set
+# CONFIG_SH_73180_SOLUTION_ENGINE is not set
+# CONFIG_SH_7751_SYSTEMH is not set
+# CONFIG_SH_HP6XX is not set
+# CONFIG_SH_EC3104 is not set
+# CONFIG_SH_SATURN is not set
+# CONFIG_SH_DREAMCAST is not set
+# CONFIG_SH_BIGSUR is not set
+# CONFIG_SH_MPC1211 is not set
+# CONFIG_SH_SH03 is not set
+# CONFIG_SH_SECUREEDGE5410 is not set
+# CONFIG_SH_HS7751RVOIP is not set
+# CONFIG_SH_7710VOIPGW is not set
+# CONFIG_SH_RTS7751R2D is not set
+# CONFIG_SH_R7780RP is not set
+# CONFIG_SH_EDOSK7705 is not set
+# CONFIG_SH_SH4202_MICRODEV is not set
+# CONFIG_SH_LANDISK is not set
+# CONFIG_SH_TITAN is not set
+# CONFIG_SH_SHMIN is not set
+CONFIG_SH_7206_SOLUTION_ENGINE=y
+# CONFIG_SH_7619_SOLUTION_ENGINE is not set
+# CONFIG_SH_UNKNOWN is not set
+
+#
+# Processor selection
+#
+CONFIG_CPU_SH2=y
+CONFIG_CPU_SH2A=y
+
+#
+# SH-2 Processor Support
+#
+# CONFIG_CPU_SUBTYPE_SH7604 is not set
+# CONFIG_CPU_SUBTYPE_SH7619 is not set
+
+#
+# SH-2A Processor Support
+#
+CONFIG_CPU_SUBTYPE_SH7206=y
+
+#
+# SH-3 Processor Support
+#
+# CONFIG_CPU_SUBTYPE_SH7300 is not set
+# CONFIG_CPU_SUBTYPE_SH7705 is not set
+# CONFIG_CPU_SUBTYPE_SH7706 is not set
+# CONFIG_CPU_SUBTYPE_SH7707 is not set
+# CONFIG_CPU_SUBTYPE_SH7708 is not set
+# CONFIG_CPU_SUBTYPE_SH7709 is not set
+# CONFIG_CPU_SUBTYPE_SH7710 is not set
+
+#
+# SH-4 Processor Support
+#
+# CONFIG_CPU_SUBTYPE_SH7750 is not set
+# CONFIG_CPU_SUBTYPE_SH7091 is not set
+# CONFIG_CPU_SUBTYPE_SH7750R is not set
+# CONFIG_CPU_SUBTYPE_SH7750S is not set
+# CONFIG_CPU_SUBTYPE_SH7751 is not set
+# CONFIG_CPU_SUBTYPE_SH7751R is not set
+# CONFIG_CPU_SUBTYPE_SH7760 is not set
+# CONFIG_CPU_SUBTYPE_SH4_202 is not set
+
+#
+# ST40 Processor Support
+#
+# CONFIG_CPU_SUBTYPE_ST40STB1 is not set
+# CONFIG_CPU_SUBTYPE_ST40GX1 is not set
+
+#
+# SH-4A Processor Support
+#
+# CONFIG_CPU_SUBTYPE_SH7770 is not set
+# CONFIG_CPU_SUBTYPE_SH7780 is not set
+
+#
+# SH4AL-DSP Processor Support
+#
+# CONFIG_CPU_SUBTYPE_SH73180 is not set
+# CONFIG_CPU_SUBTYPE_SH7343 is not set
+
+#
+# Memory management options
+#
+CONFIG_PAGE_OFFSET=0x00000000
+CONFIG_MEMORY_START=0x0c000000
+CONFIG_MEMORY_SIZE=0x02000000
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_RESOURCES_64BIT is not set
+
+#
+# Cache configuration
+#
+# CONFIG_SH_DIRECT_MAPPED is not set
+# CONFIG_SH_WRITETHROUGH is not set
+# CONFIG_SH_OCRAM is not set
+
+#
+# Processor features
+#
+# CONFIG_CPU_LITTLE_ENDIAN is not set
+# CONFIG_SH_FPU is not set
+# CONFIG_SH_FPU_EMU is not set
+# CONFIG_SH_DSP is not set
+
+#
+# Timer support
+#
+CONFIG_SH_CMT=y
+# CONFIG_SH_MTU2 is not set
+CONFIG_SH_PCLK_FREQ=33333333
+CONFIG_SH_CLK_MD=6
+
+#
+# CPU Frequency scaling
+#
+# CONFIG_CPU_FREQ is not set
+
+#
+# DMA support
+#
+# CONFIG_SH_DMA is not set
+
+#
+# Companion Chips
+#
+# CONFIG_HD6446X_SERIES is not set
+
+#
+# Kernel features
+#
+CONFIG_HZ_100=y
+# CONFIG_HZ_250 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=100
+# CONFIG_KEXEC is not set
+# CONFIG_SMP is not set
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+
+#
+# Boot options
+#
+CONFIG_ZERO_PAGE_OFFSET=0x00001000
+CONFIG_BOOT_LINK_OFFSET=0x00800000
+# CONFIG_UBC_WAKEUP is not set
+# CONFIG_CMDLINE_BOOL is not set
+
+#
+# Bus options
+#
+# CONFIG_PCI is not set
+
+#
+# PCCARD (PCMCIA/CardBus) support
+#
+
+#
+# PCI Hotplug Support
+#
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_FLAT=y
+CONFIG_BINFMT_ZFLAT=y
+# CONFIG_BINFMT_SHARED_FLAT is not set
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Power management options (EXPERIMENTAL)
+#
+# CONFIG_PM is not set
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+# CONFIG_NETDEBUG is not set
+# CONFIG_PACKET is not set
+# CONFIG_UNIX is not set
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+# CONFIG_IP_PNP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_DIAG is not set
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_IPV6 is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+
+#
+# DCCP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_DCCP is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+
+#
+# TIPC Configuration (EXPERIMENTAL)
+#
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_IEEE80211 is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_SYS_HYPERVISOR is not set
+
+#
+# Connector - unified userspace <-> kernelspace linker
+#
+# CONFIG_CONNECTOR is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_REDBOOT_PARTS=y
+CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
+# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
+# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
+# CONFIG_MTD_CMDLINE_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_CFI_INTELEXT is not set
+CONFIG_MTD_CFI_AMDSTD=y
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+# CONFIG_MTD_OBSOLETE_CHIPS is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_START=0x20000000
+CONFIG_MTD_PHYSMAP_LEN=0x1000000
+CONFIG_MTD_PHYSMAP_BANKWIDTH=4
+# CONFIG_MTD_SOLUTIONENGINE is not set
+# CONFIG_MTD_UCLINUX is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+
+#
+# NAND Flash Device Drivers
+#
+# CONFIG_MTD_NAND is not set
+
+#
+# OneNAND Flash Device Drivers
+#
+# CONFIG_MTD_ONENAND is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=4096
+CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
+# CONFIG_BLK_DEV_INITRD is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+
+#
+# Misc devices
+#
+# CONFIG_TIFM_CORE is not set
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_NETLINK is not set
+
+#
+# Serial ATA (prod) and Parallel ATA (experimental) drivers
+#
+# CONFIG_ATA is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Fusion MPT device support
+#
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+
+#
+# I2O device support
+#
+
+#
+# Network device support
+#
+# CONFIG_NETDEVICES is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+# CONFIG_INPUT is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+# CONFIG_VT is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_SH_SCI=y
+CONFIG_SERIAL_SH_SCI_NR_UARTS=4
+CONFIG_SERIAL_SH_SCI_CONSOLE=y
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_UNIX98_PTYS is not set
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+CONFIG_HW_RANDOM=y
+# CONFIG_GEN_RTC is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_RAW_DRIVER is not set
+
+#
+# TPM devices
+#
+# CONFIG_TCG_TPM is not set
+
+#
+# I2C support
+#
+# CONFIG_I2C is not set
+
+#
+# SPI support
+#
+# CONFIG_SPI is not set
+# CONFIG_SPI_MASTER is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Hardware Monitoring support
+#
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_ABITUGURU is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+CONFIG_FIRMWARE_EDID=y
+# CONFIG_FB is not set
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+# CONFIG_USB_ARCH_HAS_HCD is not set
+# CONFIG_USB_ARCH_HAS_OHCI is not set
+# CONFIG_USB_ARCH_HAS_EHCI is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
+#
+# LED devices
+#
+# CONFIG_NEW_LEDS is not set
+
+#
+# LED drivers
+#
+
+#
+# LED Triggers
+#
+
+#
+# InfiniBand support
+#
+
+#
+# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
+#
+
+#
+# Real Time Clock
+#
+# CONFIG_RTC_CLASS is not set
+
+#
+# DMA Engine support
+#
+# CONFIG_DMA_ENGINE is not set
+
+#
+# DMA Clients
+#
+
+#
+# DMA Devices
+#
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_EXT4DEV_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_MINIX_FS is not set
+CONFIG_ROMFS_FS=y
+# CONFIG_INOTIFY is not set
+# CONFIG_QUOTA is not set
+# CONFIG_DNOTIFY is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+# CONFIG_SYSFS is not set
+# CONFIG_TMPFS is not set
+# CONFIG_HUGETLBFS is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS_FS is not set
+# CONFIG_JFFS2_FS is not set
+CONFIG_CRAMFS=y
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+# CONFIG_NFS_FS is not set
+# CONFIG_NFSD is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+# CONFIG_9P_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+# CONFIG_NLS is not set
+
+#
+# Profiling support
+#
+# CONFIG_PROFILING is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_MUST_CHECK=y
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+# CONFIG_DEBUG_KERNEL is not set
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_UNWIND_INFO is not set
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_SH_STANDARD_BIOS is not set
+# CONFIG_EARLY_SCIF_CONSOLE is not set
+# CONFIG_KGDB is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+
+#
+# Cryptographic options
+#
+# CONFIG_CRYPTO is not set
+
+#
+# Library routines
+#
+CONFIG_CRC_CCITT=y
+# CONFIG_CRC16 is not set
+CONFIG_CRC32=y
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
diff --git a/arch/sh/drivers/Kconfig b/arch/sh/drivers/Kconfig
new file mode 100644
index 0000000..c54c758
--- /dev/null
+++ b/arch/sh/drivers/Kconfig
@@ -0,0 +1,9 @@
+menu "Additional SuperH Device Drivers"
+
+config PUSH_SWITCH
+	tristate "Push switch support"
+	help
+	  This enables support for the push switch framework, a simple
+	  driver layer that reports switch status through sysfs.
+
+endmenu
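
As a rough illustration of what a board plugs into this framework, here is a
condensed version of the R7780RP wiring added in psw.c earlier in this series;
the switch name, status bit, IRQ number and handler below are hypothetical
board-specific details:

#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <asm/push-switch.h>

#define BOARD_PSW_IRQ	60	/* hypothetical IRQ number */

static irqreturn_t board_psw_irq_handler(int irq, void *arg)
{
	/* Decode the board's switch register here; see psw.c above for a
	 * complete example. Returning IRQ_HANDLED is just a placeholder. */
	return IRQ_HANDLED;
}

static struct resource sw_resources[] = {
	[0] = {
		.start	= BOARD_PSW_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct push_switch_platform_info sw_platform_data = {
	.name		= "sw1",	/* hypothetical switch name */
	.bit		= 3,		/* hypothetical status bit */
	.irq_flags	= IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
	.irq_handler	= board_psw_irq_handler,
};

static struct platform_device sw_switch_device = {
	.name		= "push-switch",
	.id		= 0,
	.num_resources	= ARRAY_SIZE(sw_resources),
	.resource	= sw_resources,
	.dev		= {
		.platform_data = &sw_platform_data,
	},
};
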
diff --git a/arch/sh/drivers/Makefile b/arch/sh/drivers/Makefile
index 338c372..bf18dbf 100644
--- a/arch/sh/drivers/Makefile
+++ b/arch/sh/drivers/Makefile
@@ -5,4 +5,4 @@
 obj-$(CONFIG_PCI)		+= pci/
 obj-$(CONFIG_SH_DMA)		+= dma/
 obj-$(CONFIG_SUPERHYWAY)	+= superhyway/
-
+obj-$(CONFIG_PUSH_SWITCH)	+= push-switch.o
diff --git a/arch/sh/drivers/dma/Makefile b/arch/sh/drivers/dma/Makefile
index 065d4c9..db1295d 100644
--- a/arch/sh/drivers/dma/Makefile
+++ b/arch/sh/drivers/dma/Makefile
@@ -2,8 +2,8 @@
 # Makefile for the SuperH DMA specific kernel interface routines under Linux.
 #
 
-obj-y				+= dma-api.o dma-isa.o
+obj-y				+= dma-api.o
+obj-$(CONFIG_ISA_DMA_API)	+= dma-isa.o
 obj-$(CONFIG_SYSFS)		+= dma-sysfs.o
 obj-$(CONFIG_SH_DMA)		+= dma-sh.o
 obj-$(CONFIG_SH_DREAMCAST)	+= dma-pvr2.o dma-g2.o
-
diff --git a/arch/sh/drivers/dma/dma-api.c b/arch/sh/drivers/dma/dma-api.c
index 47c3e83..e062067 100644
--- a/arch/sh/drivers/dma/dma-api.c
+++ b/arch/sh/drivers/dma/dma-api.c
@@ -11,61 +11,27 @@
  */
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/interrupt.h>
 #include <linux/spinlock.h>
 #include <linux/proc_fs.h>
 #include <linux/list.h>
 #include <linux/platform_device.h>
+#include <linux/mm.h>
 #include <asm/dma.h>
 
 DEFINE_SPINLOCK(dma_spin_lock);
 static LIST_HEAD(registered_dmac_list);
 
-/*
- * A brief note about the reasons for this API as it stands.
- *
- * For starters, the old ISA DMA API didn't work for us for a number of
- * reasons, for one, the vast majority of channels on the SH DMAC are
- * dual-address mode only, and both the new and the old DMA APIs are after the
- * concept of managing a DMA buffer, which doesn't overly fit this model very
- * well. In addition to which, the new API is largely geared at IOMMUs and
- * GARTs, and doesn't even support the channel notion very well.
- *
- * The other thing that's a marginal issue, is the sheer number of random DMA
- * engines that are present (ie, in boards like the Dreamcast), some of which
- * cascade off of the SH DMAC, and others do not. As such, there was a real
- * need for a scalable subsystem that could deal with both single and
- * dual-address mode usage, in addition to interoperating with cascaded DMACs.
- *
- * There really isn't any reason why this needs to be SH specific, though I'm
- * not aware of too many other processors (with the exception of some MIPS)
- * that have the same concept of a dual address mode, or any real desire to
- * actually make use of the DMAC even if such a subsystem were exposed
- * elsewhere.
- *
- * The idea for this was derived from the ARM port, which acted as an excellent
- * reference when trying to address these issues.
- *
- * It should also be noted that the decision to add Yet Another DMA API(tm) to
- * the kernel wasn't made easily, and was only decided upon after conferring
- * with jejb with regards to the state of the old and new APIs as they applied
- * to these circumstances. Philip Blundell was also a great help in figuring
- * out some single-address mode DMA semantics that were otherwise rather
- * confusing.
- */
-
 struct dma_info *get_dma_info(unsigned int chan)
 {
 	struct dma_info *info;
-	unsigned int total = 0;
 
 	/*
 	 * Look for each DMAC's range to determine who the owner of
 	 * the channel is.
 	 */
 	list_for_each_entry(info, &registered_dmac_list, list) {
-		total += info->nr_channels;
-		if (chan > total)
+		if ((chan <  info->first_channel_nr) ||
+		    (chan >= info->first_channel_nr + info->nr_channels))
 			continue;
 
 		return info;
@@ -73,6 +39,22 @@
 
 	return NULL;
 }
+EXPORT_SYMBOL(get_dma_info);
+
+struct dma_info *get_dma_info_by_name(const char *dmac_name)
+{
+	struct dma_info *info;
+
+	list_for_each_entry(info, &registered_dmac_list, list) {
+		if (dmac_name && (strcmp(dmac_name, info->name) != 0))
+			continue;
+		else
+			return info;
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL(get_dma_info_by_name);
 
 static unsigned int get_nr_channels(void)
 {
@@ -91,63 +73,161 @@
 struct dma_channel *get_dma_channel(unsigned int chan)
 {
 	struct dma_info *info = get_dma_info(chan);
+	struct dma_channel *channel;
+	int i;
 
-	if (!info)
+	if (unlikely(!info))
 		return ERR_PTR(-EINVAL);
 
-	return info->channels + chan;
+	for (i = 0; i < info->nr_channels; i++) {
+		channel = &info->channels[i];
+		if (channel->chan == chan)
+			return channel;
+	}
+
+	return NULL;
 }
+EXPORT_SYMBOL(get_dma_channel);
 
 int get_dma_residue(unsigned int chan)
 {
 	struct dma_info *info = get_dma_info(chan);
-	struct dma_channel *channel = &info->channels[chan];
+	struct dma_channel *channel = get_dma_channel(chan);
 
 	if (info->ops->get_residue)
 		return info->ops->get_residue(channel);
 
 	return 0;
 }
+EXPORT_SYMBOL(get_dma_residue);
 
-int request_dma(unsigned int chan, const char *dev_id)
+static int search_cap(const char **haystack, const char *needle)
 {
-	struct dma_info *info = get_dma_info(chan);
-	struct dma_channel *channel = &info->channels[chan];
+	const char **p;
 
-	down(&channel->sem);
-
-	if (!info->ops || chan >= MAX_DMA_CHANNELS) {
-		up(&channel->sem);
-		return -EINVAL;
-	}
-
-	atomic_set(&channel->busy, 1);
-
-	strlcpy(channel->dev_id, dev_id, sizeof(channel->dev_id));
-
-	up(&channel->sem);
-
-	if (info->ops->request)
-		return info->ops->request(channel);
+	for (p = haystack; *p; p++)
+		if (strcmp(*p, needle) == 0)
+			return 1;
 
 	return 0;
 }
 
+/**
+ * request_dma_bycap - Allocate a DMA channel based on its capabilities
+ * @dmac: List of DMA controllers to search
+ * @caps: List of capabilities
+ * @dev_id: Device identifier string for the requesting driver
+ *
+ * Search all channels of all DMA controllers to find a channel which
+ * matches the requested capabilities. The result is the channel
+ * number if a match is found, or a negative error code if no match
+ * is found.
+ *
+ * Note that not all DMA controllers export capabilities, in which
+ * case they can never be allocated using this API, and so
+ * request_dma() must be used specifying the channel number.
+ */
+int request_dma_bycap(const char **dmac, const char **caps, const char *dev_id)
+{
+	unsigned int found = 0;
+	struct dma_info *info;
+	const char **p;
+	int i;
+
+	BUG_ON(!dmac || !caps);
+
+	list_for_each_entry(info, &registered_dmac_list, list)
+		if (strcmp(*dmac, info->name) == 0) {
+			found = 1;
+			break;
+		}
+
+	if (!found)
+		return -ENODEV;
+
+	for (i = 0; i < info->nr_channels; i++) {
+		struct dma_channel *channel = &info->channels[i];
+
+		if (unlikely(!channel->caps))
+			continue;
+
+		for (p = caps; *p; p++) {
+			if (!search_cap(channel->caps, *p))
+				break;
+			if (request_dma(channel->chan, dev_id) == 0)
+				return channel->chan;
+		}
+	}
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL(request_dma_bycap);
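
A minimal consumer-side sketch of the request_dma_bycap() flow added above;
the DMAC name, capability strings, fallback channel number and DMA_MODE_WRITE
value are assumptions for illustration, not anything this patch defines:

#include <linux/types.h>
#include <linux/errno.h>
#include <asm/dma.h>

static int example_dma_setup(void)
{
	/* Hypothetical names; a real driver uses whatever its DMAC
	 * registered via register_dmac()/register_chan_caps(). */
	const char *dmac[] = { "SuperH DMAC", NULL };
	const char *caps[] = { "mem-to-mem", NULL };
	int chan = request_dma_bycap(dmac, caps, "example-driver");

	if (chan < 0) {
		/* Controller exports no capabilities; fall back to a
		 * fixed channel number (0 here, purely illustrative). */
		int ret = request_dma(0, "example-driver");
		if (ret)
			return ret;
		chan = 0;
	}
	return chan;
}

static void example_dma_copy(unsigned int chan, unsigned long src,
			     unsigned long dst, size_t len)
{
	/* DMA_MODE_WRITE is assumed to come from <asm/dma.h>. */
	dma_xfer(chan, src, dst, len, DMA_MODE_WRITE);
	dma_wait_for_completion(chan);
	free_dma(chan);
}
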
+
+int dmac_search_free_channel(const char *dev_id)
+{
+	struct dma_channel *channel = NULL;
+	struct dma_info *info = get_dma_info(0);
+	int i;
+
+	for (i = 0; i < info->nr_channels; i++) {
+		channel = &info->channels[i];
+		if (unlikely(!channel))
+			return -ENODEV;
+
+		if (atomic_read(&channel->busy) == 0)
+			break;
+	}
+
+	if (info->ops->request) {
+		int result = info->ops->request(channel);
+		if (result)
+			return result;
+
+		atomic_set(&channel->busy, 1);
+		return channel->chan;
+	}
+
+	return -ENOSYS;
+}
+
+int request_dma(unsigned int chan, const char *dev_id)
+{
+	struct dma_channel *channel = NULL;
+	struct dma_info *info = get_dma_info(chan);
+	int result;
+
+	channel = get_dma_channel(chan);
+	if (atomic_xchg(&channel->busy, 1))
+		return -EBUSY;
+
+	strlcpy(channel->dev_id, dev_id, sizeof(channel->dev_id));
+
+	if (info->ops->request) {
+		result = info->ops->request(channel);
+		if (result)
+			atomic_set(&channel->busy, 0);
+
+		return result;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(request_dma);
+
 void free_dma(unsigned int chan)
 {
 	struct dma_info *info = get_dma_info(chan);
-	struct dma_channel *channel = &info->channels[chan];
+	struct dma_channel *channel = get_dma_channel(chan);
 
 	if (info->ops->free)
 		info->ops->free(channel);
 
 	atomic_set(&channel->busy, 0);
 }
+EXPORT_SYMBOL(free_dma);
 
 void dma_wait_for_completion(unsigned int chan)
 {
 	struct dma_info *info = get_dma_info(chan);
-	struct dma_channel *channel = &info->channels[chan];
+	struct dma_channel *channel = get_dma_channel(chan);
 
 	if (channel->flags & DMA_TEI_CAPABLE) {
 		wait_event(channel->wait_queue,
@@ -158,21 +238,52 @@
 	while (info->ops->get_residue(channel))
 		cpu_relax();
 }
+EXPORT_SYMBOL(dma_wait_for_completion);
+
+int register_chan_caps(const char *dmac, struct dma_chan_caps *caps)
+{
+	struct dma_info *info;
+	unsigned int found = 0;
+	int i;
+
+	list_for_each_entry(info, &registered_dmac_list, list)
+		if (strcmp(dmac, info->name) == 0) {
+			found = 1;
+			break;
+		}
+
+	if (unlikely(!found))
+		return -ENODEV;
+
+	for (i = 0; i < info->nr_channels; i++, caps++) {
+		struct dma_channel *channel;
+
+		if ((info->first_channel_nr + i) != caps->ch_num)
+			return -EINVAL;
+
+		channel = &info->channels[i];
+		channel->caps = caps->caplist;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(register_chan_caps);
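
And the provider side: a sketch of how a DMAC driver might describe its
channels for register_chan_caps(), assuming a two-channel controller that was
registered with first_channel_nr == 0; the controller name and the capability
strings are hypothetical:

#include <linux/init.h>
#include <asm/dma.h>

static const char *chan0_caps[] = { "mem-to-mem", NULL };
static const char *chan1_caps[] = { "mem-to-dev", NULL };

/* ch_num must line up with first_channel_nr + index, as the check in
 * register_chan_caps() above enforces. */
static struct dma_chan_caps example_dmac_caps[] = {
	{ .ch_num = 0, .caplist = chan0_caps },
	{ .ch_num = 1, .caplist = chan1_caps },
};

static int __init example_dmac_caps_init(void)
{
	/* "Example DMAC" stands in for the dma_info->name the controller
	 * passed to register_dmac(). */
	return register_chan_caps("Example DMAC", example_dmac_caps);
}
subsys_initcall(example_dmac_caps_init);
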
 
 void dma_configure_channel(unsigned int chan, unsigned long flags)
 {
 	struct dma_info *info = get_dma_info(chan);
-	struct dma_channel *channel = &info->channels[chan];
+	struct dma_channel *channel = get_dma_channel(chan);
 
 	if (info->ops->configure)
 		info->ops->configure(channel, flags);
 }
+EXPORT_SYMBOL(dma_configure_channel);
 
 int dma_xfer(unsigned int chan, unsigned long from,
 	     unsigned long to, size_t size, unsigned int mode)
 {
 	struct dma_info *info = get_dma_info(chan);
-	struct dma_channel *channel = &info->channels[chan];
+	struct dma_channel *channel = get_dma_channel(chan);
 
 	channel->sar	= from;
 	channel->dar	= to;
@@ -181,8 +292,20 @@
 
 	return info->ops->xfer(channel);
 }
+EXPORT_SYMBOL(dma_xfer);
 
-#ifdef CONFIG_PROC_FS
+int dma_extend(unsigned int chan, unsigned long op, void *param)
+{
+	struct dma_info *info = get_dma_info(chan);
+	struct dma_channel *channel = get_dma_channel(chan);
+
+	if (info->ops->extend)
+		return info->ops->extend(channel, op, param);
+
+	return -ENOSYS;
+}
+EXPORT_SYMBOL(dma_extend);
+
 static int dma_read_proc(char *buf, char **start, off_t off,
 			 int len, int *eof, void *data)
 {
@@ -214,8 +337,6 @@
 
 	return p - buf;
 }
-#endif
-
 
 int register_dmac(struct dma_info *info)
 {
@@ -224,8 +345,7 @@
 	INIT_LIST_HEAD(&info->list);
 
 	printk(KERN_INFO "DMA: Registering %s handler (%d channel%s).\n",
-	       info->name, info->nr_channels,
-	       info->nr_channels > 1 ? "s" : "");
+	       info->name, info->nr_channels, info->nr_channels > 1 ? "s" : "");
 
 	BUG_ON((info->flags & DMAC_CHANNELS_CONFIGURED) && !info->channels);
 
@@ -242,28 +362,26 @@
 
 		size = sizeof(struct dma_channel) * info->nr_channels;
 
-		info->channels = kmalloc(size, GFP_KERNEL);
+		info->channels = kzalloc(size, GFP_KERNEL);
 		if (!info->channels)
 			return -ENOMEM;
-
-		memset(info->channels, 0, size);
 	}
 
 	total_channels = get_nr_channels();
 	for (i = 0; i < info->nr_channels; i++) {
-		struct dma_channel *chan = info->channels + i;
+		struct dma_channel *chan = &info->channels[i];
 
-		chan->chan = i;
-		chan->vchan = i + total_channels;
+		atomic_set(&chan->busy, 0);
+
+		chan->chan  = info->first_channel_nr + i;
+		chan->vchan = info->first_channel_nr + i + total_channels;
 
 		memcpy(chan->dev_id, "Unused", 7);
 
 		if (info->flags & DMAC_CHANNELS_TEI_CAPABLE)
 			chan->flags |= DMA_TEI_CAPABLE;
 
-		init_MUTEX(&chan->sem);
 		init_waitqueue_head(&chan->wait_queue);
-
 		dma_create_sysfs_files(chan, info);
 	}
 
@@ -271,6 +389,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(register_dmac);
 
 void unregister_dmac(struct dma_info *info)
 {
@@ -285,31 +404,16 @@
 	list_del(&info->list);
 	platform_device_unregister(info->pdev);
 }
+EXPORT_SYMBOL(unregister_dmac);
 
 static int __init dma_api_init(void)
 {
-	printk("DMA: Registering DMA API.\n");
-
-#ifdef CONFIG_PROC_FS
+	printk(KERN_NOTICE "DMA: Registering DMA API.\n");
 	create_proc_read_entry("dma", 0, 0, dma_read_proc, 0);
-#endif
-
 	return 0;
 }
-
 subsys_initcall(dma_api_init);
 
 MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>");
 MODULE_DESCRIPTION("DMA API for SuperH");
 MODULE_LICENSE("GPL");
-
-EXPORT_SYMBOL(request_dma);
-EXPORT_SYMBOL(free_dma);
-EXPORT_SYMBOL(register_dmac);
-EXPORT_SYMBOL(get_dma_residue);
-EXPORT_SYMBOL(get_dma_info);
-EXPORT_SYMBOL(get_dma_channel);
-EXPORT_SYMBOL(dma_xfer);
-EXPORT_SYMBOL(dma_wait_for_completion);
-EXPORT_SYMBOL(dma_configure_channel);
-
diff --git a/arch/sh/drivers/dma/dma-sh.c b/arch/sh/drivers/dma/dma-sh.c
index 6607860..f63721e 100644
--- a/arch/sh/drivers/dma/dma-sh.c
+++ b/arch/sh/drivers/dma/dma-sh.c
@@ -94,20 +94,13 @@
 	if (unlikely(!chan->flags & DMA_TEI_CAPABLE))
 		return 0;
 
-	chan->name = kzalloc(32, GFP_KERNEL);
-	if (unlikely(chan->name == NULL))
-		return -ENOMEM;
-	snprintf(chan->name, 32, "DMAC Transfer End (Channel %d)",
-		 chan->chan);
-
 	return request_irq(get_dmte_irq(chan->chan), dma_tei,
-			   IRQF_DISABLED, chan->name, chan);
+			   IRQF_DISABLED, chan->dev_id, chan);
 }
 
 static void sh_dmac_free_dma(struct dma_channel *chan)
 {
 	free_irq(get_dmte_irq(chan->chan), chan);
-	kfree(chan->name);
 }
 
 static void
diff --git a/arch/sh/drivers/dma/dma-sysfs.c b/arch/sh/drivers/dma/dma-sysfs.c
index 29b8ef9..eebcd47 100644
--- a/arch/sh/drivers/dma/dma-sysfs.c
+++ b/arch/sh/drivers/dma/dma-sysfs.c
@@ -3,7 +3,7 @@
  *
  * sysfs interface for SH DMA API
  *
- * Copyright (C) 2004, 2005  Paul Mundt
+ * Copyright (C) 2004 - 2006  Paul Mundt
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
@@ -21,7 +21,6 @@
 static struct sysdev_class dma_sysclass = {
 	set_kset_name("dma"),
 };
-
 EXPORT_SYMBOL(dma_sysclass);
 
 static ssize_t dma_show_devices(struct sys_device *dev, char *buf)
@@ -31,7 +30,10 @@
 
 	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
 		struct dma_info *info = get_dma_info(i);
-		struct dma_channel *channel = &info->channels[i];
+		struct dma_channel *channel = get_dma_channel(i);
+
+		if (unlikely(!info) || !channel)
+			continue;
 
 		len += sprintf(buf + len, "%2d: %14s    %s\n",
 			       channel->chan, info->name,
@@ -125,11 +127,16 @@
 	if (ret)
 		return ret;
 
-	sysdev_create_file(dev, &attr_dev_id);
-	sysdev_create_file(dev, &attr_count);
-	sysdev_create_file(dev, &attr_mode);
-	sysdev_create_file(dev, &attr_flags);
-	sysdev_create_file(dev, &attr_config);
+	ret |= sysdev_create_file(dev, &attr_dev_id);
+	ret |= sysdev_create_file(dev, &attr_count);
+	ret |= sysdev_create_file(dev, &attr_mode);
+	ret |= sysdev_create_file(dev, &attr_flags);
+	ret |= sysdev_create_file(dev, &attr_config);
+
+	if (unlikely(ret)) {
+		dev_err(&info->pdev->dev, "Failed creating attrs\n");
+		return ret;
+	}
 
 	snprintf(name, sizeof(name), "dma%d", chan->chan);
 	return sysfs_create_link(&info->pdev->dev.kobj, &dev->kobj, name);
diff --git a/arch/sh/drivers/pci/ops-titan.c b/arch/sh/drivers/pci/ops-titan.c
index cd56d53..ac8ee23 100644
--- a/arch/sh/drivers/pci/ops-titan.c
+++ b/arch/sh/drivers/pci/ops-titan.c
@@ -15,25 +15,21 @@
 #include <linux/types.h>
 #include <linux/init.h>
 #include <linux/pci.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <asm/titan.h>
 #include "pci-sh4.h"
 
+static char titan_irq_tab[] __initdata = {
+	TITAN_IRQ_WAN,
+	TITAN_IRQ_LAN,
+	TITAN_IRQ_MPCIA,
+	TITAN_IRQ_MPCIB,
+	TITAN_IRQ_USB,
+};
+
 int __init pcibios_map_platform_irq(struct pci_dev *pdev, u8 slot, u8 pin)
 {
-	int irq = -1;
-
-	switch (slot) {
-	case 0: irq = TITAN_IRQ_WAN;   break;	/* eth0 (WAN) */
-	case 1: irq = TITAN_IRQ_LAN;   break;	/* eth1 (LAN) */
-	case 2: irq = TITAN_IRQ_MPCIA; break;	/* mPCI A */
-	case 3: irq = TITAN_IRQ_MPCIB; break;	/* mPCI B */
-	case 4: irq = TITAN_IRQ_USB;   break;	/* USB */
-	default:
-		printk(KERN_INFO "PCI: Bad IRQ mapping "
-				 "request for slot %d\n", slot);
-		return -1;
-	}
+	int irq = titan_irq_tab[slot];
 
 	printk("PCI: Mapping TITAN IRQ for slot %d, pin %c to irq %d\n",
 		slot, pin - 1 + 'A', irq);
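
Note that the table lookup above relies on callers only passing slots 0-4, which the old switch statement enforced through its default case; a bounds-checked variant would be a small addition, sketched below (not part of this board code):

/* Hedged sketch of a bounds-checked lookup. */
static int __init titan_slot_to_irq(u8 slot)
{
	if (slot >= ARRAY_SIZE(titan_irq_tab))
		return -1;	/* mirror the old default: case */
	return titan_irq_tab[slot];
}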
diff --git a/arch/sh/drivers/pci/pci-sh7780.c b/arch/sh/drivers/pci/pci-sh7780.c
index d6e6352..602b644 100644
--- a/arch/sh/drivers/pci/pci-sh7780.c
+++ b/arch/sh/drivers/pci/pci-sh7780.c
@@ -22,6 +22,20 @@
 #include <linux/delay.h>
 #include "pci-sh4.h"
 
+#define INTC_BASE	0xffd00000
+#define INTC_ICR0	(INTC_BASE+0x0)
+#define INTC_ICR1	(INTC_BASE+0x1c)
+#define INTC_INTPRI	(INTC_BASE+0x10)
+#define INTC_INTREQ	(INTC_BASE+0x24)
+#define INTC_INTMSK0	(INTC_BASE+0x44)
+#define INTC_INTMSK1	(INTC_BASE+0x48)
+#define INTC_INTMSK2	(INTC_BASE+0x40080)
+#define INTC_INTMSKCLR0	(INTC_BASE+0x64)
+#define INTC_INTMSKCLR1	(INTC_BASE+0x68)
+#define INTC_INTMSKCLR2	(INTC_BASE+0x40084)
+#define INTC_INT2MSKR	(INTC_BASE+0x40038)
+#define INTC_INT2MSKCR	(INTC_BASE+0x4003c)
+
 /*
  * Initialization. Try all known PCI access methods. Note that we support
  * using both PCI BIOS and direct access: in such cases, we use I/O ports
diff --git a/arch/sh/drivers/push-switch.c b/arch/sh/drivers/push-switch.c
new file mode 100644
index 0000000..f2b9157
--- /dev/null
+++ b/arch/sh/drivers/push-switch.c
@@ -0,0 +1,138 @@
+/*
+ * Generic push-switch framework
+ *
+ * Copyright (C) 2006  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <asm/push-switch.h>
+
+#define DRV_NAME "push-switch"
+#define DRV_VERSION "0.1.0"
+
+static ssize_t switch_show(struct device *dev,
+			   struct device_attribute *attr,
+			   char *buf)
+{
+	struct push_switch_platform_info *psw_info = dev->platform_data;
+	return sprintf(buf, "%s\n", psw_info->name);
+}
+static DEVICE_ATTR(switch, S_IRUGO, switch_show, NULL);
+
+static void switch_timer(unsigned long data)
+{
+	struct push_switch *psw = (struct push_switch *)data;
+
+	schedule_work(&psw->work);
+}
+
+static void switch_work_handler(void *data)
+{
+	struct platform_device *pdev = data;
+	struct push_switch *psw = platform_get_drvdata(pdev);
+
+	psw->state = 0;
+
+	kobject_uevent(&pdev->dev.kobj, KOBJ_CHANGE);
+}
+
+static int switch_drv_probe(struct platform_device *pdev)
+{
+	struct push_switch_platform_info *psw_info;
+	struct push_switch *psw;
+	int ret, irq;
+
+	psw = kzalloc(sizeof(struct push_switch), GFP_KERNEL);
+	if (unlikely(!psw))
+		return -ENOMEM;
+
+	irq = platform_get_irq(pdev, 0);
+	if (unlikely(irq < 0)) {
+		ret = -ENODEV;
+		goto err;
+	}
+
+	psw_info = pdev->dev.platform_data;
+	BUG_ON(!psw_info);
+
+	ret = request_irq(irq, psw_info->irq_handler,
+			  IRQF_DISABLED | psw_info->irq_flags,
+			  psw_info->name ? psw_info->name : DRV_NAME, pdev);
+	if (unlikely(ret < 0))
+		goto err;
+
+	if (psw_info->name) {
+		ret = device_create_file(&pdev->dev, &dev_attr_switch);
+		if (unlikely(ret)) {
+			dev_err(&pdev->dev, "Failed creating device attrs\n");
+			ret = -EINVAL;
+			goto err_irq;
+		}
+	}
+
+	INIT_WORK(&psw->work, switch_work_handler, pdev);
+	init_timer(&psw->debounce);
+
+	psw->debounce.function = switch_timer;
+	psw->debounce.data = (unsigned long)psw;
+
+	platform_set_drvdata(pdev, psw);
+
+	return 0;
+
+err_irq:
+	free_irq(irq, pdev);
+err:
+	kfree(psw);
+	return ret;
+}
+
+static int switch_drv_remove(struct platform_device *pdev)
+{
+	struct push_switch *psw = platform_get_drvdata(pdev);
+	struct push_switch_platform_info *psw_info = pdev->dev.platform_data;
+	int irq = platform_get_irq(pdev, 0);
+
+	if (psw_info->name)
+		device_remove_file(&pdev->dev, &dev_attr_switch);
+
+	platform_set_drvdata(pdev, NULL);
+	flush_scheduled_work();
+	del_timer_sync(&psw->debounce);
+	free_irq(irq, pdev);
+
+	kfree(psw);
+
+	return 0;
+}
+
+static struct platform_driver switch_driver = {
+	.probe		= switch_drv_probe,
+	.remove		= switch_drv_remove,
+	.driver		= {
+		.name	= DRV_NAME,
+	},
+};
+
+static int __init switch_init(void)
+{
+	printk(KERN_NOTICE DRV_NAME ": version %s loaded\n", DRV_VERSION);
+	return platform_driver_register(&switch_driver);
+}
+
+static void __exit switch_exit(void)
+{
+	platform_driver_unregister(&switch_driver);
+}
+module_init(switch_init);
+module_exit(switch_exit);
+
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR("Paul Mundt");
+MODULE_LICENSE("GPL v2");
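
The driver expects its platform device to carry a struct push_switch_platform_info (from asm/push-switch.h, not shown in this patch) plus an IRQ resource. A board port would wire one up roughly as below; the IRQ number, the handler body and the sharing flag are assumptions for illustration.

/* Hedged board-side sketch for the push-switch driver above. */
static irqreturn_t board_psw_handler(int irq, void *data)
{
	/* a real handler would fetch the struct push_switch via
	 * platform_get_drvdata(data) (data is the pdev passed to
	 * request_irq() above), set ->state and kick ->debounce */
	return IRQ_HANDLED;
}

static struct push_switch_platform_info board_psw_info = {
	.name		= "swA",
	.irq_handler	= board_psw_handler,
	.irq_flags	= IRQF_SHARED,		/* assumed sharing policy */
};

static struct resource board_psw_resources[] = {
	[0] = {
		.start	= 42,			/* assumed IRQ number */
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device board_psw_device = {
	.name		= "push-switch",	/* must match DRV_NAME */
	.id		= 0,
	.num_resources	= ARRAY_SIZE(board_psw_resources),
	.resource	= board_psw_resources,
	.dev		= {
		.platform_data = &board_psw_info,
	},
};

Registration would then be a plain platform_device_register(&board_psw_device) from the board setup code.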
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile
index 5da88a4..99c7e52 100644
--- a/arch/sh/kernel/Makefile
+++ b/arch/sh/kernel/Makefile
@@ -4,7 +4,7 @@
 
 extra-y	:= head.o init_task.o vmlinux.lds
 
-obj-y	:= process.o signal.o entry.o traps.o irq.o \
+obj-y	:= process.o signal.o traps.o irq.o \
 	ptrace.o setup.o time.o sys_sh.o semaphore.o \
 	io.o io_generic.o sh_ksyms.o syscalls.o
 
@@ -21,3 +21,4 @@
 obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
 obj-$(CONFIG_APM)		+= apm.o
 obj-$(CONFIG_PM)		+= pm.o
+obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
diff --git a/arch/sh/kernel/cpu/Makefile b/arch/sh/kernel/cpu/Makefile
index fb5dac0..0582e67 100644
--- a/arch/sh/kernel/cpu/Makefile
+++ b/arch/sh/kernel/cpu/Makefile
@@ -2,11 +2,12 @@
 # Makefile for the Linux/SuperH CPU-specific backends.
 #
 
-obj-y	+= irq/ init.o clock.o
-
-obj-$(CONFIG_CPU_SH2)		+= sh2/
-obj-$(CONFIG_CPU_SH3)		+= sh3/
-obj-$(CONFIG_CPU_SH4)		+= sh4/
+obj-$(CONFIG_CPU_SH2)		= sh2/
+obj-$(CONFIG_CPU_SH2A)		= sh2a/
+obj-$(CONFIG_CPU_SH3)		= sh3/
+obj-$(CONFIG_CPU_SH4)		= sh4/
 
 obj-$(CONFIG_UBC_WAKEUP)	+= ubc.o
 obj-$(CONFIG_SH_ADC)		+= adc.o
+
+obj-y	+= irq/ init.o clock.o
diff --git a/arch/sh/kernel/cpu/clock.c b/arch/sh/kernel/cpu/clock.c
index 51ec64c..abb586b 100644
--- a/arch/sh/kernel/cpu/clock.c
+++ b/arch/sh/kernel/cpu/clock.c
@@ -5,9 +5,11 @@
  *
  * This clock framework is derived from the OMAP version by:
  *
- *	Copyright (C) 2004 Nokia Corporation
+ *	Copyright (C) 2004 - 2005 Nokia Corporation
  *	Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
  *
+ *  Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
+ *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -20,6 +22,7 @@
 #include <linux/kref.h>
 #include <linux/seq_file.h>
 #include <linux/err.h>
+#include <linux/platform_device.h>
 #include <asm/clock.h>
 #include <asm/timer.h>
 
@@ -195,17 +198,37 @@
 		propagate_rate(clk);
 }
 
-struct clk *clk_get(const char *id)
+/*
+ * Returns a clock. Note that we first try to use device id on the bus
+ * and clock name. If this fails, we try to use clock name only.
+ */
+struct clk *clk_get(struct device *dev, const char *id)
 {
 	struct clk *p, *clk = ERR_PTR(-ENOENT);
+	int idno;
+
+	if (dev == NULL || dev->bus != &platform_bus_type)
+		idno = -1;
+	else
+		idno = to_platform_device(dev)->id;
 
 	mutex_lock(&clock_list_sem);
 	list_for_each_entry(p, &clock_list, node) {
+		if (p->id == idno &&
+		    strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
+			clk = p;
+			goto found;
+		}
+	}
+
+	list_for_each_entry(p, &clock_list, node) {
 		if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
 			clk = p;
 			break;
 		}
 	}
+
+found:
 	mutex_unlock(&clock_list_sem);
 
 	return clk;
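
With the two-pass search above, callers can qualify the lookup with their device or fall back to a plain name match. A short sketch of both styles follows; the clock names are illustrative, and "bus_clk" matches the usage visible in clock-sh7709.c later in this patch.

/* Hedged sketch of the two lookup styles clk_get() now supports. */
#include <linux/err.h>
#include <linux/platform_device.h>
#include <asm/clock.h>

static unsigned long example_bus_rate(struct platform_device *pdev)
{
	/* 1) device-qualified lookup: platform device id + clock name */
	struct clk *mod = clk_get(&pdev->dev, "module_clk");
	/* 2) name-only lookup, e.g. for the global bus clock */
	struct clk *bus = clk_get(NULL, "bus_clk");
	unsigned long rate = 0;

	if (!IS_ERR(bus)) {
		rate = clk_get_rate(bus);
		clk_put(bus);
	}
	if (!IS_ERR(mod))
		clk_put(mod);

	return rate;
}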
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index bfb90eb..4812176 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -68,12 +68,14 @@
 
 		waysize = cpu_data->dcache.sets;
 
+#ifdef CCR_CACHE_ORA
 		/*
 		 * If the OC is already in RAM mode, we only have
 		 * half of the entries to flush..
 		 */
 		if (ccr & CCR_CACHE_ORA)
 			waysize >>= 1;
+#endif
 
 		waysize <<= cpu_data->dcache.entry_shift;
 
diff --git a/arch/sh/kernel/cpu/irq/Makefile b/arch/sh/kernel/cpu/irq/Makefile
index 1c034c2..0049d21 100644
--- a/arch/sh/kernel/cpu/irq/Makefile
+++ b/arch/sh/kernel/cpu/irq/Makefile
@@ -1,8 +1,9 @@
 #
 # Makefile for the Linux/SuperH CPU-specific IRQ handlers.
 #
-obj-y	+= ipr.o imask.o
+obj-y	+= imask.o
 
+obj-$(CONFIG_CPU_HAS_IPR_IRQ)		+= ipr.o
 obj-$(CONFIG_CPU_HAS_PINT_IRQ)		+= pint.o
 obj-$(CONFIG_CPU_HAS_MASKREG_IRQ)	+= maskreg.o
 obj-$(CONFIG_CPU_HAS_INTC2_IRQ)		+= intc2.o
diff --git a/arch/sh/kernel/cpu/irq/imask.c b/arch/sh/kernel/cpu/irq/imask.c
index a33ae3e..301b505 100644
--- a/arch/sh/kernel/cpu/irq/imask.c
+++ b/arch/sh/kernel/cpu/irq/imask.c
@@ -53,7 +53,10 @@
 {
 	unsigned long __dummy;
 
-	asm volatile("ldc	%2, r6_bank\n\t"
+	asm volatile(
+#ifdef CONFIG_CPU_HAS_SR_RB
+		     "ldc	%2, r6_bank\n\t"
+#endif
 		     "stc	sr, %0\n\t"
 		     "and	#0xf0, %0\n\t"
 		     "shlr2	%0\n\t"
diff --git a/arch/sh/kernel/cpu/irq/intc2.c b/arch/sh/kernel/cpu/irq/intc2.c
index 74ca576..74defe7 100644
--- a/arch/sh/kernel/cpu/irq/intc2.c
+++ b/arch/sh/kernel/cpu/irq/intc2.c
@@ -11,22 +11,29 @@
  * Hitachi 7751, the STM ST40 STB1, SH7760, and SH7780.
  */
 #include <linux/kernel.h>
-#include <linux/irq.h>
+#include <linux/interrupt.h>
 #include <linux/io.h>
-#include <asm/system.h>
+
+#if defined(CONFIG_CPU_SUBTYPE_SH7760)
+#define INTC2_BASE	0xfe080000
+#define INTC2_INTMSK	(INTC2_BASE + 0x40)
+#define INTC2_INTMSKCLR	(INTC2_BASE + 0x60)
+#elif defined(CONFIG_CPU_SUBTYPE_SH7780)
+#define INTC2_BASE	0xffd40000
+#define INTC2_INTMSK	(INTC2_BASE + 0x38)
+#define INTC2_INTMSKCLR	(INTC2_BASE + 0x3c)
+#endif
 
 static void disable_intc2_irq(unsigned int irq)
 {
 	struct intc2_data *p = get_irq_chip_data(irq);
-	ctrl_outl(1 << p->msk_shift,
-		  INTC2_BASE + INTC2_INTMSK_OFFSET + p->msk_offset);
+	ctrl_outl(1 << p->msk_shift, INTC2_INTMSK + p->msk_offset);
 }
 
 static void enable_intc2_irq(unsigned int irq)
 {
 	struct intc2_data *p = get_irq_chip_data(irq);
-	ctrl_outl(1 << p->msk_shift,
-		  INTC2_BASE + INTC2_INTMSKCLR_OFFSET + p->msk_offset);
+	ctrl_outl(1 << p->msk_shift, INTC2_INTMSKCLR + p->msk_offset);
 }
 
 static struct irq_chip intc2_irq_chip = {
@@ -61,12 +68,10 @@
 		/* Set the priority level */
 		local_irq_save(flags);
 
-		ipr = ctrl_inl(INTC2_BASE + INTC2_INTPRI_OFFSET +
-			       p->ipr_offset);
+		ipr = ctrl_inl(INTC2_BASE + p->ipr_offset);
 		ipr &= ~(0xf << p->ipr_shift);
 		ipr |= p->priority << p->ipr_shift;
-		ctrl_outl(ipr, INTC2_BASE + INTC2_INTPRI_OFFSET +
-			  p->ipr_offset);
+		ctrl_outl(ipr, INTC2_BASE + p->ipr_offset);
 
 		local_irq_restore(flags);
 
diff --git a/arch/sh/kernel/cpu/irq/ipr.c b/arch/sh/kernel/cpu/irq/ipr.c
index a008956..35eb575 100644
--- a/arch/sh/kernel/cpu/irq/ipr.c
+++ b/arch/sh/kernel/cpu/irq/ipr.c
@@ -19,25 +19,21 @@
 #include <linux/init.h>
 #include <linux/irq.h>
 #include <linux/module.h>
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/machvec.h>
-
+#include <linux/io.h>
+#include <linux/interrupt.h>
 
 static void disable_ipr_irq(unsigned int irq)
 {
 	struct ipr_data *p = get_irq_chip_data(irq);
-	int shift = p->shift*4;
 	/* Set the priority in IPR to 0 */
-	ctrl_outw(ctrl_inw(p->addr) & (0xffff ^ (0xf << shift)), p->addr);
+	ctrl_outw(ctrl_inw(p->addr) & (0xffff ^ (0xf << p->shift)), p->addr);
 }
 
 static void enable_ipr_irq(unsigned int irq)
 {
 	struct ipr_data *p = get_irq_chip_data(irq);
-	int shift = p->shift*4;
 	/* Set priority in IPR back to original value */
-	ctrl_outw(ctrl_inw(p->addr) | (p->priority << shift), p->addr);
+	ctrl_outw(ctrl_inw(p->addr) | (p->priority << p->shift), p->addr);
 }
 
 static struct irq_chip ipr_irq_chip = {
@@ -53,6 +49,10 @@
 
 	for (i = 0; i < nr_irqs; i++) {
 		unsigned int irq = table[i].irq;
+		table[i].addr = map_ipridx_to_addr(table[i].ipr_idx);
+		/* skip this entry if the IPR index could not be mapped */
+		if (table[i].addr == 0)
+			continue;
 		disable_irq_nosync(irq);
 		set_irq_chip_and_handler_name(irq, &ipr_irq_chip,
 				      handle_level_irq, "level");
@@ -62,83 +62,6 @@
 }
 EXPORT_SYMBOL(make_ipr_irq);
 
-static struct ipr_data sys_ipr_map[] = {
-#ifndef CONFIG_CPU_SUBTYPE_SH7780
-	{ TIMER_IRQ, TIMER_IPR_ADDR, TIMER_IPR_POS, TIMER_PRIORITY },
-	{ TIMER1_IRQ, TIMER1_IPR_ADDR, TIMER1_IPR_POS, TIMER1_PRIORITY },
-#ifdef RTC_IRQ
-	{ RTC_IRQ, RTC_IPR_ADDR, RTC_IPR_POS, RTC_PRIORITY },
-#endif
-#ifdef SCI_ERI_IRQ
-	{ SCI_ERI_IRQ, SCI_IPR_ADDR, SCI_IPR_POS, SCI_PRIORITY },
-	{ SCI_RXI_IRQ, SCI_IPR_ADDR, SCI_IPR_POS, SCI_PRIORITY },
-	{ SCI_TXI_IRQ, SCI_IPR_ADDR, SCI_IPR_POS, SCI_PRIORITY },
-#endif
-#ifdef SCIF1_ERI_IRQ
-	{ SCIF1_ERI_IRQ, SCIF1_IPR_ADDR, SCIF1_IPR_POS, SCIF1_PRIORITY },
-	{ SCIF1_RXI_IRQ, SCIF1_IPR_ADDR, SCIF1_IPR_POS, SCIF1_PRIORITY },
-	{ SCIF1_BRI_IRQ, SCIF1_IPR_ADDR, SCIF1_IPR_POS, SCIF1_PRIORITY },
-	{ SCIF1_TXI_IRQ, SCIF1_IPR_ADDR, SCIF1_IPR_POS, SCIF1_PRIORITY },
-#endif
-#if defined(CONFIG_CPU_SUBTYPE_SH7300)
-	{ SCIF0_IRQ, SCIF0_IPR_ADDR, SCIF0_IPR_POS, SCIF0_PRIORITY },
-	{ DMTE2_IRQ, DMA1_IPR_ADDR, DMA1_IPR_POS, DMA1_PRIORITY },
-	{ DMTE3_IRQ, DMA1_IPR_ADDR, DMA1_IPR_POS, DMA1_PRIORITY },
-	{ VIO_IRQ, VIO_IPR_ADDR, VIO_IPR_POS, VIO_PRIORITY },
-#endif
-#ifdef SCIF_ERI_IRQ
-	{ SCIF_ERI_IRQ, SCIF_IPR_ADDR, SCIF_IPR_POS, SCIF_PRIORITY },
-	{ SCIF_RXI_IRQ, SCIF_IPR_ADDR, SCIF_IPR_POS, SCIF_PRIORITY },
-	{ SCIF_BRI_IRQ, SCIF_IPR_ADDR, SCIF_IPR_POS, SCIF_PRIORITY },
-	{ SCIF_TXI_IRQ, SCIF_IPR_ADDR, SCIF_IPR_POS, SCIF_PRIORITY },
-#endif
-#ifdef IRDA_ERI_IRQ
-	{ IRDA_ERI_IRQ, IRDA_IPR_ADDR, IRDA_IPR_POS, IRDA_PRIORITY },
-	{ IRDA_RXI_IRQ, IRDA_IPR_ADDR, IRDA_IPR_POS, IRDA_PRIORITY },
-	{ IRDA_BRI_IRQ, IRDA_IPR_ADDR, IRDA_IPR_POS, IRDA_PRIORITY },
-	{ IRDA_TXI_IRQ, IRDA_IPR_ADDR, IRDA_IPR_POS, IRDA_PRIORITY },
-#endif
-#if defined(CONFIG_CPU_SUBTYPE_SH7707) || defined(CONFIG_CPU_SUBTYPE_SH7709) || \
-    defined(CONFIG_CPU_SUBTYPE_SH7706) || \
-    defined(CONFIG_CPU_SUBTYPE_SH7300) || defined(CONFIG_CPU_SUBTYPE_SH7705)
-	/*
-	 * Initialize the Interrupt Controller (INTC)
-	 * registers to their power on values
-	 */
-
-	/*
-	 * Enable external irq (INTC IRQ mode).
-	 * You should set corresponding bits of PFC to "00"
-	 * to enable these interrupts.
-	 */
-	{ IRQ0_IRQ, IRQ0_IPR_ADDR, IRQ0_IPR_POS, IRQ0_PRIORITY },
-	{ IRQ1_IRQ, IRQ1_IPR_ADDR, IRQ1_IPR_POS, IRQ1_PRIORITY },
-	{ IRQ2_IRQ, IRQ2_IPR_ADDR, IRQ2_IPR_POS, IRQ2_PRIORITY },
-	{ IRQ3_IRQ, IRQ3_IPR_ADDR, IRQ3_IPR_POS, IRQ3_PRIORITY },
-	{ IRQ4_IRQ, IRQ4_IPR_ADDR, IRQ4_IPR_POS, IRQ4_PRIORITY },
-	{ IRQ5_IRQ, IRQ5_IPR_ADDR, IRQ5_IPR_POS, IRQ5_PRIORITY },
-#endif
-#endif
-};
-
-void __init init_IRQ(void)
-{
-	make_ipr_irq(sys_ipr_map, ARRAY_SIZE(sys_ipr_map));
-
-#ifdef CONFIG_CPU_HAS_PINT_IRQ
-	init_IRQ_pint();
-#endif
-
-#ifdef CONFIG_CPU_HAS_INTC2_IRQ
-	init_IRQ_intc2();
-#endif
-	/* Perform the machine specific initialisation */
-	if (sh_mv.mv_init_irq != NULL)
-		sh_mv.mv_init_irq();
-
-	irq_ctx_init(smp_processor_id());
-}
-
 #if !defined(CONFIG_CPU_HAS_PINT_IRQ)
 int ipr_irq_demux(int irq)
 {
diff --git a/arch/sh/kernel/cpu/sh2/Makefile b/arch/sh/kernel/cpu/sh2/Makefile
index 389353f..f0f059a 100644
--- a/arch/sh/kernel/cpu/sh2/Makefile
+++ b/arch/sh/kernel/cpu/sh2/Makefile
@@ -2,5 +2,6 @@
 # Makefile for the Linux/SuperH SH-2 backends.
 #
 
-obj-y	:= probe.o
+obj-y	:= ex.o probe.o entry.o
 
+obj-$(CONFIG_CPU_SUBTYPE_SH7619) += setup-sh7619.o clock-sh7619.o
diff --git a/arch/sh/kernel/cpu/sh2/clock-sh7619.c b/arch/sh/kernel/cpu/sh2/clock-sh7619.c
new file mode 100644
index 0000000..d0440b2
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2/clock-sh7619.c
@@ -0,0 +1,81 @@
+/*
+ * arch/sh/kernel/cpu/sh2/clock-sh7619.c
+ *
+ * SH7619 support for the clock framework
+ *
+ *  Copyright (C) 2006  Yoshinori Sato
+ *
+ * Based on clock-sh4.c
+ *  Copyright (C) 2005  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/clock.h>
+#include <asm/freq.h>
+#include <asm/io.h>
+
+static const int pll1rate[] = { 1, 2 };
+static const int pfc_divisors[] = { 1, 2, 0, 4 };
+
+#if (CONFIG_SH_CLK_MD == 1) || (CONFIG_SH_CLK_MD == 2)
+#define PLL2 (4)
+#elif (CONFIG_SH_CLK_MD == 5) || (CONFIG_SH_CLK_MD == 6)
+#define PLL2 (2)
+#else
+#error "Illegal Clock Mode!"
+#endif
+
+static void master_clk_init(struct clk *clk)
+{
+	clk->rate *= PLL2 * pll1rate[(ctrl_inw(FREQCR) >> 8) & 7];
+}
+
+static struct clk_ops sh7619_master_clk_ops = {
+	.init		= master_clk_init,
+};
+
+static void module_clk_recalc(struct clk *clk)
+{
+	int idx = (ctrl_inw(FREQCR) & 0x0007);
+	clk->rate = clk->parent->rate / pfc_divisors[idx];
+}
+
+static struct clk_ops sh7619_module_clk_ops = {
+	.recalc		= module_clk_recalc,
+};
+
+static void bus_clk_recalc(struct clk *clk)
+{
+	clk->rate = clk->parent->rate / pll1rate[(ctrl_inw(FREQCR) >> 8) & 7];
+}
+
+static struct clk_ops sh7619_bus_clk_ops = {
+	.recalc		= bus_clk_recalc,
+};
+
+static void cpu_clk_recalc(struct clk *clk)
+{
+	clk->rate = clk->parent->rate;
+}
+
+static struct clk_ops sh7619_cpu_clk_ops = {
+	.recalc		= cpu_clk_recalc,
+};
+
+static struct clk_ops *sh7619_clk_ops[] = {
+	&sh7619_master_clk_ops,
+	&sh7619_module_clk_ops,
+	&sh7619_bus_clk_ops,
+	&sh7619_cpu_clk_ops,
+};
+
+void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
+{
+	if (idx < ARRAY_SIZE(sh7619_clk_ops))
+		*ops = sh7619_clk_ops[idx];
+}
+
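
The four clk_ops above reduce to simple arithmetic on FREQCR. A worked example, assuming a 10 MHz input clock, clock mode 5, FREQCR = 0x0101, and the derived clocks parented on the master clock as the generic SH clock code arranges:

/* Hedged worked example for the SH7619 clock tree above:
 *   CONFIG_SH_CLK_MD = 5   =>  PLL2 = 2
 *   FREQCR = 0x0101        =>  pll1rate[1] = 2, pfc_divisors[1] = 2
 *
 *   master_clk = 10 MHz * 2 * 2            = 40 MHz
 *   module_clk = 40 MHz / pfc_divisors[1]  = 20 MHz
 *   bus_clk    = 40 MHz / pll1rate[1]      = 20 MHz
 *   cpu_clk    = master_clk                = 40 MHz
 */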
diff --git a/arch/sh/kernel/cpu/sh2/entry.S b/arch/sh/kernel/cpu/sh2/entry.S
new file mode 100644
index 0000000..34d51b3
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2/entry.S
@@ -0,0 +1,341 @@
+/*
+ * arch/sh/kernel/cpu/sh2/entry.S
+ *
+ * The SH-2 exception entry
+ *
+ * Copyright (C) 2005,2006 Yoshinori Sato
+ * Copyright (C) 2005  AXE,Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
+#include <asm/cpu/mmu_context.h>
+#include <asm/unistd.h>
+#include <asm/errno.h>
+#include <asm/page.h>
+	
+/* Offsets to the stack */
+OFF_R0  =  0		/* Return value. New ABI also arg4 */
+OFF_R1  =  4     	/* New ABI: arg5 */
+OFF_R2  =  8     	/* New ABI: arg6 */
+OFF_R3  =  12     	/* New ABI: syscall_nr */
+OFF_R4  =  16     	/* New ABI: arg0 */
+OFF_R5  =  20     	/* New ABI: arg1 */
+OFF_R6  =  24     	/* New ABI: arg2 */
+OFF_R7  =  28     	/* New ABI: arg3 */
+OFF_SP	=  (15*4)
+OFF_PC  =  (16*4)
+OFF_SR	=  (16*4+2*4)
+OFF_TRA	=  (16*4+6*4)
+
+#include <asm/entry-macros.S>
+
+ENTRY(exception_handler)
+	! already saved r0/r1
+	mov.l	r2,@-sp
+	mov.l	r3,@-sp
+	mov	r0,r1
+	cli
+	mov.l	$cpu_mode,r2
+	mov.l	@r2,r0
+	mov.l	@(5*4,r15),r3	! previous SR
+	shll2	r3		! set "S" flag
+	rotl	r0		! T <- "S" flag
+	rotl	r0		! "S" flag is LSB
+	rotcr	r3		! T -> r3:b30
+	shlr	r3
+	shlr	r0
+	bt/s	1f
+	 mov.l	r3,@(5*4,r15)	! copy cpu mode to SR
+	! switch to kernel mode
+	mov	#1,r0
+	rotr	r0
+	rotr	r0
+	mov.l	r0,@r2		! enter kernel mode
+	mov.l	$current_thread_info,r2
+	mov.l	@r2,r2
+	mov	#0x20,r0
+	shll8	r0
+	add	r2,r0
+	mov	r15,r2		! r2 = user stack top
+	mov	r0,r15		! switch kernel stack
+	add	#-4,r15		! dummy
+	mov.l	r1,@-r15	! TRA
+	sts.l	macl, @-r15
+	sts.l	mach, @-r15
+	stc.l	gbr, @-r15
+	mov.l	@(4*4,r2),r0
+	mov.l	@(5*4,r2),r1
+	mov.l	r1,@-r15	! original SR
+	sts.l	pr,@-r15
+	mov.l	r0,@-r15	! original PC
+	mov	r2,r3
+	add	#(4+2)*4,r3	! rewind r0 - r3 + exception frame
+	mov.l	r3,@-r15	! original SP
+	mov.l	r14,@-r15
+	mov.l	r13,@-r15
+	mov.l	r12,@-r15
+	mov.l	r11,@-r15
+	mov.l	r10,@-r15
+	mov.l	r9,@-r15
+	mov.l	r8,@-r15
+	mov.l	r7,@-r15
+	mov.l	r6,@-r15
+	mov.l	r5,@-r15
+	mov.l	r4,@-r15
+	mov	r2,r8		! copy user -> kernel stack
+	mov.l	@r8+,r3
+	mov.l	r3,@-r15
+	mov.l	@r8+,r2
+	mov.l	r2,@-r15
+	mov.l	@r8+,r1
+	mov.l	r1,@-r15
+	mov.l	@r8+,r0
+	bra	2f
+	 mov.l	r0,@-r15
+1:
+	! in kernel exception
+	mov	#(22-4-4-1)*4+4,r0
+	mov	r15,r2
+	sub	r0,r15
+	mov.l	@r2+,r0		! old R3
+	mov.l	r0,@-r15	
+	mov.l	@r2+,r0		! old R2
+	mov.l	r0,@-r15	
+	mov.l	@r2+,r0		! old R1
+	mov.l	r0,@-r15	
+	mov.l	@r2+,r0		! old R0
+	mov.l	r0,@-r15	
+	mov.l	@r2+,r3		! old PC
+	mov.l	@r2+,r0		! old SR
+	add	#-4,r2		! exception frame stub (sr)
+	mov.l	r1,@-r2		! TRA
+	sts.l	macl, @-r2
+	sts.l	mach, @-r2
+	stc.l	gbr, @-r2
+	mov.l	r0,@-r2		! save old SR
+	sts.l	pr,@-r2
+	mov.l	r3,@-r2		! save old PC
+	mov	r2,r0
+	add	#8*4,r0
+	mov.l	r0,@-r2		! save old SP
+	mov.l	r14,@-r2
+	mov.l	r13,@-r2
+	mov.l	r12,@-r2
+	mov.l	r11,@-r2
+	mov.l	r10,@-r2
+	mov.l	r9,@-r2
+	mov.l	r8,@-r2
+	mov.l	r7,@-r2
+	mov.l	r6,@-r2
+	mov.l	r5,@-r2
+	mov.l	r4,@-r2
+	mov.l	@(OFF_R0,r15),r0
+	mov.l	@(OFF_R1,r15),r1
+	mov.l	@(OFF_R2,r15),r2
+	mov.l	@(OFF_R3,r15),r3
+2:
+	mov	#OFF_TRA,r8
+	add	r15,r8
+	mov.l	@r8,r9	
+	mov	#64,r8
+	cmp/hs	r8,r9
+	bt	interrupt_entry	! vec >= 64 is interrupt
+	mov	#32,r8
+	cmp/hs	r8,r9
+	bt	trap_entry	! 64 > vec >= 32  is trap
+	mov.l	4f,r8
+	mov	r9,r4
+	shll2	r9
+	add	r9,r8
+	mov.l	@r8,r8
+	mov	#0,r9
+	cmp/eq	r9,r8
+	bf	3f
+	mov.l	8f,r8		! unhandled exception
+3:
+	mov.l	5f,r10
+	jmp	@r8
+	 lds	r10,pr
+
+interrupt_entry:
+	mov	r9,r4
+	mov.l	6f,r9
+	mov.l	7f,r8
+	jmp	@r8
+	 lds	r9,pr
+
+	.align	2
+4:	.long	exception_handling_table
+5:	.long	ret_from_exception
+6:	.long	ret_from_irq
+7:	.long	do_IRQ
+8:	.long	do_exception_error
+	
+trap_entry:	
+	add	#-0x10,r9
+	shll2	r9			! TRA
+	mov	#OFF_TRA,r8
+	add	r15,r8
+	mov.l	r9,@r8
+	mov	r9,r8
+#ifdef CONFIG_TRACE_IRQFLAGS
+	mov.l	5f, r9
+	jsr	@r9
+	 nop
+#endif
+	sti
+	bra	system_call
+	 nop
+	
+	.align	2
+1:	.long	syscall_exit
+2:	.long	break_point_trap_software
+3:	.long	NR_syscalls
+4:	.long	sys_call_table
+#ifdef CONFIG_TRACE_IRQFLAGS
+5:	.long	trace_hardirqs_on
+#endif
+
+#if defined(CONFIG_SH_STANDARD_BIOS)
+	/* Unwind the stack and jmp to the debug entry */
+debug_kernel_fw:
+	mov	r15,r0
+	add	#(22-4)*4-4,r0
+	ldc.l	@r0+,gbr
+	lds.l	@r0+,mach
+	lds.l	@r0+,macl
+	mov	r15,r0
+	mov.l	@(OFF_SP,r0),r1
+	mov	#OFF_SR,r2
+	mov.l	@(r0,r2),r3
+	mov.l	r3,@-r1
+	mov	#OFF_SP,r2
+	mov.l	@(r0,r2),r3
+	mov.l	r3,@-r1
+	mov	r15,r0
+	add	#(22-4)*4-8,r0
+	mov.l	1f,r2
+	mov.l	@r2,r2
+	stc	sr,r3
+	mov.l	r2,@r0
+	mov.l	r3,@r0
+	mov.l	r1,@(8,r0)	
+	mov.l	@r15+, r0
+	mov.l	@r15+, r1
+	mov.l	@r15+, r2
+	mov.l	@r15+, r3
+	mov.l	@r15+, r4
+	mov.l	@r15+, r5
+	mov.l	@r15+, r6
+	mov.l	@r15+, r7
+	mov.l	@r15+, r8
+	mov.l	@r15+, r9
+	mov.l	@r15+, r10
+	mov.l	@r15+, r11
+	mov.l	@r15+, r12
+	mov.l	@r15+, r13
+	mov.l	@r15+, r14
+	add	#8,r15
+	lds.l	@r15+, pr
+	rte
+	 mov.l	@r15+,r15
+	.align	2
+1:	.long	gdb_vbr_vector
+#endif /* CONFIG_SH_STANDARD_BIOS */
+
+ENTRY(address_error_handler)
+	mov	r15,r4				! regs
+	add	#4,r4
+	mov	#OFF_PC,r0
+	mov.l	@(r0,r15),r6			! pc
+	mov.l	1f,r0
+	jmp	@r0
+	 mov	#0,r5				! writeaccess is unknown
+	.align	2
+
+1:	.long	do_address_error
+
+restore_all:
+	cli
+#ifdef CONFIG_TRACE_IRQFLAGS
+	mov.l	3f, r0
+	jsr	@r0
+	 nop
+#endif
+	mov	r15,r0
+	mov.l	$cpu_mode,r2
+	mov	#OFF_SR,r3
+	mov.l	@(r0,r3),r1
+	mov.l	r1,@r2
+	shll2	r1				! clear MD bit
+	shlr2	r1
+	mov.l	@(OFF_SP,r0),r2
+	add	#-8,r2
+	mov.l	r2,@(OFF_SP,r0)			! point exception frame top
+	mov.l	r1,@(4,r2)			! set sr
+	mov	#OFF_PC,r3
+	mov.l	@(r0,r3),r1
+	mov.l	r1,@r2				! set pc
+	add	#4*16+4,r0
+	lds.l	@r0+,pr
+	add	#4,r0				! skip sr
+	ldc.l	@r0+,gbr
+	lds.l	@r0+,mach
+	lds.l	@r0+,macl
+	get_current_thread_info r0, r1
+	mov.l	$current_thread_info,r1
+	mov.l	r0,@r1
+	mov.l	@r15+,r0
+	mov.l	@r15+,r1
+	mov.l	@r15+,r2
+	mov.l	@r15+,r3
+	mov.l	@r15+,r4
+	mov.l	@r15+,r5
+	mov.l	@r15+,r6
+	mov.l	@r15+,r7
+	mov.l	@r15+,r8
+	mov.l	@r15+,r9
+	mov.l	@r15+,r10
+	mov.l	@r15+,r11
+	mov.l	@r15+,r12
+	mov.l	@r15+,r13
+	mov.l	@r15+,r14
+	mov.l	@r15,r15
+	rte
+	 nop
+2:
+	mov.l	1f,r8
+	mov.l	2f,r9
+	jmp	@r9
+	 lds	r8,pr
+
+	.align	2
+$current_thread_info:
+	.long	__current_thread_info
+$cpu_mode:	
+	.long	__cpu_mode
+#ifdef CONFIG_TRACE_IRQFLAGS
+3:	.long	trace_hardirqs_off
+#endif
+		
+! common exception handler
+#include "../../entry-common.S"
+	
+	.data
+! cpu operation mode 
+! bit30 = MD (compatible SH3/4)
+__cpu_mode:
+	.long	0x40000000
+		
+	.section	.bss
+__current_thread_info:
+	.long	0
+
+ENTRY(exception_handling_table)
+	.space	4*32
diff --git a/arch/sh/kernel/cpu/sh2/ex.S b/arch/sh/kernel/cpu/sh2/ex.S
new file mode 100644
index 0000000..6d285af
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2/ex.S
@@ -0,0 +1,46 @@
+/*
+ * arch/sh/kernel/cpu/sh2/ex.S
+ *
+ * The SH-2 exception vector table
+ *
+ * Copyright (C) 2005 Yoshinori Sato
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/linkage.h>
+
+!
+! convert Exception Vector to Exception Number
+!
+exception_entry:	
+no	=	0
+	.rept	256
+	mov.l	r0,@-sp
+	mov	#no,r0
+	bra	exception_trampoline
+	and	#0xff,r0
+no	=	no + 1
+	.endr
+exception_trampoline:
+	mov.l	r1,@-sp
+	mov.l	$exception_handler,r1
+	jmp	@r1
+
+	.align	2
+$exception_entry:
+	.long	exception_entry
+$exception_handler:
+	.long	exception_handler
+!
+! Exception Vector Base
+!
+	.align	2
+ENTRY(vbr_base)
+vector	=	0
+	.rept	256
+	.long	exception_entry + vector * 8
+vector	=	vector + 1
+	.endr
diff --git a/arch/sh/kernel/cpu/sh2/probe.c b/arch/sh/kernel/cpu/sh2/probe.c
index f17a2a0..ba527d9 100644
--- a/arch/sh/kernel/cpu/sh2/probe.c
+++ b/arch/sh/kernel/cpu/sh2/probe.c
@@ -17,17 +17,23 @@
 
 int __init detect_cpu_and_cache_system(void)
 {
-	/*
-	 * For now, assume SH7604 .. fix this later.
-	 */
+#if defined(CONFIG_CPU_SUBTYPE_SH7604)
 	cpu_data->type			= CPU_SH7604;
 	cpu_data->dcache.ways		= 4;
-	cpu_data->dcache.way_shift	= 6;
+	cpu_data->dcache.way_incr	= (1<<10);
 	cpu_data->dcache.sets		= 64;
 	cpu_data->dcache.entry_shift	= 4;
 	cpu_data->dcache.linesz		= L1_CACHE_BYTES;
 	cpu_data->dcache.flags		= 0;
-
+#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
+	cpu_data->type			= CPU_SH7619;
+	cpu_data->dcache.ways		= 4;
+	cpu_data->dcache.way_incr	= (1<<12);
+	cpu_data->dcache.sets		= 256;
+	cpu_data->dcache.entry_shift	= 4;
+	cpu_data->dcache.linesz		= L1_CACHE_BYTES;
+	cpu_data->dcache.flags		= 0;
+#endif
 	/*
 	 * SH-2 doesn't have separate caches
 	 */
diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
new file mode 100644
index 0000000..82c2d90
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
@@ -0,0 +1,53 @@
+/*
+ * SH7619 Setup
+ *
+ *  Copyright (C) 2006  Yoshinori Sato
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <asm/sci.h>
+
+static struct plat_sci_port sci_platform_data[] = {
+	{
+		.mapbase	= 0xf8400000,
+		.flags		= UPF_BOOT_AUTOCONF,
+		.type		= PORT_SCIF,
+		.irqs		=  { 88, 89, 91, 90},
+	}, {
+		.mapbase	= 0xf8410000,
+		.flags		= UPF_BOOT_AUTOCONF,
+		.type		= PORT_SCIF,
+		.irqs		=  { 92, 93, 95, 94},
+	}, {
+		.mapbase	= 0xf8420000,
+		.flags		= UPF_BOOT_AUTOCONF,
+		.type		= PORT_SCIF,
+		.irqs		=  { 96, 97, 99, 98},
+	}, {
+		.flags = 0,
+	}
+};
+
+static struct platform_device sci_device = {
+	.name		= "sh-sci",
+	.id		= -1,
+	.dev		= {
+		.platform_data	= sci_platform_data,
+	},
+};
+
+static struct platform_device *sh7619_devices[] __initdata = {
+	&sci_device,
+};
+
+static int __init sh7619_devices_setup(void)
+{
+	return platform_add_devices(sh7619_devices,
+				    ARRAY_SIZE(sh7619_devices));
+}
+__initcall(sh7619_devices_setup);
diff --git a/arch/sh/kernel/cpu/sh2a/Makefile b/arch/sh/kernel/cpu/sh2a/Makefile
new file mode 100644
index 0000000..350972a
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the Linux/SuperH SH-2A backends.
+#
+
+obj-y	:= common.o probe.o
+
+common-y	+= $(addprefix ../sh2/, ex.o)
+common-y	+= $(addprefix ../sh2/, entry.o)
+
+obj-$(CONFIG_CPU_SUBTYPE_SH7206) += setup-sh7206.o clock-sh7206.o
diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7206.c b/arch/sh/kernel/cpu/sh2a/clock-sh7206.c
new file mode 100644
index 0000000..a9ad309
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/clock-sh7206.c
@@ -0,0 +1,85 @@
+/*
+ * arch/sh/kernel/cpu/sh2a/clock-sh7206.c
+ *
+ * SH7206 support for the clock framework
+ *
+ *  Copyright (C) 2006  Yoshinori Sato
+ *
+ * Based on clock-sh4.c
+ *  Copyright (C) 2005  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/clock.h>
+#include <asm/freq.h>
+#include <asm/io.h>
+
+static const int pll1rate[] = { 1, 2, 3, 4, 6, 8 };
+static const int pfc_divisors[] = { 1, 2, 3, 4, 6, 8, 12 };
+#define ifc_divisors pfc_divisors
+
+#if (CONFIG_SH_CLK_MD == 2)
+#define PLL2 (4)
+#elif (CONFIG_SH_CLK_MD == 6)
+#define PLL2 (2)
+#elif (CONFIG_SH_CLK_MD == 7)
+#define PLL2 (1)
+#else
+#error "Illegal Clock Mode!"
+#endif
+
+static void master_clk_init(struct clk *clk)
+{
+	clk->rate *= PLL2 * pll1rate[(ctrl_inw(FREQCR) >> 8) & 0x0007];
+}
+
+static struct clk_ops sh7206_master_clk_ops = {
+	.init		= master_clk_init,
+};
+
+static void module_clk_recalc(struct clk *clk)
+{
+	int idx = (ctrl_inw(FREQCR) & 0x0007);
+	clk->rate = clk->parent->rate / pfc_divisors[idx];
+}
+
+static struct clk_ops sh7206_module_clk_ops = {
+	.recalc		= module_clk_recalc,
+};
+
+static void bus_clk_recalc(struct clk *clk)
+{
+	clk->rate = clk->parent->rate / pll1rate[(ctrl_inw(FREQCR) >> 8) & 0x0007];
+}
+
+static struct clk_ops sh7206_bus_clk_ops = {
+	.recalc		= bus_clk_recalc,
+};
+
+static void cpu_clk_recalc(struct clk *clk)
+{
+	int idx = (ctrl_inw(FREQCR) & 0x0007);
+	clk->rate = clk->parent->rate / ifc_divisors[idx];
+}
+
+static struct clk_ops sh7206_cpu_clk_ops = {
+	.recalc		= cpu_clk_recalc,
+};
+
+static struct clk_ops *sh7206_clk_ops[] = {
+	&sh7206_master_clk_ops,
+	&sh7206_module_clk_ops,
+	&sh7206_bus_clk_ops,
+	&sh7206_cpu_clk_ops,
+};
+
+void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
+{
+	if (idx < ARRAY_SIZE(sh7206_clk_ops))
+		*ops = sh7206_clk_ops[idx];
+}
+
diff --git a/arch/sh/kernel/cpu/sh2a/probe.c b/arch/sh/kernel/cpu/sh2a/probe.c
new file mode 100644
index 0000000..87c6c05
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/probe.c
@@ -0,0 +1,39 @@
+/*
+ * arch/sh/kernel/cpu/sh2a/probe.c
+ *
+ * CPU Subtype Probing for SH-2A.
+ *
+ * Copyright (C) 2004, 2005 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <asm/processor.h>
+#include <asm/cache.h>
+
+int __init detect_cpu_and_cache_system(void)
+{
+	/* Just SH7206 for now .. */
+	cpu_data->type			= CPU_SH7206;
+
+	cpu_data->dcache.ways		= 4;
+	cpu_data->dcache.way_incr	= (1 << 11);
+	cpu_data->dcache.sets		= 128;
+	cpu_data->dcache.entry_shift	= 4;
+	cpu_data->dcache.linesz		= L1_CACHE_BYTES;
+	cpu_data->dcache.flags		= 0;
+
+	/*
+	 * The icache is the same as the dcache as far as this setup is
+	 * concerned. The only real difference in hardware is that the icache
+	 * lacks the U bit that the dcache has, none of this has any bearing
+	 * on the cache info.
+	 */
+	cpu_data->icache		= cpu_data->dcache;
+
+	return 0;
+}
+
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
new file mode 100644
index 0000000..cdfeef49
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
@@ -0,0 +1,58 @@
+/*
+ * SH7206 Setup
+ *
+ *  Copyright (C) 2006  Yoshinori Sato
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <asm/sci.h>
+
+static struct plat_sci_port sci_platform_data[] = {
+	{
+		.mapbase	= 0xfffe8000,
+		.flags		= UPF_BOOT_AUTOCONF,
+		.type		= PORT_SCIF,
+		.irqs		=  { 240, 241, 242, 243},
+	}, {
+		.mapbase	= 0xfffe8800,
+		.flags		= UPF_BOOT_AUTOCONF,
+		.type		= PORT_SCIF,
+		.irqs		=  { 244, 245, 246, 247},
+	}, {
+		.mapbase	= 0xfffe9000,
+		.flags		= UPF_BOOT_AUTOCONF,
+		.type		= PORT_SCIF,
+		.irqs		=  { 248, 249, 250, 251},
+	}, {
+		.mapbase	= 0xfffe9800,
+		.flags		= UPF_BOOT_AUTOCONF,
+		.type		= PORT_SCIF,
+		.irqs		=  { 252, 253, 254, 255},
+	}, {
+		.flags = 0,
+	}
+};
+
+static struct platform_device sci_device = {
+	.name		= "sh-sci",
+	.id		= -1,
+	.dev		= {
+		.platform_data	= sci_platform_data,
+	},
+};
+
+static struct platform_device *sh7206_devices[] __initdata = {
+	&sci_device,
+};
+
+static int __init sh7206_devices_setup(void)
+{
+	return platform_add_devices(sh7206_devices,
+				    ARRAY_SIZE(sh7206_devices));
+}
+__initcall(sh7206_devices_setup);
diff --git a/arch/sh/kernel/cpu/sh3/Makefile b/arch/sh/kernel/cpu/sh3/Makefile
index 58d3815..83905e4 100644
--- a/arch/sh/kernel/cpu/sh3/Makefile
+++ b/arch/sh/kernel/cpu/sh3/Makefile
@@ -2,7 +2,7 @@
 # Makefile for the Linux/SuperH SH-3 backends.
 #
 
-obj-y	:= ex.o probe.o
+obj-y	:= ex.o probe.o entry.o
 
 # CPU subtype setup
 obj-$(CONFIG_CPU_SUBTYPE_SH7705)	+= setup-sh7705.o
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7709.c b/arch/sh/kernel/cpu/sh3/clock-sh7709.c
index 10461a7..b791a29 100644
--- a/arch/sh/kernel/cpu/sh3/clock-sh7709.c
+++ b/arch/sh/kernel/cpu/sh3/clock-sh7709.c
@@ -24,7 +24,7 @@
 
 static void set_bus_parent(struct clk *clk)
 {
-	struct clk *bus_clk = clk_get("bus_clk");
+	struct clk *bus_clk = clk_get(NULL, "bus_clk");
 	clk->parent = bus_clk;
 	clk_put(bus_clk);
 }
diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S
new file mode 100644
index 0000000..8c0dc27
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/entry.S
@@ -0,0 +1,693 @@
+/*
+ * arch/sh/kernel/entry.S
+ *
+ *  Copyright (C) 1999, 2000, 2002  Niibe Yutaka
+ *  Copyright (C) 2003 - 2006  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/sys.h>
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
+#include <asm/unistd.h>
+#include <asm/cpu/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+
+! NOTE:
+! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address
+! to be jumped to is too far, but this causes an illegal slot exception.
+
+/*	
+ * entry.S contains the system-call and fault low-level handling routines.
+ * This also contains the timer-interrupt handler, as well as all interrupts
+ * and faults that can result in a task-switch.
+ *
+ * NOTE: This code handles signal-recognition, which happens every time
+ * after a timer-interrupt and after each system call.
+ *
+ * NOTE: This code uses a convention that instructions in the delay slot
+ * of a transfer-control instruction are indented by an extra space, thus:
+ *
+ *    jmp	@k0	    ! control-transfer instruction
+ *     ldc	k1, ssr     ! delay slot
+ *
+ * Stack layout in 'ret_from_syscall':
+ * 	ptrace needs to have all regs on the stack.
+ *	if the order here is changed, it needs to be
+ *	updated in ptrace.c and ptrace.h
+ *
+ *	r0
+ *      ...
+ *	r15 = stack pointer
+ *	spc
+ *	pr
+ *	ssr
+ *	gbr
+ *	mach
+ *	macl
+ *	syscall #
+ *
+ */
+#if defined(CONFIG_KGDB_NMI)
+NMI_VEC = 0x1c0			! Must catch early for debounce
+#endif
+
+/* Offsets to the stack */
+OFF_R0  =  0		/* Return value. New ABI also arg4 */
+OFF_R1  =  4     	/* New ABI: arg5 */
+OFF_R2  =  8     	/* New ABI: arg6 */
+OFF_R3  =  12     	/* New ABI: syscall_nr */
+OFF_R4  =  16     	/* New ABI: arg0 */
+OFF_R5  =  20     	/* New ABI: arg1 */
+OFF_R6  =  24     	/* New ABI: arg2 */
+OFF_R7  =  28     	/* New ABI: arg3 */
+OFF_SP	=  (15*4)
+OFF_PC  =  (16*4)
+OFF_SR	=  (16*4+8)
+OFF_TRA	=  (16*4+6*4)
+
+
+#define k0	r0
+#define k1	r1
+#define k2	r2
+#define k3	r3
+#define k4	r4
+
+#define g_imask		r6	/* r6_bank1 */
+#define k_g_imask	r6_bank	/* r6_bank1 */
+#define current		r7	/* r7_bank1 */
+
+#include <asm/entry-macros.S>
+	
+/*
+ * Kernel mode register usage:
+ *	k0	scratch
+ *	k1	scratch
+ *	k2	scratch (Exception code)
+ *	k3	scratch (Return address)
+ *	k4	scratch
+ *	k5	reserved
+ *	k6	Global Interrupt Mask (0--15 << 4)
+ *	k7	CURRENT_THREAD_INFO (pointer to current thread info)
+ */
+
+!
+! TLB Miss / Initial Page write exception handling
+!			_and_
+! TLB hits, but the access violate the protection.
+! It can be valid access, such as stack grow and/or C-O-W.
+!
+!
+! Find the pmd/pte entry and loadtlb
+! If it's not found, cause address error (SEGV)
+!
+! Although this could be written in assembly language (and it'd be faster),
+! this first version depends *much* on C implementation.
+!
+
+#if defined(CONFIG_MMU)
+	.align	2
+ENTRY(tlb_miss_load)
+	bra	call_dpf
+	 mov	#0, r5
+
+	.align	2
+ENTRY(tlb_miss_store)
+	bra	call_dpf
+	 mov	#1, r5
+
+	.align	2
+ENTRY(initial_page_write)
+	bra	call_dpf
+	 mov	#1, r5
+
+	.align	2
+ENTRY(tlb_protection_violation_load)
+	bra	call_dpf
+	 mov	#0, r5
+
+	.align	2
+ENTRY(tlb_protection_violation_store)
+	bra	call_dpf
+	 mov	#1, r5
+
+call_dpf:
+	mov.l	1f, r0
+ 	mov.l	@r0, r6		! address
+	mov.l	3f, r0
+
+	jmp	@r0
+ 	 mov	r15, r4		! regs
+
+	.align 2
+1:	.long	MMU_TEA
+3:	.long	do_page_fault
+
+	.align	2
+ENTRY(address_error_load)
+	bra	call_dae
+	 mov	#0,r5		! writeaccess = 0
+
+	.align	2
+ENTRY(address_error_store)
+	bra	call_dae
+	 mov	#1,r5		! writeaccess = 1
+
+	.align	2
+call_dae:
+	mov.l	1f, r0
+	mov.l	@r0, r6		! address
+	mov.l	2f, r0
+	jmp	@r0
+	 mov	r15, r4		! regs
+
+	.align 2
+1:	.long	MMU_TEA
+2:	.long   do_address_error
+#endif /* CONFIG_MMU */
+
+#if defined(CONFIG_SH_STANDARD_BIOS)
+	/* Unwind the stack and jmp to the debug entry */
+debug_kernel_fw:
+	mov.l	@r15+, r0
+	mov.l	@r15+, r1
+	mov.l	@r15+, r2
+	mov.l	@r15+, r3
+	mov.l	@r15+, r4
+	mov.l	@r15+, r5
+	mov.l	@r15+, r6
+	mov.l	@r15+, r7
+	stc	sr, r8
+	mov.l	1f, r9			! BL =1, RB=1, IMASK=0x0F
+	or	r9, r8
+	ldc	r8, sr			! here, change the register bank
+	mov.l	@r15+, r8
+	mov.l	@r15+, r9
+	mov.l	@r15+, r10
+	mov.l	@r15+, r11
+	mov.l	@r15+, r12
+	mov.l	@r15+, r13
+	mov.l	@r15+, r14
+	mov.l	@r15+, k0
+	ldc.l	@r15+, spc
+	lds.l	@r15+, pr
+	mov.l	@r15+, k1
+	ldc.l	@r15+, gbr
+	lds.l	@r15+, mach
+	lds.l	@r15+, macl
+	mov	k0, r15
+	!
+	mov.l	2f, k0
+	mov.l	@k0, k0
+	jmp	@k0
+	 ldc	k1, ssr
+	.align	2
+1:	.long	0x300000f0
+2:	.long	gdb_vbr_vector
+#endif /* CONFIG_SH_STANDARD_BIOS */
+
+restore_all:
+	mov.l	@r15+, r0
+	mov.l	@r15+, r1
+	mov.l	@r15+, r2
+	mov.l	@r15+, r3
+	mov.l	@r15+, r4
+	mov.l	@r15+, r5
+	mov.l	@r15+, r6
+	mov.l	@r15+, r7
+	!
+	stc	sr, r8
+	mov.l	7f, r9
+	or	r9, r8			! BL =1, RB=1
+	ldc	r8, sr			! here, change the register bank
+	!
+	mov.l	@r15+, r8
+	mov.l	@r15+, r9
+	mov.l	@r15+, r10
+	mov.l	@r15+, r11
+	mov.l	@r15+, r12
+	mov.l	@r15+, r13
+	mov.l	@r15+, r14
+	mov.l	@r15+, k4		! original stack pointer
+	ldc.l	@r15+, spc
+	lds.l	@r15+, pr
+	mov.l	@r15+, k3		! original SR
+	ldc.l	@r15+, gbr
+	lds.l	@r15+, mach
+	lds.l	@r15+, macl
+	add	#4, r15			! Skip syscall number
+	!
+#ifdef CONFIG_SH_DSP
+	mov.l	@r15+, k0		! DSP mode marker
+	mov.l	5f, k1
+	cmp/eq	k0, k1			! Do we have a DSP stack frame?
+	bf	skip_restore
+
+	stc	sr, k0			! Enable CPU DSP mode
+	or	k1, k0			! (within kernel it may be disabled)
+	ldc	k0, sr
+	mov	r2, k0			! Backup r2
+
+	! Restore DSP registers from stack
+	mov	r15, r2
+	movs.l	@r2+, a1
+	movs.l	@r2+, a0g
+	movs.l	@r2+, a1g
+	movs.l	@r2+, m0
+	movs.l	@r2+, m1
+	mov	r2, r15
+
+	lds.l	@r15+, a0
+	lds.l	@r15+, x0
+	lds.l	@r15+, x1
+	lds.l	@r15+, y0
+	lds.l	@r15+, y1
+	lds.l	@r15+, dsr
+	ldc.l	@r15+, rs
+	ldc.l	@r15+, re
+	ldc.l	@r15+, mod
+
+	mov	k0, r2			! Restore r2
+skip_restore:
+#endif
+	!
+	! Calculate new SR value
+	mov	k3, k2			! original SR value
+	mov	#0xf0, k1
+	extu.b	k1, k1
+	not	k1, k1
+	and	k1, k2			! Mask original SR value
+	!
+	mov	k3, k0			! Calculate IMASK-bits
+	shlr2	k0
+	and	#0x3c, k0
+	cmp/eq	#0x3c, k0
+	bt/s	6f
+	 shll2	k0
+	mov	g_imask, k0
+	!
+6:	or	k0, k2			! Set the IMASK-bits
+	ldc	k2, ssr
+	!
+#if defined(CONFIG_KGDB_NMI)
+	! Clear in_nmi
+	mov.l	6f, k0
+	mov	#0, k1
+	mov.b	k1, @k0
+#endif
+	mov.l	@r15+, k2		! restore EXPEVT
+	mov	k4, r15
+	rte
+	 nop
+
+	.align	2
+5:	.long	0x00001000	! DSP
+7:	.long	0x30000000
+
+! common exception handler
+#include "../../entry-common.S"
+	
+! Exception Vector Base
+!
+!	Should be aligned page boundary.
+!
+	.balign 	4096,0,4096
+ENTRY(vbr_base)
+	.long	0
+!
+	.balign 	256,0,256
+general_exception:
+	mov.l	1f, k2
+	mov.l	2f, k3
+	bra	handle_exception
+	 mov.l	@k2, k2
+	.align	2
+1:	.long	EXPEVT
+2:	.long	ret_from_exception
+!
+!
+
+/* This code makes some assumptions to improve performance.
+ * Make sure they are still true. */
+#if PTRS_PER_PGD != PTRS_PER_PTE
+#error PGD and PTE sizes don't match
+#endif
+
+/* gas doesn't flag impossible values for mov #immediate as an error */
+#if (_PAGE_PRESENT >> 2) > 0x7f
+#error cannot load PAGE_PRESENT as an immediate
+#endif
+#if _PAGE_DIRTY > 0x7f
+#error cannot load PAGE_DIRTY as an immediate
+#endif
+#if (_PAGE_PRESENT << 2) != _PAGE_ACCESSED
+#error cannot derive PAGE_ACCESSED from PAGE_PRESENT
+#endif
+
+#if defined(CONFIG_CPU_SH4)
+#define ldmmupteh(r)	mov.l	8f, r
+#else
+#define ldmmupteh(r)	mov	#MMU_PTEH, r
+#endif
+
+	.balign 	1024,0,1024
+tlb_miss:
+#ifdef COUNT_EXCEPTIONS
+	! Increment the counts
+	mov.l	9f, k1
+	mov.l	@k1, k2
+	add	#1, k2
+	mov.l	k2, @k1
+#endif
+
+	! k0 scratch
+	! k1 pgd and pte pointers
+	! k2 faulting address
+	! k3 pgd and pte index masks
+	! k4 shift
+
+	! Load up the pgd entry (k1)
+
+	ldmmupteh(k0)			!  9 LS (latency=2)	MMU_PTEH
+
+	mov.w	4f, k3			!  8 LS (latency=2)	(PTRS_PER_PGD-1) << 2
+	mov	#-(PGDIR_SHIFT-2), k4	!  6 EX
+
+	mov.l	@(MMU_TEA-MMU_PTEH,k0), k2	! 18 LS (latency=2)
+
+	mov.l	@(MMU_TTB-MMU_PTEH,k0), k1	! 18 LS (latency=2)
+
+	mov	k2, k0			!   5 MT (latency=0)
+	shld	k4, k0			!  99 EX
+
+	and	k3, k0			!  78 EX
+
+	mov.l	@(k0, k1), k1		!  21 LS (latency=2)
+	mov	#-(PAGE_SHIFT-2), k4	!   6 EX
+
+	! Load up the pte entry (k2)
+
+	mov	k2, k0			!   5 MT (latency=0)
+	shld	k4, k0			!  99 EX
+
+	tst	k1, k1			!  86 MT
+
+	bt	20f			! 110 BR
+
+	and	k3, k0			!  78 EX
+	mov.w	5f, k4			!   8 LS (latency=2)	_PAGE_PRESENT
+
+	mov.l	@(k0, k1), k2		!  21 LS (latency=2)
+	add	k0, k1			!  49 EX
+
+#ifdef CONFIG_CPU_HAS_PTEA
+	! Test the entry for present and _PAGE_ACCESSED
+
+	mov	#-28, k3		!   6 EX
+	mov	k2, k0			!   5 MT (latency=0)
+
+	tst	k4, k2			!  68 MT
+	shld	k3, k0			!  99 EX
+
+	bt	20f			! 110 BR
+
+	! Set PTEA register
+	! MMU_PTEA = ((pteval >> 28) & 0xe) | (pteval & 0x1)
+	!
+	! k0=pte>>28, k1=pte*, k2=pte, k3=<unused>, k4=_PAGE_PRESENT
+
+	and	#0xe, k0		!  79 EX
+
+	mov	k0, k3			!   5 MT (latency=0)
+	mov	k2, k0			!   5 MT (latency=0)
+
+	and	#1, k0			!  79 EX
+
+	or	k0, k3			!  82 EX
+
+	ldmmupteh(k0)			!   9 LS (latency=2)
+	shll2	k4			! 101 EX		_PAGE_ACCESSED
+
+	tst	k4, k2			!  68 MT
+
+	mov.l	k3, @(MMU_PTEA-MMU_PTEH,k0)	! 27 LS
+
+	mov.l	7f, k3			!   9 LS (latency=2)	_PAGE_FLAGS_HARDWARE_MASK
+
+	! k0=MMU_PTEH, k1=pte*, k2=pte, k3=_PAGE_FLAGS_HARDWARE, k4=_PAGE_ACCESSED
+#else
+
+	! Test the entry for present and _PAGE_ACCESSED
+
+	mov.l	7f, k3			!   9 LS (latency=2)	_PAGE_FLAGS_HARDWARE_MASK
+	tst	k4, k2			!  68 MT
+
+	shll2	k4			! 101 EX		_PAGE_ACCESSED
+	ldmmupteh(k0)			!   9 LS (latency=2)
+
+	bt	20f			! 110 BR
+	tst	k4, k2			!  68 MT
+
+	! k0=MMU_PTEH, k1=pte*, k2=pte, k3=_PAGE_FLAGS_HARDWARE, k4=_PAGE_ACCESSED
+
+#endif
+
+	! Set up the entry
+
+	and	k2, k3			!  78 EX
+	bt/s	10f			! 108 BR
+
+	 mov.l	k3, @(MMU_PTEL-MMU_PTEH,k0)	! 27 LS
+
+	ldtlb				! 128 CO
+
+	! At least one instruction between ldtlb and rte
+	nop				! 119 NOP
+
+	rte				! 126 CO
+
+	 nop				! 119 NOP
+
+
+10:	or	k4, k2			!  82 EX
+
+	ldtlb				! 128 CO
+
+	! At least one instruction between ldtlb and rte
+	mov.l	k2, @k1			!  27 LS
+
+	rte				! 126 CO
+
+	! Note we cannot execute mov here, because it is executed after
+	! restoring SSR, so would be executed in user space.
+	 nop				! 119 NOP
+
+
+	.align 5
+	! One cache line if possible...
+1:	.long	swapper_pg_dir
+4:	.short	(PTRS_PER_PGD-1) << 2
+5:	.short	_PAGE_PRESENT
+7:	.long	_PAGE_FLAGS_HARDWARE_MASK
+8:	.long	MMU_PTEH
+#ifdef COUNT_EXCEPTIONS
+9:	.long	exception_count_miss
+#endif
+
+	! Either pgd or pte not present
+20:	mov.l	1f, k2
+	mov.l	4f, k3
+	bra	handle_exception
+	 mov.l	@k2, k2
+!
+	.balign 	512,0,512
+interrupt:
+	mov.l	2f, k2
+	mov.l	3f, k3
+#if defined(CONFIG_KGDB_NMI)
+	! Debounce (filter nested NMI)
+	mov.l	@k2, k0
+	mov.l	5f, k1
+	cmp/eq	k1, k0
+	bf	0f
+	mov.l	6f, k1
+	tas.b	@k1
+	bt	0f
+	rte
+	 nop
+	.align	2
+5:	.long	NMI_VEC
+6:	.long	in_nmi
+0:
+#endif /* defined(CONFIG_KGDB_NMI) */
+	bra	handle_exception
+	 mov	#-1, k2		! interrupt exception marker
+
+	.align	2
+1:	.long	EXPEVT
+2:	.long	INTEVT
+3:	.long	ret_from_irq
+4:	.long	ret_from_exception
+
+!
+!
+	.align	2
+ENTRY(handle_exception)
+	! Using k0, k1 for scratch registers (r0_bank1, r1_bank),
+	! save all registers onto stack.
+	!
+	stc	ssr, k0		! Is it from kernel space?
+	shll	k0		! Check MD bit (bit30) by shifting it into...
+	shll	k0		!       ...the T bit
+	bt/s	1f		! It's a kernel to kernel transition.
+	 mov	r15, k0		! save original stack to k0
+	/* User space to kernel */
+	mov	#(THREAD_SIZE >> 10), k1
+	shll8	k1		! k1 := THREAD_SIZE
+	shll2	k1
+	add	current, k1
+	mov	k1, r15		! change to kernel stack
+	!
+1:	mov.l	2f, k1
+	!
+#ifdef CONFIG_SH_DSP
+	mov.l	r2, @-r15		! Save r2, we need another reg
+	stc	sr, k4
+	mov.l	1f, r2
+	tst	r2, k4			! Check if in DSP mode
+	mov.l	@r15+, r2		! Restore r2 now
+	bt/s	skip_save
+	 mov	#0, k4			! Set marker for no stack frame
+
+	mov	r2, k4			! Backup r2 (in k4) for later
+
+	! Save DSP registers on stack
+	stc.l	mod, @-r15
+	stc.l	re, @-r15
+	stc.l	rs, @-r15
+	sts.l	dsr, @-r15
+	sts.l	y1, @-r15
+	sts.l	y0, @-r15
+	sts.l	x1, @-r15
+	sts.l	x0, @-r15
+	sts.l	a0, @-r15
+
+	! GAS is broken, does not generate correct "movs.l Ds,@-As" instr.
+
+	! FIXME: Make sure that this is still the case with newer toolchains,
+	! as we're not at all interested in supporting ancient toolchains at
+	! this point. -- PFM.
+
+	mov	r15, r2
+	.word	0xf653			! movs.l	a1, @-r2
+	.word	0xf6f3			! movs.l	a0g, @-r2
+	.word	0xf6d3			! movs.l	a1g, @-r2
+	.word	0xf6c3			! movs.l	m0, @-r2
+	.word	0xf6e3			! movs.l	m1, @-r2
+	mov	r2, r15
+
+	mov	k4, r2			! Restore r2
+	mov.l	1f, k4			! Force DSP stack frame
+skip_save:
+	mov.l	k4, @-r15		! Push DSP mode marker onto stack
+#endif
+	! Save the user registers on the stack.
+	mov.l	k2, @-r15	! EXPEVT
+
+	mov	#-1, k4
+	mov.l	k4, @-r15	! set TRA (default: -1)
+	!
+	sts.l	macl, @-r15
+	sts.l	mach, @-r15
+	stc.l	gbr, @-r15
+	stc.l	ssr, @-r15
+	sts.l	pr, @-r15
+	stc.l	spc, @-r15
+	!
+	lds	k3, pr		! Set the return address to pr
+	!
+	mov.l	k0, @-r15	! save original stack
+	mov.l	r14, @-r15
+	mov.l	r13, @-r15
+	mov.l	r12, @-r15
+	mov.l	r11, @-r15
+	mov.l	r10, @-r15
+	mov.l	r9, @-r15
+	mov.l	r8, @-r15
+	!
+	stc	sr, r8		! Back to normal register bank, and
+	or	k1, r8		! Block all interrupts
+	mov.l	3f, k1
+	and	k1, r8		! ...
+	ldc	r8, sr		! ...changed here.
+	!
+	mov.l	r7, @-r15
+	mov.l	r6, @-r15
+	mov.l	r5, @-r15
+	mov.l	r4, @-r15
+	mov.l	r3, @-r15
+	mov.l	r2, @-r15
+	mov.l	r1, @-r15
+	mov.l	r0, @-r15
+
+	/*
+	 * This gets a bit tricky.. in the INTEVT case we don't want to use
+	 * the VBR offset as a destination in the jump call table, since all
+	 * of the destinations are the same. In this case, (interrupt) sets
+	 * a marker in r2 (now r2_bank since SR.RB changed), which we check
+	 * to determine the exception type. For all other exceptions, we
+	 * forcibly read EXPEVT from memory and fix up the jump address, in
+	 * the interrupt exception case we jump to do_IRQ() and defer the
+	 * INTEVT read until there. As a bonus, we can also clean up the SR.RB
+	 * checks that do_IRQ() was doing..
+	 */
+	stc	r2_bank, r8
+	cmp/pz	r8
+	bf	interrupt_exception
+	shlr2	r8
+	shlr	r8
+
+#ifdef COUNT_EXCEPTIONS
+	mov.l	5f, r9
+	add	r8, r9
+	mov.l	@r9, r10
+	add	#1, r10
+	mov.l	r10, @r9
+#endif
+
+	mov.l	4f, r9
+	add	r8, r9
+	mov.l	@r9, r9
+	jmp	@r9
+	 nop
+	rts
+	 nop
+
+	.align	2
+1:	.long	0x00001000	! DSP=1
+2:	.long	0x000080f0	! FD=1, IMASK=15
+3:	.long	0xcfffffff	! RB=0, BL=0
+4:	.long	exception_handling_table
+#ifdef COUNT_EXCEPTIONS
+5:	.long	exception_count_table
+#endif
+
+interrupt_exception:
+	mov.l	1f, r9
+	jmp	@r9
+	 nop
+	rts
+	 nop
+
+	.align 2
+1:	.long	do_IRQ
+
+	.align	2
+ENTRY(exception_none)
+	rts
+	 nop
diff --git a/arch/sh/kernel/cpu/sh4/Makefile b/arch/sh/kernel/cpu/sh4/Makefile
index 8dbf389..6e415ba 100644
--- a/arch/sh/kernel/cpu/sh4/Makefile
+++ b/arch/sh/kernel/cpu/sh4/Makefile
@@ -2,7 +2,8 @@
 # Makefile for the Linux/SuperH SH-4 backends.
 #
 
-obj-y	:= ex.o probe.o
+obj-y	:= ex.o probe.o common.o
+common-y	+= $(addprefix ../sh3/, entry.o)
 
 obj-$(CONFIG_SH_FPU)                    += fpu.o
 obj-$(CONFIG_SH_STORE_QUEUES)		+= sq.o
diff --git a/arch/sh/kernel/cpu/sh4/clock-sh4-202.c b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c
index bfdf5fe..fa2019a 100644
--- a/arch/sh/kernel/cpu/sh4/clock-sh4-202.c
+++ b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c
@@ -97,7 +97,7 @@
 
 static int shoc_clk_verify_rate(struct clk *clk, unsigned long rate)
 {
-	struct clk *bclk = clk_get("bus_clk");
+	struct clk *bclk = clk_get(NULL, "bus_clk");
 	unsigned long bclk_rate = clk_get_rate(bclk);
 
 	clk_put(bclk);
@@ -151,7 +151,7 @@
 
 static int __init sh4202_clk_init(void)
 {
-	struct clk *clk = clk_get("master_clk");
+	struct clk *clk = clk_get(NULL, "master_clk");
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(sh4202_onchip_clocks); i++) {
diff --git a/arch/sh/kernel/cpu/sh4/clock-sh7780.c b/arch/sh/kernel/cpu/sh4/clock-sh7780.c
index 93ad367..9e6a216 100644
--- a/arch/sh/kernel/cpu/sh4/clock-sh7780.c
+++ b/arch/sh/kernel/cpu/sh4/clock-sh7780.c
@@ -98,7 +98,7 @@
 
 static int __init sh7780_clk_init(void)
 {
-	struct clk *clk = clk_get("master_clk");
+	struct clk *clk = clk_get(NULL, "master_clk");
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(sh7780_onchip_clocks); i++) {
diff --git a/arch/sh/kernel/cpu/sh4/fpu.c b/arch/sh/kernel/cpu/sh4/fpu.c
index f486c07..7624677 100644
--- a/arch/sh/kernel/cpu/sh4/fpu.c
+++ b/arch/sh/kernel/cpu/sh4/fpu.c
@@ -282,11 +282,8 @@
 			grab_fpu(regs);
 			restore_fpu(tsk);
 			set_tsk_thread_flag(tsk, TIF_USEDFPU);
-		} else {
-			tsk->thread.trap_no = 11;
-			tsk->thread.error_code = 0;
+		} else
 			force_sig(SIGFPE, tsk);
-		}
 
 		regs->pc = nextpc;
 		return 1;
@@ -296,29 +293,29 @@
 }
 
 asmlinkage void
-do_fpu_error(unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7,
-	     struct pt_regs regs)
+do_fpu_error(unsigned long r4, unsigned long r5, unsigned long r6,
+	     unsigned long r7, struct pt_regs __regs)
 {
+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
 	struct task_struct *tsk = current;
 
-	if (ieee_fpe_handler (&regs))
+	if (ieee_fpe_handler(regs))
 		return;
 
-	regs.pc += 2;
-	save_fpu(tsk, &regs);
-	tsk->thread.trap_no = 11;
-	tsk->thread.error_code = 0;
+	regs->pc += 2;
+	save_fpu(tsk, regs);
 	force_sig(SIGFPE, tsk);
 }
 
 asmlinkage void
 do_fpu_state_restore(unsigned long r4, unsigned long r5, unsigned long r6,
-		     unsigned long r7, struct pt_regs regs)
+		     unsigned long r7, struct pt_regs __regs)
 {
+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
 	struct task_struct *tsk = current;
 
-	grab_fpu(&regs);
-	if (!user_mode(&regs)) {
+	grab_fpu(regs);
+	if (!user_mode(regs)) {
 		printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
 		return;
 	}
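
The switch from a by-value struct pt_regs to RELOC_HIDE(&__regs, 0) reflects how these handlers are entered: the low-level code pushes the full register frame, and the C handler finds it on the stack immediately after the four register-passed arguments. A sketch of the resulting handler shape (the handler name is hypothetical):

/* Hedged sketch of the calling convention used by the handlers above. */
asmlinkage void example_trap_handler(unsigned long r4, unsigned long r5,
				     unsigned long r6, unsigned long r7,
				     struct pt_regs __regs)
{
	/* take the address of the on-stack frame without letting the
	 * compiler assume it owns a private by-value copy */
	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);

	regs->pc += 2;		/* e.g. skip the faulting instruction */
}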
diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c
index c294de1..afe0f1b 100644
--- a/arch/sh/kernel/cpu/sh4/probe.c
+++ b/arch/sh/kernel/cpu/sh4/probe.c
@@ -79,16 +79,16 @@
 	case 0x205:
 		cpu_data->type = CPU_SH7750;
 		cpu_data->flags |= CPU_HAS_P2_FLUSH_BUG | CPU_HAS_FPU |
-				   CPU_HAS_PERF_COUNTER | CPU_HAS_PTEA;
+				   CPU_HAS_PERF_COUNTER;
 		break;
 	case 0x206:
 		cpu_data->type = CPU_SH7750S;
 		cpu_data->flags |= CPU_HAS_P2_FLUSH_BUG | CPU_HAS_FPU |
-				   CPU_HAS_PERF_COUNTER | CPU_HAS_PTEA;
+				   CPU_HAS_PERF_COUNTER;
 		break;
 	case 0x1100:
 		cpu_data->type = CPU_SH7751;
-		cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PTEA;
+		cpu_data->flags |= CPU_HAS_FPU;
 		break;
 	case 0x2000:
 		cpu_data->type = CPU_SH73180;
@@ -126,23 +126,22 @@
 		break;
 	case 0x8000:
 		cpu_data->type = CPU_ST40RA;
-		cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PTEA;
+		cpu_data->flags |= CPU_HAS_FPU;
 		break;
 	case 0x8100:
 		cpu_data->type = CPU_ST40GX1;
-		cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PTEA;
+		cpu_data->flags |= CPU_HAS_FPU;
 		break;
 	case 0x700:
 		cpu_data->type = CPU_SH4_501;
 		cpu_data->icache.ways = 2;
 		cpu_data->dcache.ways = 2;
-		cpu_data->flags |= CPU_HAS_PTEA;
 		break;
 	case 0x600:
 		cpu_data->type = CPU_SH4_202;
 		cpu_data->icache.ways = 2;
 		cpu_data->dcache.ways = 2;
-		cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PTEA;
+		cpu_data->flags |= CPU_HAS_FPU;
 		break;
 	case 0x500 ... 0x501:
 		switch (prr) {
@@ -160,7 +159,7 @@
 		cpu_data->icache.ways = 2;
 		cpu_data->dcache.ways = 2;
 
-		cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PTEA;
+		cpu_data->flags |= CPU_HAS_FPU;
 
 		break;
 	default:
@@ -173,6 +172,10 @@
 	cpu_data->dcache.ways = 1;
 #endif
 
+#ifdef CONFIG_CPU_HAS_PTEA
+	cpu_data->flags |= CPU_HAS_PTEA;
+#endif
+
 	/*
 	 * On anything that's not a direct-mapped cache, look to the CVR
 	 * for I/D-cache specifics.
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
index 50812d5..bbcb06f 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
@@ -2,6 +2,7 @@
  * SH7750/SH7751 Setup
  *
  *  Copyright (C) 2006  Paul Mundt
+ *  Copyright (C) 2006  Jamie Lenehan
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
@@ -10,6 +11,7 @@
 #include <linux/platform_device.h>
 #include <linux/init.h>
 #include <linux/serial.h>
+#include <linux/io.h>
 #include <asm/sci.h>
 
 static struct plat_sci_port sci_platform_data[] = {
@@ -46,3 +48,71 @@
 				    ARRAY_SIZE(sh7750_devices));
 }
 __initcall(sh7750_devices_setup);
+
+static struct ipr_data sh7750_ipr_map[] = {
+	/* IRQ, IPR-idx, shift, priority */
+	{ 16, 0, 12, 2 }, /* TMU0 TUNI*/
+	{ 17, 0, 12, 2 }, /* TMU1 TUNI */
+	{ 18, 0,  4, 2 }, /* TMU2 TUNI */
+	{ 19, 0,  4, 2 }, /* TMU2 TIPCI */
+	{ 27, 1, 12, 2 }, /* WDT ITI */
+	{ 20, 0,  0, 2 }, /* RTC ATI (alarm) */
+	{ 21, 0,  0, 2 }, /* RTC PRI (period) */
+	{ 22, 0,  0, 2 }, /* RTC CUI (carry) */
+	{ 23, 1,  4, 3 }, /* SCI ERI */
+	{ 24, 1,  4, 3 }, /* SCI RXI */
+	{ 25, 1,  4, 3 }, /* SCI TXI */
+	{ 40, 2,  4, 3 }, /* SCIF ERI */
+	{ 41, 2,  4, 3 }, /* SCIF RXI */
+	{ 42, 2,  4, 3 }, /* SCIF BRI */
+	{ 43, 2,  4, 3 }, /* SCIF TXI */
+	{ 34, 2,  8, 7 }, /* DMAC DMTE0 */
+	{ 35, 2,  8, 7 }, /* DMAC DMTE1 */
+	{ 36, 2,  8, 7 }, /* DMAC DMTE2 */
+	{ 37, 2,  8, 7 }, /* DMAC DMTE3 */
+	{ 28, 2,  8, 7 }, /* DMAC DMAE */
+};
+
+static struct ipr_data sh7751_ipr_map[] = {
+	{ 44, 2,  8, 7 }, /* DMAC DMTE4 */
+	{ 45, 2,  8, 7 }, /* DMAC DMTE5 */
+	{ 46, 2,  8, 7 }, /* DMAC DMTE6 */
+	{ 47, 2,  8, 7 }, /* DMAC DMTE7 */
+	/* The following use INTC_INPRI00 for masking, which is a 32-bit
+	   register, not a 16-bit register like the IPRx registers, so it
+	   would need special support */
+	/*{ 72, INTPRI00,  8, ? },*/ /* TMU3 TUNI */
+	/*{ 76, INTPRI00, 12, ? },*/ /* TMU4 TUNI */
+};
+
+static unsigned long ipr_offsets[] = {
+	0xffd00004UL,	/* 0: IPRA */
+	0xffd00008UL,	/* 1: IPRB */
+	0xffd0000cUL,	/* 2: IPRC */
+	0xffd00010UL,	/* 3: IPRD */
+};
+
+/* given the IPR index return the address of the IPR register */
+unsigned int map_ipridx_to_addr(int idx)
+{
+	if (idx >= ARRAY_SIZE(ipr_offsets))
+		return 0;
+	return ipr_offsets[idx];
+}
+
+#define INTC_ICR	0xffd00000UL
+#define INTC_ICR_IRLM   (1<<7)
+
+/* enable individual interrupt mode for external interrupts */
+void ipr_irq_enable_irlm(void)
+{
+	ctrl_outw(ctrl_inw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR);
+}
+
+void __init init_IRQ_ipr()
+{
+	make_ipr_irq(sh7750_ipr_map, ARRAY_SIZE(sh7750_ipr_map));
+#ifdef CONFIG_CPU_SUBTYPE_SH7751
+	make_ipr_irq(sh7751_ipr_map, ARRAY_SIZE(sh7751_ipr_map));
+#endif
+}
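
The sh7750_ipr_map/sh7751_ipr_map entries pair an IRQ with an IPR register index (resolved through map_ipridx_to_addr() above), a 4-bit field shift and a priority. A hedged sketch of what programming one entry amounts to -- an assumption about what make_ipr_irq() does with the table, not a copy of its implementation:

    static void demo_set_ipr_priority(int ipr_idx, int shift, int priority)
    {
            unsigned int addr = map_ipridx_to_addr(ipr_idx);
            u16 val;

            if (!addr)                      /* index out of range */
                    return;

            val = ctrl_inw(addr);
            val &= ~(0xf << shift);         /* each priority field is 4 bits */
            val |= (priority & 0xf) << shift;
            ctrl_outw(val, addr);
    }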
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7780.c b/arch/sh/kernel/cpu/sh4/setup-sh7780.c
index 814ddb2..9aeaa2d 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7780.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7780.c
@@ -79,25 +79,27 @@
 __initcall(sh7780_devices_setup);
 
 static struct intc2_data intc2_irq_table[] = {
-	{ TIMER_IRQ, 0, 24, 0, INTC_TMU0_MSK, 2 },
-	{ 21, 1, 0, 0, INTC_RTC_MSK, TIMER_PRIORITY },
-	{ 22, 1, 1, 0, INTC_RTC_MSK, TIMER_PRIORITY },
-	{ 23, 1, 2, 0, INTC_RTC_MSK, TIMER_PRIORITY },
-	{ SCIF0_ERI_IRQ, 8, 24, 0, INTC_SCIF0_MSK, SCIF0_PRIORITY },
-	{ SCIF0_RXI_IRQ, 8, 24, 0, INTC_SCIF0_MSK, SCIF0_PRIORITY },
-	{ SCIF0_BRI_IRQ, 8, 24, 0, INTC_SCIF0_MSK, SCIF0_PRIORITY },
-	{ SCIF0_TXI_IRQ, 8, 24, 0, INTC_SCIF0_MSK, SCIF0_PRIORITY },
+	{ 28, 0, 24, 0, 0, 2 },		/* TMU0 */
 
-	{ SCIF1_ERI_IRQ, 8, 16, 0, INTC_SCIF1_MSK, SCIF1_PRIORITY },
-	{ SCIF1_RXI_IRQ, 8, 16, 0, INTC_SCIF1_MSK, SCIF1_PRIORITY },
-	{ SCIF1_BRI_IRQ, 8, 16, 0, INTC_SCIF1_MSK, SCIF1_PRIORITY },
-	{ SCIF1_TXI_IRQ, 8, 16, 0, INTC_SCIF1_MSK, SCIF1_PRIORITY },
+	{ 21, 1,  0, 0, 2, 2 },
+	{ 22, 1,  1, 0, 2, 2 },
+	{ 23, 1,  2, 0, 2, 2 },
 
-	{ PCIC0_IRQ, 0x10,  8, 0, INTC_PCIC0_MSK, PCIC0_PRIORITY },
-	{ PCIC1_IRQ, 0x10,  0, 0, INTC_PCIC1_MSK, PCIC1_PRIORITY },
-	{ PCIC2_IRQ, 0x14, 24, 0, INTC_PCIC2_MSK, PCIC2_PRIORITY },
-	{ PCIC3_IRQ, 0x14, 16, 0, INTC_PCIC3_MSK, PCIC3_PRIORITY },
-	{ PCIC4_IRQ, 0x14,  8, 0, INTC_PCIC4_MSK, PCIC4_PRIORITY },
+	{ 40, 8, 24, 0, 3, 3 },		/* SCIF0 ERI */
+	{ 41, 8, 24, 0, 3, 3 },		/* SCIF0 RXI */
+	{ 42, 8, 24, 0, 3, 3 },		/* SCIF0 BRI */
+	{ 43, 8, 24, 0, 3, 3 },		/* SCIF0 TXI */
+
+	{ 76, 8, 16, 0, 4, 3 },		/* SCIF1 ERI */
+	{ 77, 8, 16, 0, 4, 3 },		/* SCIF1 RXI */
+	{ 78, 8, 16, 0, 4, 3 },		/* SCIF1 BRI */
+	{ 79, 8, 16, 0, 4, 3 },		/* SCIF1 TXI */
+
+	{ 64, 0x10,  8, 0, 14, 2 },	/* PCIC0 */
+	{ 65, 0x10,  0, 0, 15, 2 },	/* PCIC1 */
+	{ 66, 0x14, 24, 0, 16, 2 },	/* PCIC2 */
+	{ 67, 0x14, 16, 0, 17, 2 },	/* PCIC3 */
+	{ 68, 0x14,  8, 0, 18, 2 },	/* PCIC4 */
 };
 
 void __init init_IRQ_intc2(void)
diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
index 7bcc73f..0c9ea38 100644
--- a/arch/sh/kernel/cpu/sh4/sq.c
+++ b/arch/sh/kernel/cpu/sh4/sq.c
@@ -19,7 +19,7 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/mm.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <asm/page.h>
 #include <asm/cacheflush.h>
 #include <asm/cpu/sq.h>
@@ -38,7 +38,7 @@
 
 static struct sq_mapping *sq_mapping_list;
 static DEFINE_SPINLOCK(sq_mapping_lock);
-static kmem_cache_t *sq_cache;
+static struct kmem_cache *sq_cache;
 static unsigned long *sq_bitmap;
 
 #define store_queue_barrier()			\
@@ -67,6 +67,7 @@
 	/* Wait for completion */
 	store_queue_barrier();
 }
+EXPORT_SYMBOL(sq_flush_range);
 
 static inline void sq_mapping_list_add(struct sq_mapping *map)
 {
@@ -166,7 +167,7 @@
 	map->size = size;
 	map->name = name;
 
-	page = bitmap_find_free_region(sq_bitmap, 0x04000000,
+	page = bitmap_find_free_region(sq_bitmap, 0x04000000 >> PAGE_SHIFT,
 				       get_order(map->size));
 	if (unlikely(page < 0)) {
 		ret = -ENOSPC;
@@ -193,6 +194,7 @@
 	kmem_cache_free(sq_cache, map);
 	return ret;
 }
+EXPORT_SYMBOL(sq_remap);
 
 /**
  * sq_unmap - Unmap a Store Queue allocation
@@ -234,6 +236,7 @@
 
 	kmem_cache_free(sq_cache, map);
 }
+EXPORT_SYMBOL(sq_unmap);
 
 /*
  * Needlessly complex sysfs interface. Unfortunately it doesn't seem like
@@ -402,7 +405,3 @@
 MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>, M. R. Brown <mrbrown@0xd6.org>");
 MODULE_DESCRIPTION("Simple API for SH-4 integrated Store Queues");
 MODULE_LICENSE("GPL");
-
-EXPORT_SYMBOL(sq_remap);
-EXPORT_SYMBOL(sq_unmap);
-EXPORT_SYMBOL(sq_flush_range);
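
The bitmap_find_free_region() change passes the size of the bitmap in bits (the 64 MiB store-queue space in pages) rather than in bytes, which is what the API expects. A hedged sketch of the allocate/release pairing under that convention; the names and sizes are illustrative:

    #define SQ_SPAN         0x04000000              /* 64 MiB of SQ space */
    #define SQ_PAGES        (SQ_SPAN >> PAGE_SHIFT)

    static DECLARE_BITMAP(demo_bitmap, SQ_PAGES);

    static int demo_alloc(size_t size)
    {
            int order = get_order(size);
            int page = bitmap_find_free_region(demo_bitmap, SQ_PAGES, order);

            if (page < 0)
                    return -ENOSPC; /* no aligned run of 2^order free pages */
            return page;            /* index of the first page in the region */
    }

    static void demo_free(int page, size_t size)
    {
            bitmap_release_region(demo_bitmap, page, get_order(size));
    }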
diff --git a/arch/sh/kernel/early_printk.c b/arch/sh/kernel/early_printk.c
index a000227..6034082 100644
--- a/arch/sh/kernel/early_printk.c
+++ b/arch/sh/kernel/early_printk.c
@@ -12,7 +12,7 @@
 #include <linux/console.h>
 #include <linux/tty.h>
 #include <linux/init.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 #ifdef CONFIG_SH_STANDARD_BIOS
 #include <asm/sh_bios.h>
@@ -62,17 +62,9 @@
 #include <linux/serial_core.h>
 #include "../../../drivers/serial/sh-sci.h"
 
-#ifdef CONFIG_CPU_SH4
-#define SCIF_REG	0xffe80000
-#elif defined(CONFIG_CPU_SUBTYPE_SH72060)
-#define SCIF_REG	0xfffe9800
-#else
-#error "Undefined SCIF for this subtype"
-#endif
-
 static struct uart_port scif_port = {
-	.mapbase	= SCIF_REG,
-	.membase	= (char __iomem *)SCIF_REG,
+	.mapbase	= CONFIG_EARLY_SCIF_CONSOLE_PORT,
+	.membase	= (char __iomem *)CONFIG_EARLY_SCIF_CONSOLE_PORT,
 };
 
 static void scif_sercon_putc(int c)
@@ -113,23 +105,29 @@
 	.index		= -1,
 };
 
+#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_STANDARD_BIOS)
+/*
+ * Simple SCIF init, primarily aimed at SH7750 and other similar SH-4
+ * devices that aren't using sh-ipl+g.
+ */
 static void scif_sercon_init(int baud)
 {
-	ctrl_outw(0, SCIF_REG + 8);
-	ctrl_outw(0, SCIF_REG);
+	ctrl_outw(0, scif_port.mapbase + 8);
+	ctrl_outw(0, scif_port.mapbase);
 
 	/* Set baud rate */
 	ctrl_outb((CONFIG_SH_PCLK_FREQ + 16 * baud) /
-		  (32 * baud) - 1, SCIF_REG + 4);
+		  (32 * baud) - 1, scif_port.mapbase + 4);
 
-	ctrl_outw(12, SCIF_REG + 24);
-	ctrl_outw(8, SCIF_REG + 24);
-	ctrl_outw(0, SCIF_REG + 32);
-	ctrl_outw(0x60, SCIF_REG + 16);
-	ctrl_outw(0, SCIF_REG + 36);
-	ctrl_outw(0x30, SCIF_REG + 8);
+	ctrl_outw(12, scif_port.mapbase + 24);
+	ctrl_outw(8, scif_port.mapbase + 24);
+	ctrl_outw(0, scif_port.mapbase + 32);
+	ctrl_outw(0x60, scif_port.mapbase + 16);
+	ctrl_outw(0, scif_port.mapbase + 36);
+	ctrl_outw(0x30, scif_port.mapbase + 8);
 }
-#endif
+#endif /* CONFIG_CPU_SH4 && !CONFIG_SH_STANDARD_BIOS */
+#endif /* CONFIG_EARLY_SCIF_CONSOLE */
 
 /*
  * Setup a default console, if more than one is compiled in, rely on the
@@ -168,7 +166,7 @@
 	if (!strncmp(buf, "serial", 6)) {
 		early_console = &scif_console;
 
-#ifdef CONFIG_CPU_SH4
+#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_STANDARD_BIOS)
 		scif_sercon_init(115200);
 #endif
 	}
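
With the hard-coded SCIF_REG gone, scif_sercon_init() derives every register from scif_port.mapbase, and the bit-rate write follows the usual SCIF relation SCBRR = Pck / (32 * baud) - 1, the extra 16 * baud giving round-to-nearest. A worked example with an assumed 33.33 MHz peripheral clock:

    /*
     * CONFIG_SH_PCLK_FREQ = 33333333, baud = 115200 (assumed values):
     *
     *   (33333333 + 16 * 115200) / (32 * 115200) - 1
     * = 35176533 / 3686400 - 1
     * = 9 - 1                      (integer division)
     * = 8                          -> written to mapbase + 4 (SCBRR)
     */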
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
new file mode 100644
index 0000000..29136a3
--- /dev/null
+++ b/arch/sh/kernel/entry-common.S
@@ -0,0 +1,433 @@
+/* $Id: entry.S,v 1.37 2004/06/11 13:02:46 doyu Exp $
+ *
+ *  linux/arch/sh/entry.S
+ *
+ *  Copyright (C) 1999, 2000, 2002  Niibe Yutaka
+ *  Copyright (C) 2003  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ */
+
+! NOTE:
+! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address
+! to be jumped is too far, but it causes illegal slot exception.
+
+/*	
+ * entry.S contains the system-call and fault low-level handling routines.
+ * This also contains the timer-interrupt handler, as well as all interrupts
+ * and faults that can result in a task-switch.
+ *
+ * NOTE: This code handles signal-recognition, which happens every time
+ * after a timer-interrupt and after each system call.
+ *
+ * NOTE: This code uses a convention that instructions in the delay slot
+ * of a transfer-control instruction are indented by an extra space, thus:
+ *
+ *    jmp	@k0	    ! control-transfer instruction
+ *     ldc	k1, ssr     ! delay slot
+ *
+ * Stack layout in 'ret_from_syscall':
+ * 	ptrace needs to have all regs on the stack.
+ *	if the order here is changed, it needs to be
+ *	updated in ptrace.c and ptrace.h
+ *
+ *	r0
+ *      ...
+ *	r15 = stack pointer
+ *	spc
+ *	pr
+ *	ssr
+ *	gbr
+ *	mach
+ *	macl
+ *	syscall #
+ *
+ */
+
+#if defined(CONFIG_PREEMPT)
+#  define preempt_stop()	cli
+#else
+#  define preempt_stop()
+#  define resume_kernel		__restore_all
+#endif
+
+#if defined(CONFIG_SH_STANDARD_BIOS) || defined(CONFIG_SH_KGDB)
+! Handle kernel debug if either kgdb (SW) or gdb-stub (FW) is present.
+! If both are configured, handle the debug traps (breakpoints) in SW,
+! but still allow BIOS traps to FW.
+
+	.align	2
+debug_kernel:
+#if defined(CONFIG_SH_STANDARD_BIOS) && defined(CONFIG_SH_KGDB)
+	/* Force BIOS call to FW (debug_trap put TRA in r8) */
+	mov	r8,r0
+	shlr2	r0
+	cmp/eq	#0x3f,r0
+	bt	debug_kernel_fw
+#endif /* CONFIG_SH_STANDARD_BIOS && CONFIG_SH_KGDB */
+
+debug_enter:		
+#if defined(CONFIG_SH_KGDB)
+	/* Jump to kgdb, pass stacked regs as arg */
+debug_kernel_sw:
+	mov.l	3f, r0
+	jmp	@r0
+	 mov	r15, r4
+	.align	2
+3:	.long	kgdb_handle_exception
+#endif /* CONFIG_SH_KGDB */
+
+#endif /* CONFIG_SH_STANDARD_BIOS || CONFIG_SH_KGDB */
+
+
+	.align	2
+debug_trap:	
+#if defined(CONFIG_SH_STANDARD_BIOS) || defined(CONFIG_SH_KGDB)
+	mov	#OFF_SR, r0
+	mov.l	@(r0,r15), r0		! get status register
+	shll	r0
+	shll	r0			! kernel space?
+	bt/s	debug_kernel
+#endif
+	 mov.l	@r15, r0		! Restore R0 value
+	mov.l	1f, r8
+	jmp	@r8
+	 nop
+
+	.align	2
+ENTRY(exception_error)
+	!
+#ifdef CONFIG_TRACE_IRQFLAGS
+	mov.l	3f, r0
+	jsr	@r0
+	 nop
+#endif
+	sti
+	mov.l	2f, r0
+	jmp	@r0
+	 nop
+
+!
+	.align	2
+1:	.long	break_point_trap_software
+2:	.long	do_exception_error
+#ifdef CONFIG_TRACE_IRQFLAGS
+3:	.long	trace_hardirqs_on
+#endif
+
+	.align	2
+ret_from_exception:
+	preempt_stop()
+#ifdef CONFIG_TRACE_IRQFLAGS
+	mov.l	4f, r0
+	jsr	@r0
+	 nop
+#endif
+ENTRY(ret_from_irq)
+	!
+	mov	#OFF_SR, r0
+	mov.l	@(r0,r15), r0	! get status register
+	shll	r0
+	shll	r0		! kernel space?
+	get_current_thread_info r8, r0
+	bt	resume_kernel	! Yes, it's from kernel, go back soon
+
+#ifdef CONFIG_PREEMPT
+	bra	resume_userspace
+	 nop
+ENTRY(resume_kernel)
+	mov.l	@(TI_PRE_COUNT,r8), r0	! current_thread_info->preempt_count
+	tst	r0, r0
+	bf	noresched
+need_resched:
+	mov.l	@(TI_FLAGS,r8), r0	! current_thread_info->flags
+	tst	#_TIF_NEED_RESCHED, r0	! need_resched set?
+	bt	noresched
+
+	mov	#OFF_SR, r0
+	mov.l	@(r0,r15), r0		! get status register
+	and	#0xf0, r0		! interrupts off (exception path)?
+	cmp/eq	#0xf0, r0
+	bt	noresched
+
+	mov.l	1f, r0
+	mov.l	r0, @(TI_PRE_COUNT,r8)
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+	mov.l	3f, r0
+	jsr	@r0
+	 nop
+#endif
+	sti
+	mov.l	2f, r0
+	jsr	@r0
+	 nop
+	mov	#0, r0
+	mov.l	r0, @(TI_PRE_COUNT,r8)
+	cli
+#ifdef CONFIG_TRACE_IRQFLAGS
+	mov.l	4f, r0
+	jsr	@r0
+	 nop
+#endif
+
+	bra	need_resched
+	 nop
+
+noresched:
+	bra	__restore_all
+	 nop
+
+	.align 2
+1:	.long	PREEMPT_ACTIVE
+2:	.long	schedule
+#ifdef CONFIG_TRACE_IRQFLAGS
+3:	.long	trace_hardirqs_on
+4:	.long	trace_hardirqs_off
+#endif
+#endif
+
+ENTRY(resume_userspace)
+	! r8: current_thread_info
+	cli
+#ifdef CONFIG_TRACE_IRQFLAGS
+	mov.l	5f, r0
+	jsr	@r0
+	 nop
+#endif
+	mov.l	@(TI_FLAGS,r8), r0		! current_thread_info->flags
+	tst	#_TIF_WORK_MASK, r0
+	bt/s	__restore_all
+	 tst	#_TIF_NEED_RESCHED, r0
+
+	.align	2
+work_pending:
+	! r0: current_thread_info->flags
+	! r8: current_thread_info
+	! t:  result of "tst	#_TIF_NEED_RESCHED, r0"
+	bf/s	work_resched
+	 tst	#(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r0
+work_notifysig:
+	bt/s	__restore_all
+	 mov	r15, r4
+	mov	r12, r5		! set arg1(save_r0)
+	mov	r0, r6
+	mov.l	2f, r1
+	mov.l	3f, r0
+	jmp	@r1
+	 lds	r0, pr
+work_resched:
+#ifndef CONFIG_PREEMPT
+	! gUSA handling
+	mov.l	@(OFF_SP,r15), r0	! get user space stack pointer
+	mov	r0, r1
+	shll	r0
+	bf/s	1f
+	 shll	r0
+	bf/s	1f
+	 mov	#OFF_PC, r0
+	! 				  SP >= 0xc0000000 : gUSA mark
+	mov.l	@(r0,r15), r2		! get user space PC (program counter)
+	mov.l	@(OFF_R0,r15), r3	! end point
+	cmp/hs	r3, r2			! r2 >= r3? 
+	bt	1f
+	add	r3, r1			! rewind point #2
+	mov.l	r1, @(r0,r15)		! reset PC to rewind point #2
+	!
+1:
+#endif
+	mov.l	1f, r1
+	jsr	@r1				! schedule
+	 nop
+	cli
+#ifdef CONFIG_TRACE_IRQFLAGS
+	mov.l	5f, r0
+	jsr	@r0
+	 nop
+#endif
+	!
+	mov.l	@(TI_FLAGS,r8), r0		! current_thread_info->flags
+	tst	#_TIF_WORK_MASK, r0
+	bt	__restore_all
+	bra	work_pending
+	 tst	#_TIF_NEED_RESCHED, r0
+
+	.align	2
+1:	.long	schedule
+2:	.long	do_notify_resume
+3:	.long	restore_all
+#ifdef CONFIG_TRACE_IRQFLAGS
+4:	.long	trace_hardirqs_on
+5:	.long	trace_hardirqs_off
+#endif
+
+	.align	2
+syscall_exit_work:
+	! r0: current_thread_info->flags
+	! r8: current_thread_info
+	tst	#_TIF_SYSCALL_TRACE, r0
+	bt/s	work_pending
+	 tst	#_TIF_NEED_RESCHED, r0
+#ifdef CONFIG_TRACE_IRQFLAGS
+	mov.l	5f, r0
+	jsr	@r0
+	 nop
+#endif
+	sti
+	! XXX setup arguments...
+	mov.l	4f, r0			! do_syscall_trace
+	jsr	@r0
+	 nop
+	bra	resume_userspace
+	 nop
+
+	.align	2
+syscall_trace_entry:
+	!                     	Yes it is traced.
+	! XXX setup arguments...
+	mov.l	4f, r11		! Call do_syscall_trace which notifies
+	jsr	@r11	    	! superior (will chomp R[0-7])
+	 nop
+	!			Reload R0-R4 from kernel stack, where the
+	!   	    	    	parent may have modified them using
+	!   	    	    	ptrace(POKEUSR).  (Note that R0-R2 are
+	!   	    	    	used by the system call handler directly
+	!   	    	    	from the kernel stack anyway, so don't need
+	!   	    	    	to be reloaded here.)  This allows the parent
+	!   	    	    	to rewrite system calls and args on the fly.
+	mov.l	@(OFF_R4,r15), r4   ! arg0
+	mov.l	@(OFF_R5,r15), r5
+	mov.l	@(OFF_R6,r15), r6
+	mov.l	@(OFF_R7,r15), r7   ! arg3
+	mov.l	@(OFF_R3,r15), r3   ! syscall_nr
+	!
+	mov.l	2f, r10			! Number of syscalls
+	cmp/hs	r10, r3
+	bf	syscall_call
+	mov	#-ENOSYS, r0
+	bra	syscall_exit
+	 mov.l	r0, @(OFF_R0,r15)	! Return value
+
+__restore_all:
+	mov.l	1f, r0
+	jmp	@r0
+	 nop
+
+	.align	2
+1:	.long	restore_all
+
+	.align	2
+not_syscall_tra:	
+	bra	debug_trap
+	 nop
+
+	.align	2
+syscall_badsys:			! Bad syscall number
+	mov	#-ENOSYS, r0
+	bra	resume_userspace
+	 mov.l	r0, @(OFF_R0,r15)	! Return value
+	
+
+/*
+ * Syscall interface:
+ *
+ *	Syscall #: R3
+ *	Arguments #0 to #3: R4--R7
+ *	Arguments #4 to #6: R0, R1, R2
+ *	TRA: (number of arguments + 0x10) x 4
+ *
+ * This code also handles delegating other traps to the BIOS/gdb stub
+ * according to:
+ *
+ * Trap number
+ * (TRA>>2) 	    Purpose
+ * -------- 	    -------
+ * 0x0-0xf  	    old syscall ABI
+ * 0x10-0x1f  	    new syscall ABI
+ * 0x20-0xff  	    delegated through debug_trap to BIOS/gdb stub.
+ *
+ * Note: When we're first called, the TRA value must be shifted
+ * right 2 bits in order to get the value that was used as the "trapa"
+ * argument.
+ */
+
+	.align	2
+	.globl	ret_from_fork
+ret_from_fork:
+	mov.l	1f, r8
+	jsr	@r8
+	 mov	r0, r4
+	bra	syscall_exit
+	 nop
+	.align	2
+1:	.long	schedule_tail
+	!
+ENTRY(system_call)
+#if !defined(CONFIG_CPU_SH2)
+	mov.l	1f, r9
+	mov.l	@r9, r8		! Read from TRA (Trap Address) Register
+#endif
+	!
+	! Is the trap argument >= 0x20? (TRA will be >= 0x80)
+	mov	#0x7f, r9
+	cmp/hi	r9, r8
+	bt/s	not_syscall_tra
+	 mov	#OFF_TRA, r9
+	add	r15, r9
+	mov.l	r8, @r9			! set TRA value to tra
+#ifdef CONFIG_TRACE_IRQFLAGS
+	mov.l	5f, r10
+	jsr	@r10
+	 nop
+#endif
+	sti
+
+	!
+	get_current_thread_info r8, r10
+	mov.l	@(TI_FLAGS,r8), r8
+	mov	#_TIF_SYSCALL_TRACE, r10
+	tst	r10, r8
+	bf	syscall_trace_entry
+	!
+	mov.l	2f, r8			! Number of syscalls
+	cmp/hs	r8, r3
+	bt	syscall_badsys
+	!
+syscall_call:
+	shll2	r3		! x4
+	mov.l	3f, r8		! Load the address of sys_call_table
+	add	r8, r3
+	mov.l	@r3, r8
+	jsr	@r8	    	! jump to specific syscall handler
+	 nop
+	mov.l	@(OFF_R0,r15), r12		! save r0
+	mov.l	r0, @(OFF_R0,r15)		! save the return value
+	!
+syscall_exit:
+	cli
+#ifdef CONFIG_TRACE_IRQFLAGS
+	mov.l	6f, r0
+	jsr	@r0
+	 nop
+#endif
+	!
+	get_current_thread_info r8, r0
+	mov.l	@(TI_FLAGS,r8), r0		! current_thread_info->flags
+	tst	#_TIF_ALLWORK_MASK, r0
+	bf	syscall_exit_work
+	bra	__restore_all
+	 nop
+	.align	2
+#if !defined(CONFIG_CPU_SH2)
+1:	.long	TRA
+#endif
+2:	.long	NR_syscalls
+3:	.long	sys_call_table
+4:	.long	do_syscall_trace
+#ifdef CONFIG_TRACE_IRQFLAGS
+5:	.long	trace_hardirqs_on
+6:	.long	trace_hardirqs_off
+#endif
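
The syscall-interface comment above is the contract the new system_call path enforces: number in R3, the first four arguments in R4-R7, and a trapa vector of 0x10 plus the argument count (so TRA reads back as that value times four). A hedged userland-side sketch of a three-argument call; the wrapper name is hypothetical and error handling is omitted:

    static inline long sh_syscall3(long nr, long a0, long a1, long a2)
    {
            register long r3 __asm__("r3") = nr;    /* syscall number */
            register long r4 __asm__("r4") = a0;    /* argument 0 */
            register long r5 __asm__("r5") = a1;    /* argument 1 */
            register long r6 __asm__("r6") = a2;    /* argument 2 */
            register long r0 __asm__("r0");         /* return value */

            __asm__ __volatile__(
                    "trapa  #0x13"  /* 0x10 + 3 args; kernel sees TRA = 0x13 << 2 */
                    : "=r" (r0)
                    : "r" (r3), "r" (r4), "r" (r5), "r" (r6)
                    : "memory", "t");
            return r0;
    }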
diff --git a/arch/sh/kernel/entry.S b/arch/sh/kernel/entry.S
deleted file mode 100644
index 39aaefb..0000000
--- a/arch/sh/kernel/entry.S
+++ /dev/null
@@ -1,843 +0,0 @@
-/*
- *  linux/arch/sh/entry.S
- *
- *  Copyright (C) 1999, 2000, 2002  Niibe Yutaka
- *  Copyright (C) 2003 - 2006  Paul Mundt
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- */
-#include <linux/sys.h>
-#include <linux/errno.h>
-#include <linux/linkage.h>
-#include <asm/asm-offsets.h>
-#include <asm/thread_info.h>
-#include <asm/cpu/mmu_context.h>
-#include <asm/unistd.h>
-
-! NOTE:
-! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address
-! to be jumped is too far, but it causes illegal slot exception.
-
-/*	
- * entry.S contains the system-call and fault low-level handling routines.
- * This also contains the timer-interrupt handler, as well as all interrupts
- * and faults that can result in a task-switch.
- *
- * NOTE: This code handles signal-recognition, which happens every time
- * after a timer-interrupt and after each system call.
- *
- * NOTE: This code uses a convention that instructions in the delay slot
- * of a transfer-control instruction are indented by an extra space, thus:
- *
- *    jmp	@k0	    ! control-transfer instruction
- *     ldc	k1, ssr     ! delay slot
- *
- * Stack layout in 'ret_from_syscall':
- * 	ptrace needs to have all regs on the stack.
- *	if the order here is changed, it needs to be
- *	updated in ptrace.c and ptrace.h
- *
- *	r0
- *      ...
- *	r15 = stack pointer
- *	spc
- *	pr
- *	ssr
- *	gbr
- *	mach
- *	macl
- *	syscall #
- *
- */
-#if defined(CONFIG_KGDB_NMI)
-NMI_VEC = 0x1c0			! Must catch early for debounce
-#endif
-
-/* Offsets to the stack */
-OFF_R0  =  0		/* Return value. New ABI also arg4 */
-OFF_R1  =  4     	/* New ABI: arg5 */
-OFF_R2  =  8     	/* New ABI: arg6 */
-OFF_R3  =  12     	/* New ABI: syscall_nr */
-OFF_R4  =  16     	/* New ABI: arg0 */
-OFF_R5  =  20     	/* New ABI: arg1 */
-OFF_R6  =  24     	/* New ABI: arg2 */
-OFF_R7  =  28     	/* New ABI: arg3 */
-OFF_SP	=  (15*4)
-OFF_PC  =  (16*4)
-OFF_SR	=  (16*4+8)
-OFF_TRA	=  (16*4+6*4)
-
-
-#define k0	r0
-#define k1	r1
-#define k2	r2
-#define k3	r3
-#define k4	r4
-
-#define g_imask		r6	/* r6_bank1 */
-#define k_g_imask	r6_bank	/* r6_bank1 */
-#define current		r7	/* r7_bank1 */
-
-/*
- * Kernel mode register usage:
- *	k0	scratch
- *	k1	scratch
- *	k2	scratch (Exception code)
- *	k3	scratch (Return address)
- *	k4	scratch
- *	k5	reserved
- *	k6	Global Interrupt Mask (0--15 << 4)
- *	k7	CURRENT_THREAD_INFO (pointer to current thread info)
- */
-
-!
-! TLB Miss / Initial Page write exception handling
-!			_and_
-! TLB hits, but the access violate the protection.
-! It can be valid access, such as stack grow and/or C-O-W.
-!
-!
-! Find the pmd/pte entry and loadtlb
-! If it's not found, cause address error (SEGV)
-!
-! Although this could be written in assembly language (and it'd be faster),
-! this first version depends *much* on C implementation.
-!
-
-#define CLI()				\
-	stc	sr, r0;			\
-	or	#0xf0, r0;		\
-	ldc	r0, sr
-
-#define STI()				\
-	mov.l	__INV_IMASK, r11;	\
-	stc	sr, r10;		\
-	and	r11, r10;		\
-	stc	k_g_imask, r11;		\
-	or	r11, r10;		\
-	ldc	r10, sr
-
-#if defined(CONFIG_PREEMPT)
-#  define preempt_stop()	CLI()
-#else
-#  define preempt_stop()
-#  define resume_kernel		restore_all
-#endif
-
-#if defined(CONFIG_MMU)
-	.align	2
-ENTRY(tlb_miss_load)
-	bra	call_dpf
-	 mov	#0, r5
-
-	.align	2
-ENTRY(tlb_miss_store)
-	bra	call_dpf
-	 mov	#1, r5
-
-	.align	2
-ENTRY(initial_page_write)
-	bra	call_dpf
-	 mov	#1, r5
-
-	.align	2
-ENTRY(tlb_protection_violation_load)
-	bra	call_dpf
-	 mov	#0, r5
-
-	.align	2
-ENTRY(tlb_protection_violation_store)
-	bra	call_dpf
-	 mov	#1, r5
-
-call_dpf:
-	mov.l	1f, r0
-	mov	r5, r8
-	mov.l	@r0, r6
-	mov	r6, r9
-	mov.l	2f, r0
-	sts	pr, r10
-	jsr	@r0
-	 mov	r15, r4
-	!
-	tst	r0, r0
-	bf/s	0f
-	 lds	r10, pr
-	rts
-	 nop
-0:	STI()
-	mov.l	3f, r0
-	mov	r9, r6
-	mov	r8, r5
-	jmp	@r0
-	 mov	r15, r4
-
-	.align 2
-1:	.long	MMU_TEA
-2:	.long	__do_page_fault
-3:	.long	do_page_fault
-
-	.align	2
-ENTRY(address_error_load)
-	bra	call_dae
-	 mov	#0,r5		! writeaccess = 0
-
-	.align	2
-ENTRY(address_error_store)
-	bra	call_dae
-	 mov	#1,r5		! writeaccess = 1
-
-	.align	2
-call_dae:
-	mov.l	1f, r0
-	mov.l	@r0, r6		! address
-	mov.l	2f, r0
-	jmp	@r0
-	 mov	r15, r4		! regs
-
-	.align 2
-1:	.long	MMU_TEA
-2:	.long   do_address_error
-#endif /* CONFIG_MMU */
-
-#if defined(CONFIG_SH_STANDARD_BIOS) || defined(CONFIG_SH_KGDB)
-! Handle kernel debug if either kgdb (SW) or gdb-stub (FW) is present.
-! If both are configured, handle the debug traps (breakpoints) in SW,
-! but still allow BIOS traps to FW.
-
-	.align	2
-debug_kernel:
-#if defined(CONFIG_SH_STANDARD_BIOS) && defined(CONFIG_SH_KGDB)
-	/* Force BIOS call to FW (debug_trap put TRA in r8) */
-	mov	r8,r0
-	shlr2	r0
-	cmp/eq	#0x3f,r0
-	bt	debug_kernel_fw
-#endif /* CONFIG_SH_STANDARD_BIOS && CONFIG_SH_KGDB */
-
-debug_enter:		
-#if defined(CONFIG_SH_KGDB)
-	/* Jump to kgdb, pass stacked regs as arg */
-debug_kernel_sw:
-	mov.l	3f, r0
-	jmp	@r0
-	 mov	r15, r4
-	.align	2
-3:	.long	kgdb_handle_exception
-#endif /* CONFIG_SH_KGDB */
-
-#if defined(CONFIG_SH_STANDARD_BIOS)
-	/* Unwind the stack and jmp to the debug entry */
-debug_kernel_fw:
-	mov.l	@r15+, r0
-	mov.l	@r15+, r1
-	mov.l	@r15+, r2
-	mov.l	@r15+, r3
-	mov.l	@r15+, r4
-	mov.l	@r15+, r5
-	mov.l	@r15+, r6
-	mov.l	@r15+, r7
-	stc	sr, r8
-	mov.l	1f, r9			! BL =1, RB=1, IMASK=0x0F
-	or	r9, r8
-	ldc	r8, sr			! here, change the register bank
-	mov.l	@r15+, r8
-	mov.l	@r15+, r9
-	mov.l	@r15+, r10
-	mov.l	@r15+, r11
-	mov.l	@r15+, r12
-	mov.l	@r15+, r13
-	mov.l	@r15+, r14
-	mov.l	@r15+, k0
-	ldc.l	@r15+, spc
-	lds.l	@r15+, pr
-	mov.l	@r15+, k1
-	ldc.l	@r15+, gbr
-	lds.l	@r15+, mach
-	lds.l	@r15+, macl
-	mov	k0, r15
-	!
-	mov.l	2f, k0
-	mov.l	@k0, k0
-	jmp	@k0
-	 ldc	k1, ssr
-	.align	2
-1:	.long	0x300000f0
-2:	.long	gdb_vbr_vector
-#endif /* CONFIG_SH_STANDARD_BIOS */
-
-#endif /* CONFIG_SH_STANDARD_BIOS || CONFIG_SH_KGDB */
-
-
-	.align	2
-debug_trap:	
-#if defined(CONFIG_SH_STANDARD_BIOS) || defined(CONFIG_SH_KGDB)
-	mov	#OFF_SR, r0
-	mov.l	@(r0,r15), r0		! get status register
-	shll	r0
-	shll	r0			! kernel space?
-	bt/s	debug_kernel
-#endif
-	 mov.l	@r15, r0		! Restore R0 value
-	mov.l	1f, r8
-	jmp	@r8
-	 nop
-
-	.align	2
-ENTRY(exception_error)
-	!
-	STI()
-	mov.l	2f, r0
-	jmp	@r0
-	 nop
-
-!
-	.align	2
-1:	.long	break_point_trap_software
-2:	.long	do_exception_error
-
-	.align	2
-ret_from_exception:
-	preempt_stop()
-ENTRY(ret_from_irq)
-	!
-	mov	#OFF_SR, r0
-	mov.l	@(r0,r15), r0	! get status register
-	shll	r0
-	shll	r0		! kernel space?
-	bt/s	resume_kernel	! Yes, it's from kernel, go back soon
-	 GET_THREAD_INFO(r8)
-
-#ifdef CONFIG_PREEMPT
-	bra	resume_userspace
-	 nop
-ENTRY(resume_kernel)
-	mov.l	@(TI_PRE_COUNT,r8), r0	! current_thread_info->preempt_count
-	tst	r0, r0
-	bf	noresched
-need_resched:
-	mov.l	@(TI_FLAGS,r8), r0	! current_thread_info->flags
-	tst	#_TIF_NEED_RESCHED, r0	! need_resched set?
-	bt	noresched
-
-	mov	#OFF_SR, r0
-	mov.l	@(r0,r15), r0		! get status register
-	and	#0xf0, r0		! interrupts off (exception path)?
-	cmp/eq	#0xf0, r0
-	bt	noresched
-
-	mov.l	1f, r0
-	mov.l	r0, @(TI_PRE_COUNT,r8)
-
-	STI()
-	mov.l	2f, r0
-	jsr	@r0
-	 nop
-	mov	#0, r0
-	mov.l	r0, @(TI_PRE_COUNT,r8)
-	CLI()
-
-	bra	need_resched
-	 nop
-noresched:
-	bra	restore_all
-	 nop
-
-	.align 2
-1:	.long	PREEMPT_ACTIVE
-2:	.long	schedule
-#endif
-
-ENTRY(resume_userspace)
-	! r8: current_thread_info
-	CLI()
-	mov.l	@(TI_FLAGS,r8), r0		! current_thread_info->flags
-	tst	#_TIF_WORK_MASK, r0
-	bt/s	restore_all
-	 tst	#_TIF_NEED_RESCHED, r0
-
-	.align	2
-work_pending:
-	! r0: current_thread_info->flags
-	! r8: current_thread_info
-	! t:  result of "tst	#_TIF_NEED_RESCHED, r0"
-	bf/s	work_resched
-	 tst	#(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r0
-work_notifysig:
-	bt/s	restore_all
-	 mov	r15, r4
-	mov	r12, r5		! set arg1(save_r0)
-	mov	r0, r6
-	mov.l	2f, r1
-	mova	restore_all, r0
-	jmp	@r1
-	 lds	r0, pr
-work_resched:
-#ifndef CONFIG_PREEMPT
-	! gUSA handling
-	mov.l	@(OFF_SP,r15), r0	! get user space stack pointer
-	mov	r0, r1
-	shll	r0
-	bf/s	1f
-	 shll	r0
-	bf/s	1f
-	 mov	#OFF_PC, r0
-	! 				  SP >= 0xc0000000 : gUSA mark
-	mov.l	@(r0,r15), r2		! get user space PC (program counter)
-	mov.l	@(OFF_R0,r15), r3	! end point
-	cmp/hs	r3, r2			! r2 >= r3? 
-	bt	1f
-	add	r3, r1			! rewind point #2
-	mov.l	r1, @(r0,r15)		! reset PC to rewind point #2
-	!
-1:
-#endif
-	mov.l	1f, r1
-	jsr	@r1				! schedule
-	 nop
-	CLI()
-	!
-	mov.l	@(TI_FLAGS,r8), r0		! current_thread_info->flags
-	tst	#_TIF_WORK_MASK, r0
-	bt	restore_all
-	bra	work_pending
-	 tst	#_TIF_NEED_RESCHED, r0
-
-	.align	2
-1:	.long	schedule
-2:	.long	do_notify_resume
-
-	.align	2
-syscall_exit_work:
-	! r0: current_thread_info->flags
-	! r8: current_thread_info
-	tst	#_TIF_SYSCALL_TRACE, r0
-	bt/s	work_pending
-	 tst	#_TIF_NEED_RESCHED, r0
-	STI()
-	! XXX setup arguments...
-	mov.l	4f, r0			! do_syscall_trace
-	jsr	@r0
-	 nop
-	bra	resume_userspace
-	 nop
-
-	.align	2
-syscall_trace_entry:
-	!                     	Yes it is traced.
-	! XXX setup arguments...
-	mov.l	4f, r11		! Call do_syscall_trace which notifies
-	jsr	@r11	    	! superior (will chomp R[0-7])
-	 nop
-	!			Reload R0-R4 from kernel stack, where the
-	!   	    	    	parent may have modified them using
-	!   	    	    	ptrace(POKEUSR).  (Note that R0-R2 are
-	!   	    	    	used by the system call handler directly
-	!   	    	    	from the kernel stack anyway, so don't need
-	!   	    	    	to be reloaded here.)  This allows the parent
-	!   	    	    	to rewrite system calls and args on the fly.
-	mov.l	@(OFF_R4,r15), r4   ! arg0
-	mov.l	@(OFF_R5,r15), r5
-	mov.l	@(OFF_R6,r15), r6
-	mov.l	@(OFF_R7,r15), r7   ! arg3
-	mov.l	@(OFF_R3,r15), r3   ! syscall_nr
-	!   	    	    Arrange for do_syscall_trace to be called
-	!   	    	    again as the system call returns.
-	mov.l	2f, r10			! Number of syscalls
-	cmp/hs	r10, r3
-	bf	syscall_call
-	mov	#-ENOSYS, r0
-	bra	syscall_exit
-	 mov.l	r0, @(OFF_R0,r15)	! Return value
-
-/*
- * Syscall interface:
- *
- *	Syscall #: R3
- *	Arguments #0 to #3: R4--R7
- *	Arguments #4 to #6: R0, R1, R2
- *	TRA: (number of arguments + 0x10) x 4
- *
- * This code also handles delegating other traps to the BIOS/gdb stub
- * according to:
- *
- * Trap number
- * (TRA>>2) 	    Purpose
- * -------- 	    -------
- * 0x0-0xf  	    old syscall ABI
- * 0x10-0x1f  	    new syscall ABI
- * 0x20-0xff  	    delegated through debug_trap to BIOS/gdb stub.
- *
- * Note: When we're first called, the TRA value must be shifted
- * right 2 bits in order to get the value that was used as the "trapa"
- * argument.
- */
-
-	.align	2
-	.globl	ret_from_fork
-ret_from_fork:
-	mov.l	1f, r8
-	jsr	@r8
-	 mov	r0, r4
-	bra	syscall_exit
-	 nop
-	.align	2
-1:	.long	schedule_tail
-	!
-ENTRY(system_call)
-	mov.l	1f, r9
-	mov.l	@r9, r8		! Read from TRA (Trap Address) Register
-	!
-	! Is the trap argument >= 0x20? (TRA will be >= 0x80)
-	mov	#0x7f, r9
-	cmp/hi	r9, r8
-	bt/s	0f
-	 mov	#OFF_TRA, r9
-	add	r15, r9
-	!
-	mov.l	r8, @r9			! set TRA value to tra
-	STI()
-	!   	    	    Call the system call handler through the table.
-	!   	    	    First check for bad syscall number
-	mov	r3, r9
-	mov.l	2f, r8			! Number of syscalls
-	cmp/hs	r8, r9
-	bf/s	good_system_call
-	 GET_THREAD_INFO(r8)
-syscall_badsys:			! Bad syscall number
-	mov	#-ENOSYS, r0
-	bra	resume_userspace
-	 mov.l	r0, @(OFF_R0,r15)	! Return value
-	!
-0:
-	bra	debug_trap
-	 nop
-	!
-good_system_call:		! Good syscall number
-	mov.l	@(TI_FLAGS,r8), r8
-	mov	#_TIF_SYSCALL_TRACE, r10
-	tst	r10, r8
-	bf	syscall_trace_entry
-	!
-syscall_call:
-	shll2	r9		! x4
-	mov.l	3f, r8		! Load the address of sys_call_table
-	add	r8, r9
-	mov.l	@r9, r8
-	jsr	@r8	    	! jump to specific syscall handler
-	 nop
-	mov.l	@(OFF_R0,r15), r12		! save r0
-	mov.l	r0, @(OFF_R0,r15)		! save the return value
-	!
-syscall_exit:
-	CLI()
-	!
-	GET_THREAD_INFO(r8)
-	mov.l	@(TI_FLAGS,r8), r0		! current_thread_info->flags
-	tst	#_TIF_ALLWORK_MASK, r0
-	bf	syscall_exit_work
-restore_all:
-	mov.l	@r15+, r0
-	mov.l	@r15+, r1
-	mov.l	@r15+, r2
-	mov.l	@r15+, r3
-	mov.l	@r15+, r4
-	mov.l	@r15+, r5
-	mov.l	@r15+, r6
-	mov.l	@r15+, r7
-	!
-	stc	sr, r8
-	mov.l	7f, r9
-	or	r9, r8			! BL =1, RB=1
-	ldc	r8, sr			! here, change the register bank
-	!
-	mov.l	@r15+, r8
-	mov.l	@r15+, r9
-	mov.l	@r15+, r10
-	mov.l	@r15+, r11
-	mov.l	@r15+, r12
-	mov.l	@r15+, r13
-	mov.l	@r15+, r14
-	mov.l	@r15+, k4		! original stack pointer
-	ldc.l	@r15+, spc
-	lds.l	@r15+, pr
-	mov.l	@r15+, k3		! original SR
-	ldc.l	@r15+, gbr
-	lds.l	@r15+, mach
-	lds.l	@r15+, macl
-	add	#4, r15			! Skip syscall number
-	!
-#ifdef CONFIG_SH_DSP
-	mov.l	@r15+, k0		! DSP mode marker
-	mov.l	5f, k1
-	cmp/eq	k0, k1			! Do we have a DSP stack frame?
-	bf	skip_restore
-
-	stc	sr, k0			! Enable CPU DSP mode
-	or	k1, k0			! (within kernel it may be disabled)
-	ldc	k0, sr
-	mov	r2, k0			! Backup r2
-
-	! Restore DSP registers from stack
-	mov	r15, r2
-	movs.l	@r2+, a1
-	movs.l	@r2+, a0g
-	movs.l	@r2+, a1g
-	movs.l	@r2+, m0
-	movs.l	@r2+, m1
-	mov	r2, r15
-
-	lds.l	@r15+, a0
-	lds.l	@r15+, x0
-	lds.l	@r15+, x1
-	lds.l	@r15+, y0
-	lds.l	@r15+, y1
-	lds.l	@r15+, dsr
-	ldc.l	@r15+, rs
-	ldc.l	@r15+, re
-	ldc.l	@r15+, mod
-
-	mov	k0, r2			! Restore r2
-skip_restore:
-#endif
-	!
-	! Calculate new SR value
-	mov	k3, k2			! original SR value
-	mov.l	9f, k1
-	and	k1, k2			! Mask orignal SR value
-	!
-	mov	k3, k0			! Calculate IMASK-bits
-	shlr2	k0
-	and	#0x3c, k0
-	cmp/eq	#0x3c, k0
-	bt/s	6f
-	 shll2	k0
-	mov	g_imask, k0
-	!
-6:	or	k0, k2			! Set the IMASK-bits
-	ldc	k2, ssr
-	!
-#if defined(CONFIG_KGDB_NMI)
-	! Clear in_nmi
-	mov.l	6f, k0
-	mov	#0, k1
-	mov.b	k1, @k0
-#endif
-	mov.l	@r15+, k2		! restore EXPEVT
-	mov	k4, r15
-	rte
-	 nop
-
-	.align	2
-1:	.long	TRA
-2:	.long	NR_syscalls
-3:	.long	sys_call_table
-4:	.long	do_syscall_trace
-5:	.long	0x00001000	! DSP
-7:	.long	0x30000000
-9:
-__INV_IMASK:
-	.long	0xffffff0f	! ~(IMASK)
-
-! Exception Vector Base
-!
-!	Should be aligned page boundary.
-!
-	.balign 	4096,0,4096
-ENTRY(vbr_base)
-	.long	0
-!
-	.balign 	256,0,256
-general_exception:
-	mov.l	1f, k2
-	mov.l	2f, k3
-	bra	handle_exception
-	 mov.l	@k2, k2
-	.align	2
-1:	.long	EXPEVT
-2:	.long	ret_from_exception
-!
-!
-	.balign 	1024,0,1024
-tlb_miss:
-	mov.l	1f, k2
-	mov.l	4f, k3
-	bra	handle_exception
-	 mov.l	@k2, k2
-!
-	.balign 	512,0,512
-interrupt:
-	mov.l	2f, k2
-	mov.l	3f, k3
-#if defined(CONFIG_KGDB_NMI)
-	! Debounce (filter nested NMI)
-	mov.l	@k2, k0
-	mov.l	5f, k1
-	cmp/eq	k1, k0
-	bf	0f
-	mov.l	6f, k1
-	tas.b	@k1
-	bt	0f
-	rte
-	 nop
-	.align	2
-5:	.long	NMI_VEC
-6:	.long	in_nmi
-0:
-#endif /* defined(CONFIG_KGDB_NMI) */
-	bra	handle_exception
-	 mov	#-1, k2		! interrupt exception marker
-
-	.align	2
-1:	.long	EXPEVT
-2:	.long	INTEVT
-3:	.long	ret_from_irq
-4:	.long	ret_from_exception
-
-!
-!
-	.align	2
-ENTRY(handle_exception)
-	! Using k0, k1 for scratch registers (r0_bank1, r1_bank),
-	! save all registers onto stack.
-	!
-	stc	ssr, k0		! Is it from kernel space?
-	shll	k0		! Check MD bit (bit30) by shifting it into...
-	shll	k0		!       ...the T bit
-	bt/s	1f		! It's a kernel to kernel transition.
-	 mov	r15, k0		! save original stack to k0
-	/* User space to kernel */
-	mov	#(THREAD_SIZE >> 8), k1
-	shll8	k1		! k1 := THREAD_SIZE
-	add	current, k1
-	mov	k1, r15		! change to kernel stack
-	!
-1:	mov.l	2f, k1
-	!
-#ifdef CONFIG_SH_DSP
-	mov.l	r2, @-r15		! Save r2, we need another reg
-	stc	sr, k4
-	mov.l	1f, r2
-	tst	r2, k4			! Check if in DSP mode
-	mov.l	@r15+, r2		! Restore r2 now
-	bt/s	skip_save
-	 mov	#0, k4			! Set marker for no stack frame
-
-	mov	r2, k4			! Backup r2 (in k4) for later
-
-	! Save DSP registers on stack
-	stc.l	mod, @-r15
-	stc.l	re, @-r15
-	stc.l	rs, @-r15
-	sts.l	dsr, @-r15
-	sts.l	y1, @-r15
-	sts.l	y0, @-r15
-	sts.l	x1, @-r15
-	sts.l	x0, @-r15
-	sts.l	a0, @-r15
-
-	! GAS is broken, does not generate correct "movs.l Ds,@-As" instr.
-
-	! FIXME: Make sure that this is still the case with newer toolchains,
-	! as we're not at all interested in supporting ancient toolchains at
-	! this point. -- PFM.
-
-	mov	r15, r2
-	.word	0xf653			! movs.l	a1, @-r2
-	.word	0xf6f3			! movs.l	a0g, @-r2
-	.word	0xf6d3			! movs.l	a1g, @-r2
-	.word	0xf6c3			! movs.l	m0, @-r2
-	.word	0xf6e3			! movs.l	m1, @-r2
-	mov	r2, r15
-
-	mov	k4, r2			! Restore r2
-	mov.l	1f, k4			! Force DSP stack frame
-skip_save:
-	mov.l	k4, @-r15		! Push DSP mode marker onto stack
-#endif
-	! Save the user registers on the stack.
-	mov.l	k2, @-r15	! EXPEVT
-
- 	mov	#-1, k4
-	mov.l	k4, @-r15	! set TRA (default: -1)
-	!
-	sts.l	macl, @-r15
-	sts.l	mach, @-r15
-	stc.l	gbr, @-r15
-	stc.l	ssr, @-r15
-	sts.l	pr, @-r15
-	stc.l	spc, @-r15
-	!
-	lds	k3, pr		! Set the return address to pr
-	!
-	mov.l	k0, @-r15	! save orignal stack
-	mov.l	r14, @-r15
-	mov.l	r13, @-r15
-	mov.l	r12, @-r15
-	mov.l	r11, @-r15
-	mov.l	r10, @-r15
-	mov.l	r9, @-r15
-	mov.l	r8, @-r15
-	!
-	stc	sr, r8		! Back to normal register bank, and
-	or	k1, r8		! Block all interrupts
-	mov.l	3f, k1
-	and	k1, r8		! ...
-	ldc	r8, sr		! ...changed here.
-	!
-	mov.l	r7, @-r15
-	mov.l	r6, @-r15
-	mov.l	r5, @-r15
-	mov.l	r4, @-r15
-	mov.l	r3, @-r15
-	mov.l	r2, @-r15
-	mov.l	r1, @-r15
-	mov.l	r0, @-r15
-
-	/*
-	 * This gets a bit tricky.. in the INTEVT case we don't want to use
-	 * the VBR offset as a destination in the jump call table, since all
-	 * of the destinations are the same. In this case, (interrupt) sets
-	 * a marker in r2 (now r2_bank since SR.RB changed), which we check
-	 * to determine the exception type. For all other exceptions, we
-	 * forcibly read EXPEVT from memory and fix up the jump address, in
-	 * the interrupt exception case we jump to do_IRQ() and defer the
-	 * INTEVT read until there. As a bonus, we can also clean up the SR.RB
-	 * checks that do_IRQ() was doing..
-	 */
-	stc	r2_bank, r8
-	cmp/pz	r8
-	bf	interrupt_exception
-	shlr2	r8
-	shlr	r8
-	mov.l	4f, r9
-	add	r8, r9
-	mov.l	@r9, r9
-	jmp	@r9
-	 nop
-	rts
-	 nop
-
-	.align	2
-1:	.long	0x00001000	! DSP=1
-2:	.long	0x000080f0	! FD=1, IMASK=15
-3:	.long	0xcfffffff	! RB=0, BL=0
-4:	.long	exception_handling_table
-
-interrupt_exception:
-	mov.l	1f, r9
-	jmp	@r9
-	 nop
-	rts
-	 nop
-
-	.align 2
-1:	.long	do_IRQ
-
-	.align	2
-ENTRY(exception_none)
-	rts
-	 nop
diff --git a/arch/sh/kernel/head.S b/arch/sh/kernel/head.S
index f5f53d1..6aca4bc 100644
--- a/arch/sh/kernel/head.S
+++ b/arch/sh/kernel/head.S
@@ -33,7 +33,7 @@
 	.long	0x00360000	/* INITRD_START */
 	.long	0x000a0000	/* INITRD_SIZE */
 	.long	0
-	.balign 4096,0,4096
+	.balign PAGE_SIZE,0,PAGE_SIZE
 
 	.text	
 /*
@@ -53,8 +53,10 @@
 	ldc	r0, sr
 	!			Initialize global interrupt mask
 	mov	#0, r0
+#ifdef CONFIG_CPU_HAS_SR_RB
 	ldc	r0, r6_bank
-
+#endif
+	
 	/*
 	 * Prefetch if possible to reduce cache miss penalty.
 	 *
@@ -68,11 +70,14 @@
 	!
 	mov.l	2f, r0
 	mov	r0, r15		! Set initial r15 (stack pointer)
-	mov	#(THREAD_SIZE >> 8), r1
+	mov	#(THREAD_SIZE >> 10), r1
 	shll8	r1		! r1 = THREAD_SIZE
+	shll2	r1
 	sub	r1, r0		!
+#ifdef CONFIG_CPU_HAS_SR_RB
 	ldc	r0, r7_bank	! ... and initial thread_info
-
+#endif
+	
 	!			Clear BSS area
 	mov.l	3f, r1
 	add	#4, r1
@@ -95,7 +100,11 @@
 	 nop
 
 	.balign 4
+#if defined(CONFIG_CPU_SH2)
+1:	.long	0x000000F0		! IMASK=0xF
+#else
 1:	.long	0x400080F0		! MD=1, RB=0, BL=0, FD=1, IMASK=0xF
+#endif
 2:	.long	init_thread_union+THREAD_SIZE
 3:	.long	__bss_start
 4:	.long	_end
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index 944128c..67be2b6 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -12,7 +12,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/seq_file.h>
 #include <linux/io.h>
-#include <asm/irq.h>
+#include <linux/irq.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 #include <asm/thread_info.h>
@@ -78,15 +78,16 @@
 	u32			stack[THREAD_SIZE/sizeof(u32)];
 };
 
-static union irq_ctx *hardirq_ctx[NR_CPUS];
-static union irq_ctx *softirq_ctx[NR_CPUS];
+static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
+static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
 #endif
 
 asmlinkage int do_IRQ(unsigned long r4, unsigned long r5,
 		      unsigned long r6, unsigned long r7,
-		      struct pt_regs regs)
+		      struct pt_regs __regs)
 {
-	struct pt_regs *old_regs = set_irq_regs(&regs);
+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+	struct pt_regs *old_regs = set_irq_regs(regs);
 	int irq;
 #ifdef CONFIG_4KSTACKS
 	union irq_ctx *curctx, *irqctx;
@@ -111,7 +112,7 @@
 #endif
 
 #ifdef CONFIG_CPU_HAS_INTEVT
-	irq = (ctrl_inl(INTEVT) >> 5) - 16;
+	irq = evt2irq(ctrl_inl(INTEVT));
 #else
 	irq = r4;
 #endif
@@ -135,17 +136,24 @@
 		irqctx->tinfo.task = curctx->tinfo.task;
 		irqctx->tinfo.previous_sp = current_stack_pointer;
 
+		/*
+		 * Copy the softirq bits in preempt_count so that the
+		 * softirq checks work in the hardirq context.
+		 */
+		irqctx->tinfo.preempt_count =
+			(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
+			(curctx->tinfo.preempt_count & SOFTIRQ_MASK);
+
 		__asm__ __volatile__ (
 			"mov	%0, r4		\n"
-			"mov	r15, r9		\n"
+			"mov	r15, r8		\n"
 			"jsr	@%1		\n"
 			/* swith to the irq stack */
 			" mov	%2, r15		\n"
 			/* restore the stack (ring zero) */
-			"mov	r9, r15		\n"
+			"mov	r8, r15		\n"
 			: /* no outputs */
 			: "r" (irq), "r" (generic_handle_irq), "r" (isp)
-			/* XXX: A somewhat excessive clobber list? -PFM */
 			: "memory", "r0", "r1", "r2", "r3", "r4",
 			  "r5", "r6", "r7", "r8", "t", "pr"
 		);
@@ -193,7 +201,7 @@
 	irqctx->tinfo.task		= NULL;
 	irqctx->tinfo.exec_domain	= NULL;
 	irqctx->tinfo.cpu		= cpu;
-	irqctx->tinfo.preempt_count	= SOFTIRQ_OFFSET;
+	irqctx->tinfo.preempt_count	= 0;
 	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);
 
 	softirq_ctx[cpu] = irqctx;
@@ -239,13 +247,38 @@
 			"mov	r9, r15		\n"
 			: /* no outputs */
 			: "r" (__do_softirq), "r" (isp)
-			/* XXX: A somewhat excessive clobber list? -PFM */
 			: "memory", "r0", "r1", "r2", "r3", "r4",
 			  "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
 		);
+
+		/*
+		 * Shouldn't happen, we returned above if in_interrupt():
+		 */
+		WARN_ON_ONCE(softirq_count());
 	}
 
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(do_softirq);
 #endif
+
+void __init init_IRQ(void)
+{
+#ifdef CONFIG_CPU_HAS_PINT_IRQ
+	init_IRQ_pint();
+#endif
+
+#ifdef CONFIG_CPU_HAS_INTC2_IRQ
+	init_IRQ_intc2();
+#endif
+
+#ifdef CONFIG_CPU_HAS_IPR_IRQ
+	init_IRQ_ipr();
+#endif
+
+	/* Perform the machine specific initialisation */
+	if (sh_mv.mv_init_irq)
+		sh_mv.mv_init_irq();
+
+	irq_ctx_init(smp_processor_id());
+}
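
The preempt_count copy added to do_IRQ() keeps softirq accounting coherent while the handler runs on the separate 4K IRQ stack: only the SOFTIRQ bits are inherited from the interrupted context, everything else remains the hardirq context's own. Schematically, with made-up counts:

    unsigned int task_pc = SOFTIRQ_OFFSET | 1;      /* task: in softirq, preempt disabled once */
    unsigned int irq_pc  = HARDIRQ_OFFSET;          /* fresh hardirq context */

    irq_pc = (irq_pc & ~SOFTIRQ_MASK) | (task_pc & SOFTIRQ_MASK);
    /*
     * irq_pc now reports both in_irq() and in_softirq(), but not the
     * task's preempt-disable depth, so softirq_count() checks made on
     * the IRQ stack see the nesting of the code that was interrupted.
     */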
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
index a52b13a..f3e2631 100644
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process.c
@@ -385,10 +385,11 @@
 
 asmlinkage int sys_fork(unsigned long r4, unsigned long r5,
 			unsigned long r6, unsigned long r7,
-			struct pt_regs regs)
+			struct pt_regs __regs)
 {
+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
 #ifdef CONFIG_MMU
-	return do_fork(SIGCHLD, regs.regs[15], &regs, 0, NULL, NULL);
+	return do_fork(SIGCHLD, regs->regs[15], regs, 0, NULL, NULL);
 #else
 	/* fork almost works, enough to trick you into looking elsewhere :-( */
 	return -EINVAL;
@@ -398,11 +399,12 @@
 asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
 			 unsigned long parent_tidptr,
 			 unsigned long child_tidptr,
-			 struct pt_regs regs)
+			 struct pt_regs __regs)
 {
+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
 	if (!newsp)
-		newsp = regs.regs[15];
-	return do_fork(clone_flags, newsp, &regs, 0,
+		newsp = regs->regs[15];
+	return do_fork(clone_flags, newsp, regs, 0,
 			(int __user *)parent_tidptr, (int __user *)child_tidptr);
 }
 
@@ -418,9 +420,10 @@
  */
 asmlinkage int sys_vfork(unsigned long r4, unsigned long r5,
 			 unsigned long r6, unsigned long r7,
-			 struct pt_regs regs)
+			 struct pt_regs __regs)
 {
-	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.regs[15], &regs,
+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->regs[15], regs,
 		       0, NULL, NULL);
 }
 
@@ -429,8 +432,9 @@
  */
 asmlinkage int sys_execve(char *ufilename, char **uargv,
 			  char **uenvp, unsigned long r7,
-			  struct pt_regs regs)
+			  struct pt_regs __regs)
 {
+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
 	int error;
 	char *filename;
 
@@ -442,7 +446,7 @@
 	error = do_execve(filename,
 			  (char __user * __user *)uargv,
 			  (char __user * __user *)uenvp,
-			  &regs);
+			  regs);
 	if (error == 0) {
 		task_lock(current);
 		current->ptrace &= ~PT_DTRACE;
@@ -472,9 +476,7 @@
 	return pc;
 }
 
-asmlinkage void break_point_trap(unsigned long r4, unsigned long r5,
-				 unsigned long r6, unsigned long r7,
-				 struct pt_regs regs)
+asmlinkage void break_point_trap(void)
 {
 	/* Clear tracing.  */
 #if defined(CONFIG_CPU_SH4A)
@@ -492,8 +494,10 @@
 
 asmlinkage void break_point_trap_software(unsigned long r4, unsigned long r5,
 					  unsigned long r6, unsigned long r7,
-					  struct pt_regs regs)
+					  struct pt_regs __regs)
 {
-	regs.pc -= 2;
+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+
+	regs->pc -= 2;
 	force_sig(SIGTRAP, current);
 }
diff --git a/arch/sh/kernel/relocate_kernel.S b/arch/sh/kernel/relocate_kernel.S
index 8221b37..c66cb32 100644
--- a/arch/sh/kernel/relocate_kernel.S
+++ b/arch/sh/kernel/relocate_kernel.S
@@ -7,11 +7,9 @@
  * This source code is licensed under the GNU General Public License,
  * Version 2.  See the file COPYING for more details.
  */
-
 #include <linux/linkage.h>
-
-#define PAGE_SIZE      4096 /* must be same value as in <asm/page.h> */
-
+#include <asm/addrspace.h>
+#include <asm/page.h>
 
 		.globl relocate_new_kernel
 relocate_new_kernel:
@@ -20,8 +18,8 @@
 	/* r6 = start_address      */
 	/* r7 = vbr_reg            */
 
-	mov.l	10f,r8	  /* 4096 */
-	mov.l	11f,r9    /* 0xa0000000 */
+	mov.l	10f,r8	  /* PAGE_SIZE */
+	mov.l	11f,r9    /* P2SEG */
 
 	/*  stack setting */
 	add	r8,r5
@@ -32,7 +30,7 @@
 0:
 	mov.l	@r4+,r0	  /* cmd = *ind++ */
 
-1:	/* addr = (cmd | 0xa0000000) & 0xfffffff0 */
+1:	/* addr = (cmd | P2SEG) & 0xfffffff0 */
 	mov	r0,r2
 	or	r9,r2
 	mov	#-16,r1
@@ -92,7 +90,7 @@
 10:
 	.long	PAGE_SIZE
 11:
-	.long	0xa0000000
+	.long	P2SEG
 
 relocate_new_kernel_end:
 
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 36d86f9..f8dd6b7 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -332,8 +332,7 @@
 	if (LOADER_TYPE && INITRD_START) {
 		if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
 			reserve_bootmem_node(NODE_DATA(0), INITRD_START+__MEMORY_START, INITRD_SIZE);
-			initrd_start =
-				INITRD_START ? INITRD_START + PAGE_OFFSET + __MEMORY_START : 0;
+			initrd_start = INITRD_START + PAGE_OFFSET + __MEMORY_START;
 			initrd_end = initrd_start + INITRD_SIZE;
 		} else {
 			printk("initrd extends beyond end of memory "
@@ -392,6 +391,7 @@
 subsys_initcall(topology_init);
 
 static const char *cpu_name[] = {
+	[CPU_SH7206]	= "SH7206",	[CPU_SH7619]	= "SH7619",
 	[CPU_SH7604]	= "SH7604",	[CPU_SH7300]	= "SH7300",
 	[CPU_SH7705]	= "SH7705",	[CPU_SH7706]	= "SH7706",
 	[CPU_SH7707]	= "SH7707",	[CPU_SH7708]	= "SH7708",
@@ -404,6 +404,7 @@
 	[CPU_SH4_202]	= "SH4-202",	[CPU_SH4_501]	= "SH4-501",
 	[CPU_SH7770]	= "SH7770",	[CPU_SH7780]	= "SH7780",
 	[CPU_SH7781]	= "SH7781",	[CPU_SH7343]	= "SH7343",
+	[CPU_SH7785]	= "SH7785",
 	[CPU_SH_NONE]	= "Unknown"
 };
 
diff --git a/arch/sh/kernel/sh_ksyms.c b/arch/sh/kernel/sh_ksyms.c
index 8a2fd19..ceee791 100644
--- a/arch/sh/kernel/sh_ksyms.c
+++ b/arch/sh/kernel/sh_ksyms.c
@@ -73,8 +73,6 @@
 DECLARE_EXPORT(__movstr);
 DECLARE_EXPORT(__movstrSI16);
 
-EXPORT_SYMBOL(strcpy);
-
 #ifdef CONFIG_CPU_SH4
 DECLARE_EXPORT(__movstr_i4_even);
 DECLARE_EXPORT(__movstr_i4_odd);
@@ -101,10 +99,6 @@
 EXPORT_SYMBOL(synchronize_irq);
 #endif
 
-#ifdef CONFIG_PM
-EXPORT_SYMBOL(pm_suspend);
-#endif
-
 EXPORT_SYMBOL(csum_partial);
 #ifdef CONFIG_IPV6
 EXPORT_SYMBOL(csum_ipv6_magic);
diff --git a/arch/sh/kernel/signal.c b/arch/sh/kernel/signal.c
index 5213f5b..bb1c480 100644
--- a/arch/sh/kernel/signal.c
+++ b/arch/sh/kernel/signal.c
@@ -23,6 +23,7 @@
 #include <linux/elf.h>
 #include <linux/personality.h>
 #include <linux/binfmts.h>
+#include <linux/freezer.h>
 
 #include <asm/ucontext.h>
 #include <asm/uaccess.h>
@@ -37,7 +38,7 @@
 asmlinkage int
 sys_sigsuspend(old_sigset_t mask,
 	       unsigned long r5, unsigned long r6, unsigned long r7,
-	       struct pt_regs regs)
+	       struct pt_regs __regs)
 {
 	mask &= _BLOCKABLE;
 	spin_lock_irq(&current->sighand->siglock);
@@ -52,7 +53,7 @@
 	return -ERESTARTNOHAND;
 }
 
-asmlinkage int 
+asmlinkage int
 sys_sigaction(int sig, const struct old_sigaction __user *act,
 	      struct old_sigaction __user *oact)
 {
@@ -87,9 +88,11 @@
 asmlinkage int
 sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
 		unsigned long r6, unsigned long r7,
-		struct pt_regs regs)
+		struct pt_regs __regs)
 {
-	return do_sigaltstack(uss, uoss, regs.regs[15]);
+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+
+	return do_sigaltstack(uss, uoss, regs->regs[15]);
 }
 
 
@@ -98,7 +101,11 @@
  */
 
 #define MOVW(n)	 (0x9300|((n)-2))	/* Move mem word at PC+n to R3 */
-#define TRAP16	 0xc310			/* Syscall w/no args (NR in R3) */
+#if defined(CONFIG_CPU_SH2) || defined(CONFIG_CPU_SH2A)
+#define TRAP_NOARG 0xc320		/* Syscall w/no args (NR in R3) */
+#else
+#define TRAP_NOARG 0xc310		/* Syscall w/no args (NR in R3) */
+#endif
 #define OR_R0_R0 0x200b			/* or r0,r0 (insert to avoid hardware bug) */
 
 struct sigframe
@@ -194,9 +201,10 @@
 
 asmlinkage int sys_sigreturn(unsigned long r4, unsigned long r5,
 			     unsigned long r6, unsigned long r7,
-			     struct pt_regs regs)
+			     struct pt_regs __regs)
 {
-	struct sigframe __user *frame = (struct sigframe __user *)regs.regs[15];
+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+	struct sigframe __user *frame = (struct sigframe __user *)regs->regs[15];
 	sigset_t set;
 	int r0;
 
@@ -216,7 +224,7 @@
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
 
-	if (restore_sigcontext(&regs, &frame->sc, &r0))
+	if (restore_sigcontext(regs, &frame->sc, &r0))
 		goto badframe;
 	return r0;
 
@@ -227,9 +235,10 @@
 
 asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5,
 				unsigned long r6, unsigned long r7,
-				struct pt_regs regs)
+				struct pt_regs __regs)
 {
-	struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs.regs[15];
+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+	struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs->regs[15];
 	sigset_t set;
 	stack_t st;
 	int r0;
@@ -246,14 +255,14 @@
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
 
-	if (restore_sigcontext(&regs, &frame->uc.uc_mcontext, &r0))
+	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
 		goto badframe;
 
 	if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
 		goto badframe;
 	/* It is more difficult to avoid calling this function than to
 	   call it and ignore errors.  */
-	do_sigaltstack(&st, NULL, regs.regs[15]);
+	do_sigaltstack(&st, NULL, regs->regs[15]);
 
 	return r0;
 
@@ -350,7 +359,7 @@
 	} else {
 		/* Generate return code (system call to sigreturn) */
 		err |= __put_user(MOVW(7), &frame->retcode[0]);
-		err |= __put_user(TRAP16, &frame->retcode[1]);
+		err |= __put_user(TRAP_NOARG, &frame->retcode[1]);
 		err |= __put_user(OR_R0_R0, &frame->retcode[2]);
 		err |= __put_user(OR_R0_R0, &frame->retcode[3]);
 		err |= __put_user(OR_R0_R0, &frame->retcode[4]);
@@ -430,7 +439,7 @@
 	} else {
 		/* Generate return code (system call to rt_sigreturn) */
 		err |= __put_user(MOVW(7), &frame->retcode[0]);
-		err |= __put_user(TRAP16, &frame->retcode[1]);
+		err |= __put_user(TRAP_NOARG, &frame->retcode[1]);
 		err |= __put_user(OR_R0_R0, &frame->retcode[2]);
 		err |= __put_user(OR_R0_R0, &frame->retcode[3]);
 		err |= __put_user(OR_R0_R0, &frame->retcode[4]);
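
TRAP_NOARG only changes the trapa vector the signal trampoline issues: SH-3/SH-4 dispatch syscalls through traps 0x10-0x1f, while SH-2/SH-2A needs the 0x20 range. A hedged decode of the retcode words written above (where the sigreturn syscall number ends up in retcode[] is an assumption inferred from MOVW(7)):

    /*
     *   0x9305  mov.w  @(disp,pc),r3   MOVW(7): loads the word 14 bytes
     *                                  ahead (retcode[7], assumed to hold
     *                                  the sigreturn number) into r3
     *   0xc310  trapa  #0x10           TRAP_NOARG on SH-3/SH-4
     *   0xc320  trapa  #0x20           TRAP_NOARG on SH-2/SH-2A
     *   0x200b  or     r0,r0           padding around the hardware bug
     *                                  mentioned in the #define
     */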
diff --git a/arch/sh/kernel/stacktrace.c b/arch/sh/kernel/stacktrace.c
new file mode 100644
index 0000000..0d5268a
--- /dev/null
+++ b/arch/sh/kernel/stacktrace.c
@@ -0,0 +1,43 @@
+/*
+ * arch/sh/kernel/stacktrace.c
+ *
+ * Stack trace management functions
+ *
+ *  Copyright (C) 2006  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/sched.h>
+#include <linux/stacktrace.h>
+#include <linux/thread_info.h>
+#include <asm/ptrace.h>
+
+/*
+ * Save stack-backtrace addresses into a stack_trace buffer.
+ */
+void save_stack_trace(struct stack_trace *trace, struct task_struct *task)
+{
+	unsigned long *sp;
+
+	if (!task)
+		task = current;
+	if (task == current)
+		sp = (unsigned long *)current_stack_pointer;
+	else
+		sp = (unsigned long *)task->thread.sp;
+
+	while (!kstack_end(sp)) {
+		unsigned long addr = *sp++;
+
+		if (__kernel_text_address(addr)) {
+			if (trace->skip > 0)
+				trace->skip--;
+			else
+				trace->entries[trace->nr_entries++] = addr;
+			if (trace->nr_entries >= trace->max_entries)
+				break;
+		}
+	}
+}
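
save_stack_trace() simply scans the chosen stack for values that fall inside kernel text, honouring the caller's skip count and entry limit. A hedged usage sketch; the buffer size and skip value are illustrative:

    static unsigned long demo_entries[8];

    static void demo_dump_current_stack(void)
    {
            struct stack_trace trace = {
                    .entries     = demo_entries,
                    .max_entries = ARRAY_SIZE(demo_entries),
                    .skip        = 1,       /* drop this helper itself */
            };
            int i;

            save_stack_trace(&trace, NULL); /* NULL selects current */

            for (i = 0; i < trace.nr_entries; i++)
                    printk(KERN_DEBUG "  [<%08lx>]\n", demo_entries[i]);
    }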
diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c
index 8fde950..5083b6e 100644
--- a/arch/sh/kernel/sys_sh.c
+++ b/arch/sh/kernel/sys_sh.c
@@ -33,14 +33,15 @@
  */
 asmlinkage int sys_pipe(unsigned long r4, unsigned long r5,
 	unsigned long r6, unsigned long r7,
-	struct pt_regs regs)
+	struct pt_regs __regs)
 {
+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
 	int fd[2];
 	int error;
 
 	error = do_pipe(fd);
 	if (!error) {
-		regs.regs[1] = fd[1];
+		regs->regs[1] = fd[1];
 		return fd[0];
 	}
 	return error;
@@ -50,6 +51,7 @@
 
 EXPORT_SYMBOL(shm_align_mask);
 
+#ifdef CONFIG_MMU
 /*
  * To avoid cache aliases, we map the shared page with same color.
  */
@@ -135,6 +137,7 @@
 			addr = COLOUR_ALIGN(addr, pgoff);
 	}
 }
+#endif /* CONFIG_MMU */
 
 static inline long
 do_mmap2(unsigned long addr, unsigned long len, unsigned long prot, 
diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c
index 57e708d..c206c95 100644
--- a/arch/sh/kernel/time.c
+++ b/arch/sh/kernel/time.c
@@ -13,6 +13,8 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/profile.h>
+#include <linux/timex.h>
+#include <linux/sched.h>
 #include <asm/clock.h>
 #include <asm/rtc.h>
 #include <asm/timer.h>
@@ -50,15 +52,20 @@
 #ifndef CONFIG_GENERIC_TIME
 void do_gettimeofday(struct timeval *tv)
 {
+	unsigned long flags;
 	unsigned long seq;
 	unsigned long usec, sec;
 
 	do {
-		seq = read_seqbegin(&xtime_lock);
+		/*
+		 * Turn off IRQs when grabbing xtime_lock, so that
+		 * the sys_timer get_offset code doesn't have to handle it.
+		 */
+		seq = read_seqbegin_irqsave(&xtime_lock, flags);
 		usec = get_timer_offset();
 		sec = xtime.tv_sec;
-		usec += xtime.tv_nsec / 1000;
-	} while (read_seqretry(&xtime_lock, seq));
+		usec += xtime.tv_nsec / NSEC_PER_USEC;
+	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
 
 	while (usec >= 1000000) {
 		usec -= 1000000;
@@ -85,7 +92,7 @@
 	 * wall time.  Discover what correction gettimeofday() would have
 	 * made, and then undo it!
 	 */
-	nsec -= 1000 * get_timer_offset();
+	nsec -= get_timer_offset() * NSEC_PER_USEC;
 
 	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
 	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
@@ -169,6 +176,108 @@
 	.resume	 = timer_resume,
 };
 
+#ifdef CONFIG_NO_IDLE_HZ
+static int timer_dyn_tick_enable(void)
+{
+	struct dyn_tick_timer *dyn_tick = sys_timer->dyn_tick;
+	unsigned long flags;
+	int ret = -ENODEV;
+
+	if (dyn_tick) {
+		spin_lock_irqsave(&dyn_tick->lock, flags);
+		ret = 0;
+		if (!(dyn_tick->state & DYN_TICK_ENABLED)) {
+			ret = dyn_tick->enable();
+
+			if (ret == 0)
+				dyn_tick->state |= DYN_TICK_ENABLED;
+		}
+		spin_unlock_irqrestore(&dyn_tick->lock, flags);
+	}
+
+	return ret;
+}
+
+static int timer_dyn_tick_disable(void)
+{
+	struct dyn_tick_timer *dyn_tick = sys_timer->dyn_tick;
+	unsigned long flags;
+	int ret = -ENODEV;
+
+	if (dyn_tick) {
+		spin_lock_irqsave(&dyn_tick->lock, flags);
+		ret = 0;
+		if (dyn_tick->state & DYN_TICK_ENABLED) {
+			ret = dyn_tick->disable();
+
+			if (ret == 0)
+				dyn_tick->state &= ~DYN_TICK_ENABLED;
+		}
+		spin_unlock_irqrestore(&dyn_tick->lock, flags);
+	}
+
+	return ret;
+}
+
+/*
+ * Reprogram the system timer for at least the calculated time interval.
+ * This function should be called from the idle thread with IRQs disabled,
+ * immediately before sleeping.
+ */
+void timer_dyn_reprogram(void)
+{
+	struct dyn_tick_timer *dyn_tick = sys_timer->dyn_tick;
+	unsigned long next, seq, flags;
+
+	if (!dyn_tick)
+		return;
+
+	spin_lock_irqsave(&dyn_tick->lock, flags);
+	if (dyn_tick->state & DYN_TICK_ENABLED) {
+		next = next_timer_interrupt();
+		do {
+			seq = read_seqbegin(&xtime_lock);
+			dyn_tick->reprogram(next - jiffies);
+		} while (read_seqretry(&xtime_lock, seq));
+	}
+	spin_unlock_irqrestore(&dyn_tick->lock, flags);
+}
+
+static ssize_t timer_show_dyn_tick(struct sys_device *dev, char *buf)
+{
+	return sprintf(buf, "%i\n",
+		       (sys_timer->dyn_tick->state & DYN_TICK_ENABLED) >> 1);
+}
+
+static ssize_t timer_set_dyn_tick(struct sys_device *dev, const char *buf,
+				  size_t count)
+{
+	unsigned int enable = simple_strtoul(buf, NULL, 2);
+
+	if (enable)
+		timer_dyn_tick_enable();
+	else
+		timer_dyn_tick_disable();
+
+	return count;
+}
+static SYSDEV_ATTR(dyn_tick, 0644, timer_show_dyn_tick, timer_set_dyn_tick);
+
+/*
+ * dyntick=enable|disable
+ */
+static char dyntick_str[4] __initdata = "";
+
+static int __init dyntick_setup(char *str)
+{
+	if (str)
+		strlcpy(dyntick_str, str, sizeof(dyntick_str));
+	return 1;
+}
+
+__setup("dyntick=", dyntick_setup);
+#endif
+
 static int __init timer_init_sysfs(void)
 {
 	int ret = sysdev_class_register(&timer_sysclass);
@@ -176,7 +285,22 @@
 		return ret;
 
 	sys_timer->dev.cls = &timer_sysclass;
-	return sysdev_register(&sys_timer->dev);
+	ret = sysdev_register(&sys_timer->dev);
+
+#ifdef CONFIG_NO_IDLE_HZ
+	if (ret == 0 && sys_timer->dyn_tick) {
+		ret = sysdev_create_file(&sys_timer->dev, &attr_dyn_tick);
+
+		/*
+		 * Turn on dynamic tick after calibrate delay
+		 * for correct bogomips
+		 */
+		if (ret == 0 && dyntick_str[0] == 'e')
+			ret = timer_dyn_tick_enable();
+	}
+#endif
+
+	return ret;
 }
 device_initcall(timer_init_sysfs);
 
@@ -200,6 +324,11 @@
 	sys_timer = get_sys_timer();
 	printk(KERN_INFO "Using %s for system timer\n", sys_timer->name);
 
+#ifdef CONFIG_NO_IDLE_HZ
+	if (sys_timer->dyn_tick)
+		spin_lock_init(&sys_timer->dyn_tick->lock);
+#endif
+
 #if defined(CONFIG_SH_KGDB)
 	/*
 	 * Set up kgdb as requested. We do it here because the serial
diff --git a/arch/sh/kernel/timers/Makefile b/arch/sh/kernel/timers/Makefile
index 151a6a3..bcf244f 100644
--- a/arch/sh/kernel/timers/Makefile
+++ b/arch/sh/kernel/timers/Makefile
@@ -5,4 +5,6 @@
 obj-y	:= timer.o
 
 obj-$(CONFIG_SH_TMU)		+= timer-tmu.o
+obj-$(CONFIG_SH_MTU2)		+= timer-mtu2.o
+obj-$(CONFIG_SH_CMT)		+= timer-cmt.o
 
diff --git a/arch/sh/kernel/timers/timer-cmt.c b/arch/sh/kernel/timers/timer-cmt.c
new file mode 100644
index 0000000..a574b93
--- /dev/null
+++ b/arch/sh/kernel/timers/timer-cmt.c
@@ -0,0 +1,196 @@
+/*
+ * arch/sh/kernel/timers/timer-cmt.c - CMT Timer Support
+ *
+ *  Copyright (C) 2005  Yoshinori Sato
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/seqlock.h>
+#include <asm/timer.h>
+#include <asm/rtc.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/clock.h>
+
+#if defined(CONFIG_CPU_SUBTYPE_SH7619)
+#define CMT_CMSTR	0xf84a0070
+#define CMT_CMCSR_0	0xf84a0072
+#define CMT_CMCNT_0	0xf84a0074
+#define CMT_CMCOR_0	0xf84a0076
+#define CMT_CMCSR_1	0xf84a0078
+#define CMT_CMCNT_1	0xf84a007a
+#define CMT_CMCOR_1	0xf84a007c
+
+#define STBCR3		0xf80a0000
+#define cmt_clock_enable() do {	ctrl_outb(ctrl_inb(STBCR3) & ~0x10, STBCR3); } while(0)
+#define CMT_CMCSR_INIT	0x0040
+#define CMT_CMCSR_CALIB	0x0000
+#elif defined(CONFIG_CPU_SUBTYPE_SH7206)
+#define CMT_CMSTR	0xfffec000
+#define CMT_CMCSR_0	0xfffec002
+#define CMT_CMCNT_0	0xfffec004
+#define CMT_CMCOR_0	0xfffec006
+
+#define STBCR4		0xfffe040c
+#define cmt_clock_enable() do {	ctrl_outb(ctrl_inb(STBCR4) & ~0x04, STBCR4); } while(0)
+#define CMT_CMCSR_INIT	0x0040
+#define CMT_CMCSR_CALIB	0x0000
+#else
+#error "Unknown CPU SUBTYPE"
+#endif
+
+static unsigned long cmt_timer_get_offset(void)
+{
+	int count;
+	static unsigned short count_p = 0xffff;    /* for the first call after boot */
+	static unsigned long jiffies_p = 0;
+
+	/*
+	 * cache volatile jiffies temporarily; we have IRQs turned off.
+	 */
+	unsigned long jiffies_t;
+
+	/* timer count may underflow right here */
+	count =  ctrl_inw(CMT_CMCOR_0);
+	count -= ctrl_inw(CMT_CMCNT_0);
+
+	jiffies_t = jiffies;
+
+	/*
+	 * avoiding timer inconsistencies (they are rare, but they happen)...
+	 * there is one kind of problem that must be avoided here:
+	 *  1. the timer counter underflows
+	 */
+
+	if (jiffies_t == jiffies_p) {
+		if (count > count_p) {
+			/* the nutcase */
+			if (ctrl_inw(CMT_CMCSR_0) & 0x80) { /* Check CMF bit */
+				count -= LATCH;
+			} else {
+				printk("%s (): hardware timer problem?\n",
+				       __FUNCTION__);
+			}
+		}
+	} else
+		jiffies_p = jiffies_t;
+
+	count_p = count;
+
+	count = ((LATCH-1) - count) * TICK_SIZE;
+	count = (count + LATCH/2) / LATCH;
+
+	return count;
+}
+
+static irqreturn_t cmt_timer_interrupt(int irq, void *dev_id)
+{
+	unsigned long timer_status;
+
+	/* Clear CMF bit */
+	timer_status = ctrl_inw(CMT_CMCSR_0);
+	timer_status &= ~0x80;
+	ctrl_outw(timer_status, CMT_CMCSR_0);
+
+	/*
+	 * Here we are in the timer irq handler. We just have irqs locally
+	 * disabled, but we don't know if the timer_bh is running on the other
+	 * CPU. We need to avoid an SMP race with it. NOTE: we don't need
+	 * the irq version of write_lock because, as just said, we have irqs
+	 * locally disabled. -arca
+	 */
+	write_seqlock(&xtime_lock);
+	handle_timer_tick();
+	write_sequnlock(&xtime_lock);
+
+	return IRQ_HANDLED;
+}
+
+static struct irqaction cmt_irq = {
+	.name		= "timer",
+	.handler	= cmt_timer_interrupt,
+	.flags		= IRQF_DISABLED | IRQF_TIMER,
+	.mask		= CPU_MASK_NONE,
+};
+
+static void cmt_clk_init(struct clk *clk)
+{
+	u8 divisor = CMT_CMCSR_INIT & 0x3;
+	ctrl_inw(CMT_CMCSR_0);
+	ctrl_outw(CMT_CMCSR_INIT, CMT_CMCSR_0);
+	clk->parent = clk_get(NULL, "module_clk");
+	clk->rate = clk->parent->rate / (8 << (divisor << 1));
+}
+
+static void cmt_clk_recalc(struct clk *clk)
+{
+	u8 divisor = ctrl_inw(CMT_CMCSR_0) & 0x3;
+	clk->rate = clk->parent->rate / (8 << (divisor << 1));
+}
+
+static struct clk_ops cmt_clk_ops = {
+	.init		= cmt_clk_init,
+	.recalc		= cmt_clk_recalc,
+};
+
+static struct clk cmt0_clk = {
+	.name		= "cmt0_clk",
+	.ops		= &cmt_clk_ops,
+};
+
+static int cmt_timer_start(void)
+{
+	ctrl_outw(ctrl_inw(CMT_CMSTR) | 0x01, CMT_CMSTR);
+	return 0;
+}
+
+static int cmt_timer_stop(void)
+{
+	ctrl_outw(ctrl_inw(CMT_CMSTR) & ~0x01, CMT_CMSTR);
+	return 0;
+}
+
+static int cmt_timer_init(void)
+{
+	unsigned long interval;
+
+	cmt_clock_enable();
+
+	setup_irq(CONFIG_SH_TIMER_IRQ, &cmt_irq);
+
+	cmt0_clk.parent = clk_get(NULL, "module_clk");
+
+	cmt_timer_stop();
+
+	interval = cmt0_clk.parent->rate / 8 / HZ;
+	printk(KERN_INFO "Interval = %ld\n", interval);
+
+	ctrl_outw(interval, CMT_CMCOR_0);
+
+	clk_register(&cmt0_clk);
+	clk_enable(&cmt0_clk);
+
+	cmt_timer_start();
+
+	return 0;
+}
+
+struct sys_timer_ops cmt_timer_ops = {
+	.init		= cmt_timer_init,
+	.start		= cmt_timer_start,
+	.stop		= cmt_timer_stop,
+#ifndef CONFIG_GENERIC_TIME
+	.get_offset	= cmt_timer_get_offset,
+#endif
+};
+
+struct sys_timer cmt_timer = {
+	.name	= "cmt",
+	.ops	= &cmt_timer_ops,
+};
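The tail of cmt_timer_get_offset() converts a remaining-count value in [0, LATCH) into microseconds since the last tick, rounding to the nearest microsecond. Isolated as a sketch (counts_to_usecs is a hypothetical helper; LATCH is the counts-per-tick value and TICK_SIZE the tick length in microseconds, as used above):

static unsigned long counts_to_usecs(unsigned long remaining)
{
	/* counts elapsed since the last compare match */
	unsigned long elapsed = (LATCH - 1) - remaining;

	/* scale to microseconds, rounding at LATCH/2 */
	return (elapsed * TICK_SIZE + LATCH / 2) / LATCH;
}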
diff --git a/arch/sh/kernel/timers/timer-mtu2.c b/arch/sh/kernel/timers/timer-mtu2.c
new file mode 100644
index 0000000..fffcd1c
--- /dev/null
+++ b/arch/sh/kernel/timers/timer-mtu2.c
@@ -0,0 +1,200 @@
+/*
+ * arch/sh/kernel/timers/timer-mtu2.c - MTU2 Timer Support
+ *
+ *  Copyright (C) 2005  Paul Mundt
+ *
+ * Based off of arch/sh/kernel/timers/timer-tmu.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/seqlock.h>
+#include <asm/timer.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/clock.h>
+
+/*
+ * We use channel 1 for our lowly system timer. Channel 2 would be the other
+ * likely candidate, but we leave it alone as it has higher divisors that
+ * would be of more use to other more interesting applications.
+ *
+ * TODO: Presently we only implement a 16-bit single-channel system timer.
+ * However, we can implement channel cascade if we go the overflow route and
+ * get away with using 2 MTU2 channels as a 32-bit timer.
+ */
+#define MTU2_TSTR	0xfffe4280
+#define MTU2_TCR_1	0xfffe4380
+#define MTU2_TMDR_1	0xfffe4381
+#define MTU2_TIOR_1	0xfffe4382
+#define MTU2_TIER_1	0xfffe4384
+#define MTU2_TSR_1	0xfffe4385
+#define MTU2_TCNT_1	0xfffe4386	/* 16-bit counter */
+#define MTU2_TGRA_1	0xfffe438a
+
+#define STBCR3		0xfffe0408
+
+#define MTU2_TSTR_CST1	(1 << 1)	/* Counter Start 1 */
+
+#define MTU2_TSR_TGFA	(1 << 0)	/* GRA compare match */
+
+#define MTU2_TIER_TGIEA	(1 << 0)	/* GRA compare match  interrupt enable */
+
+#define MTU2_TCR_INIT	0x22
+
+#define MTU2_TCR_CALIB  0x00
+
+static unsigned long mtu2_timer_get_offset(void)
+{
+	int count;
+	static int count_p = 0x7fff;	/* for the first call after boot */
+	static unsigned long jiffies_p = 0;
+
+	/*
+	 * cache volatile jiffies temporarily; we have IRQs turned off.
+	 */
+	unsigned long jiffies_t;
+
+	/* timer count may underflow right here */
+	count = ctrl_inw(MTU2_TCNT_1);	/* read the latched count */
+
+	jiffies_t = jiffies;
+
+	/*
+	 * avoiding timer inconsistencies (they are rare, but they happen)...
+	 * there is one kind of problem that must be avoided here:
+	 *  1. the timer counter underflows
+	 */
+
+	if (jiffies_t == jiffies_p) {
+		if (count > count_p) {
+			if (ctrl_inb(MTU2_TSR_1) & MTU2_TSR_TGFA) {
+				count -= LATCH;
+			} else {
+				printk("%s (): hardware timer problem?\n",
+				       __FUNCTION__);
+			}
+		}
+	} else
+		jiffies_p = jiffies_t;
+
+	count_p = count;
+
+	count = ((LATCH-1) - count) * TICK_SIZE;
+	count = (count + LATCH/2) / LATCH;
+
+	return count;
+}
+
+static irqreturn_t mtu2_timer_interrupt(int irq, void *dev_id)
+{
+	unsigned long timer_status;
+
+	/* Clear TGFA bit */
+	timer_status = ctrl_inb(MTU2_TSR_1);
+	timer_status &= ~MTU2_TSR_TGFA;
+	ctrl_outb(timer_status, MTU2_TSR_1);
+
+	/* Do timer tick */
+	write_seqlock(&xtime_lock);
+	handle_timer_tick();
+	write_sequnlock(&xtime_lock);
+
+	return IRQ_HANDLED;
+}
+
+static struct irqaction mtu2_irq = {
+	.name		= "timer",
+	.handler	= mtu2_timer_interrupt,
+	.flags		= IRQF_DISABLED | IRQF_TIMER,
+	.mask		= CPU_MASK_NONE,
+};
+
+static unsigned int divisors[] = { 1, 4, 16, 64, 1, 1, 256 };
+
+static void mtu2_clk_init(struct clk *clk)
+{
+	u8 idx = MTU2_TCR_INIT & 0x7;
+
+	clk->rate = clk->parent->rate / divisors[idx];
+	/* Start TCNT counting */
+	ctrl_outb(ctrl_inb(MTU2_TSTR) | MTU2_TSTR_CST1, MTU2_TSTR);
+
+}
+
+static void mtu2_clk_recalc(struct clk *clk)
+{
+	u8 idx = ctrl_inb(MTU2_TCR_1) & 0x7;
+	clk->rate = clk->parent->rate / divisors[idx];
+}
+
+static struct clk_ops mtu2_clk_ops = {
+	.init		= mtu2_clk_init,
+	.recalc		= mtu2_clk_recalc,
+};
+
+static struct clk mtu2_clk1 = {
+	.name		= "mtu2_clk1",
+	.ops		= &mtu2_clk_ops,
+};
+
+static int mtu2_timer_start(void)
+{
+	ctrl_outb(ctrl_inb(MTU2_TSTR) | MTU2_TSTR_CST1, MTU2_TSTR);
+	return 0;
+}
+
+static int mtu2_timer_stop(void)
+{
+	ctrl_outb(ctrl_inb(MTU2_TSTR) & ~MTU2_TSTR_CST1, MTU2_TSTR);
+	return 0;
+}
+
+static int mtu2_timer_init(void)
+{
+	u8 tmp;
+	unsigned long interval;
+
+	setup_irq(CONFIG_SH_TIMER_IRQ, &mtu2_irq);
+
+	mtu2_clk1.parent = clk_get(NULL, "module_clk");
+
+	ctrl_outb(ctrl_inb(STBCR3) & (~0x20), STBCR3);
+
+	/* Normal operation */
+	ctrl_outb(0, MTU2_TMDR_1);
+	ctrl_outb(MTU2_TCR_INIT, MTU2_TCR_1);
+	ctrl_outb(0x01, MTU2_TIOR_1);
+
+	/* Enable underflow interrupt */
+	ctrl_outb(ctrl_inb(MTU2_TIER_1) | MTU2_TIER_TGIEA, MTU2_TIER_1);
+
+	interval = CONFIG_SH_PCLK_FREQ / 16 / HZ;
+	printk(KERN_INFO "Interval = %ld\n", interval);
+
+	ctrl_outw(interval, MTU2_TGRA_1);
+	ctrl_outw(0, MTU2_TCNT_1);
+
+	clk_register(&mtu2_clk1);
+	clk_enable(&mtu2_clk1);
+
+	return 0;
+}
+
+struct sys_timer_ops mtu2_timer_ops = {
+	.init		= mtu2_timer_init,
+	.start		= mtu2_timer_start,
+	.stop		= mtu2_timer_stop,
+#ifndef CONFIG_GENERIC_TIME
+	.get_offset	= mtu2_timer_get_offset,
+#endif
+};
+
+struct sys_timer mtu2_timer = {
+	.name	= "mtu2",
+	.ops	= &mtu2_timer_ops,
+};
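As a cross-check on the constants above: MTU2_TCR_INIT is 0x22, whose low three bits select entry 2 of divisors[], i.e. a divide-by-16 prescale of the module clock, which is why mtu2_timer_init() computes the compare-match interval as CONFIG_SH_PCLK_FREQ / 16 / HZ. A sketch of that relationship (the helper name is hypothetical):

static unsigned long mtu2_counts_per_jiffy(unsigned long pclk, u8 tcr)
{
	static const unsigned int divisors[] = { 1, 4, 16, 64, 1, 1, 256 };

	/* counter rate after the TCR-selected prescaler, per jiffy */
	return pclk / divisors[tcr & 0x7] / HZ;
}

With tcr equal to MTU2_TCR_INIT this reduces to pclk / 16 / HZ, matching the value written to MTU2_TGRA_1.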
diff --git a/arch/sh/kernel/timers/timer-tmu.c b/arch/sh/kernel/timers/timer-tmu.c
index 2492701..e060e71 100644
--- a/arch/sh/kernel/timers/timer-tmu.c
+++ b/arch/sh/kernel/timers/timer-tmu.c
@@ -17,7 +17,6 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
-#include <linux/spinlock.h>
 #include <linux/seqlock.h>
 #include <asm/timer.h>
 #include <asm/rtc.h>
@@ -31,13 +30,9 @@
 
 #define TMU0_TCR_CALIB	0x0000
 
-static DEFINE_SPINLOCK(tmu0_lock);
-
 static unsigned long tmu_timer_get_offset(void)
 {
 	int count;
-	unsigned long flags;
-
 	static int count_p = 0x7fffffff;    /* for the first call after boot */
 	static unsigned long jiffies_p = 0;
 
@@ -46,7 +41,6 @@
 	 */
 	unsigned long jiffies_t;
 
-	spin_lock_irqsave(&tmu0_lock, flags);
 	/* timer count may underflow right here */
 	count = ctrl_inl(TMU0_TCNT);	/* read the latched count */
 
@@ -72,7 +66,6 @@
 		jiffies_p = jiffies_t;
 
 	count_p = count;
-	spin_unlock_irqrestore(&tmu0_lock, flags);
 
 	count = ((LATCH-1) - count) * TICK_SIZE;
 	count = (count + LATCH/2) / LATCH;
@@ -106,7 +99,7 @@
 static struct irqaction tmu_irq = {
 	.name		= "timer",
 	.handler	= tmu_timer_interrupt,
-	.flags		= IRQF_DISABLED,
+	.flags		= IRQF_DISABLED | IRQF_TIMER,
 	.mask		= CPU_MASK_NONE,
 };
 
@@ -149,9 +142,9 @@
 {
 	unsigned long interval;
 
-	setup_irq(TIMER_IRQ, &tmu_irq);
+	setup_irq(CONFIG_SH_TIMER_IRQ, &tmu_irq);
 
-	tmu0_clk.parent = clk_get("module_clk");
+	tmu0_clk.parent = clk_get(NULL, "module_clk");
 
 	/* Start TMU0 */
 	tmu_timer_stop();
diff --git a/arch/sh/kernel/timers/timer.c b/arch/sh/kernel/timers/timer.c
index dc1f631..a6bcc91 100644
--- a/arch/sh/kernel/timers/timer.c
+++ b/arch/sh/kernel/timers/timer.c
@@ -17,6 +17,12 @@
 #ifdef CONFIG_SH_TMU
 	&tmu_timer,
 #endif
+#ifdef CONFIG_SH_MTU2
+	&mtu2_timer,
+#endif
+#ifdef CONFIG_SH_CMT
+	&cmt_timer,
+#endif
 	NULL,
 };
 
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
index 53dfa55..3762d9d 100644
--- a/arch/sh/kernel/traps.c
+++ b/arch/sh/kernel/traps.c
@@ -18,13 +18,14 @@
 #include <linux/module.h>
 #include <linux/kallsyms.h>
 #include <linux/io.h>
+#include <linux/debug_locks.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
 
 #ifdef CONFIG_SH_KGDB
 #include <asm/kgdb.h>
-#define CHK_REMOTE_DEBUG(regs)                 	\
-{       					\
+#define CHK_REMOTE_DEBUG(regs)			\
+{						\
 	if (kgdb_debug_hook && !user_mode(regs))\
 		(*kgdb_debug_hook)(regs);       \
 }
@@ -33,8 +34,13 @@
 #endif
 
 #ifdef CONFIG_CPU_SH2
-#define TRAP_RESERVED_INST	4
-#define TRAP_ILLEGAL_SLOT_INST	6
+# define TRAP_RESERVED_INST	4
+# define TRAP_ILLEGAL_SLOT_INST	6
+# define TRAP_ADDRESS_ERROR	9
+# ifdef CONFIG_CPU_SH2A
+#  define TRAP_DIVZERO_ERROR	17
+#  define TRAP_DIVOVF_ERROR	18
+# endif
 #else
 #define TRAP_RESERVED_INST	12
 #define TRAP_ILLEGAL_SLOT_INST	13
@@ -88,7 +94,7 @@
 
 	if (!user_mode(regs) || in_interrupt())
 		dump_mem("Stack: ", regs->regs[15], THREAD_SIZE +
-		 	 (unsigned long)task_stack_page(current));
+			 (unsigned long)task_stack_page(current));
 
 	bust_spinlocks(0);
 	spin_unlock_irq(&die_lock);
@@ -102,8 +108,6 @@
 		die(str, regs, err);
 }
 
-static int handle_unaligned_notify_count = 10;
-
 /*
  * try and fix up kernelspace address errors
  * - userspace errors just cause EFAULT to be returned, resulting in SEGV
@@ -198,7 +202,7 @@
 		if (copy_to_user(dst,src,4))
 			goto fetch_fault;
 		ret = 0;
- 		break;
+		break;
 
 	case 2: /* mov.[bwl] to memory, possibly with pre-decrement */
 		if (instruction & 4)
@@ -222,7 +226,7 @@
 		if (copy_from_user(dst,src,4))
 			goto fetch_fault;
 		ret = 0;
- 		break;
+		break;
 
 	case 6:	/* mov.[bwl] from memory, possibly with post-increment */
 		src = (unsigned char*) *rm;
@@ -230,7 +234,7 @@
 			*rm += count;
 		dst = (unsigned char*) rn;
 		*(unsigned long*)dst = 0;
-		
+
 #ifdef __LITTLE_ENDIAN__
 		if (copy_from_user(dst, src, count))
 			goto fetch_fault;
@@ -241,7 +245,7 @@
 		}
 #else
 		dst += 4-count;
-		
+
 		if (copy_from_user(dst, src, count))
 			goto fetch_fault;
 
@@ -320,7 +324,8 @@
 			return -EFAULT;
 
 		/* kernel */
-		die("delay-slot-insn faulting in handle_unaligned_delayslot", regs, 0);
+		die("delay-slot-insn faulting in handle_unaligned_delayslot",
+		    regs, 0);
 	}
 
 	return handle_unaligned_ins(instruction,regs);
@@ -342,6 +347,13 @@
 #define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4)
 #define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)
 
+/*
+ * XXX: SH-2A needs this too, but it needs an overhaul thanks to mixed 32-bit
+ * opcodes..
+ */
+#ifndef CONFIG_CPU_SH2A
+static int handle_unaligned_notify_count = 10;
+
 static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
 {
 	u_int rm;
@@ -354,7 +366,8 @@
 	if (user_mode(regs) && handle_unaligned_notify_count>0) {
 		handle_unaligned_notify_count--;
 
-		printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
+		printk(KERN_NOTICE "Fixing up unaligned userspace access "
+		       "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
 		       current->comm,current->pid,(u16*)regs->pc,instruction);
 	}
 
@@ -478,32 +491,58 @@
 		regs->pc += 2;
 	return ret;
 }
+#endif /* CONFIG_CPU_SH2A */
+
+#ifdef CONFIG_CPU_HAS_SR_RB
+#define lookup_exception_vector(x)	\
+	__asm__ __volatile__ ("stc r2_bank, %0\n\t" : "=r" ((x)))
+#else
+#define lookup_exception_vector(x)	\
+	__asm__ __volatile__ ("mov r4, %0\n\t" : "=r" ((x)))
+#endif
 
 /*
- * Handle various address error exceptions
+ * Handle various address error exceptions:
+ *  - instruction address error:
+ *       misaligned PC
+ *       PC >= 0x80000000 in user mode
+ *  - data address error (read and write)
+ *       misaligned data access
+ *       access to >= 0x80000000 in user mode
+ * Unfortunately we can't distinguish between instruction address errors
+ * and data address errors caused by read accesses.
  */
-asmlinkage void do_address_error(struct pt_regs *regs, 
+asmlinkage void do_address_error(struct pt_regs *regs,
 				 unsigned long writeaccess,
 				 unsigned long address)
 {
-	unsigned long error_code;
+	unsigned long error_code = 0;
 	mm_segment_t oldfs;
+	siginfo_t info;
+#ifndef CONFIG_CPU_SH2A
 	u16 instruction;
 	int tmp;
+#endif
 
-	asm volatile("stc       r2_bank,%0": "=r" (error_code));
+	/* Intentional ifdef */
+#ifdef CONFIG_CPU_HAS_SR_RB
+	lookup_exception_vector(error_code);
+#endif
 
 	oldfs = get_fs();
 
 	if (user_mode(regs)) {
+		int si_code = BUS_ADRERR;
+
 		local_irq_enable();
-		current->thread.error_code = error_code;
-		current->thread.trap_no = (writeaccess) ? 8 : 7;
 
 		/* bad PC is not something we can fix */
-		if (regs->pc & 1)
+		if (regs->pc & 1) {
+			si_code = BUS_ADRALN;
 			goto uspace_segv;
+		}
 
+#ifndef CONFIG_CPU_SH2A
 		set_fs(USER_DS);
 		if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
 			/* Argh. Fault on the instruction itself.
@@ -518,14 +557,23 @@
 
 		if (tmp==0)
 			return; /* sorted */
+#endif
 
-	uspace_segv:
-		printk(KERN_NOTICE "Killing process \"%s\" due to unaligned access\n", current->comm);
-		force_sig(SIGSEGV, current);
+uspace_segv:
+		printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned "
+		       "access (PC %lx PR %lx)\n", current->comm, regs->pc,
+		       regs->pr);
+
+		info.si_signo = SIGBUS;
+		info.si_errno = 0;
+		info.si_code = si_code;
+		info.si_addr = (void *) address;
+		force_sig_info(SIGBUS, &info, current);
 	} else {
 		if (regs->pc & 1)
 			die("unaligned program counter", regs, error_code);
 
+#ifndef CONFIG_CPU_SH2A
 		set_fs(KERNEL_DS);
 		if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
 			/* Argh. Fault on the instruction itself.
@@ -537,6 +585,12 @@
 
 		handle_unaligned_access(instruction, regs);
 		set_fs(oldfs);
+#else
+		printk(KERN_NOTICE "Killing process \"%s\" due to unaligned "
+		       "access\n", current->comm);
+
+		force_sig(SIGSEGV, current);
+#endif
 	}
 }
 
@@ -548,7 +602,7 @@
 {
 	unsigned short inst;
 
-	/* 
+	/*
 	 * Safe guard if DSP mode is already enabled or we're lacking
 	 * the DSP altogether.
 	 */
@@ -569,27 +623,49 @@
 #define is_dsp_inst(regs)	(0)
 #endif /* CONFIG_SH_DSP */
 
+#ifdef CONFIG_CPU_SH2A
+asmlinkage void do_divide_error(unsigned long r4, unsigned long r5,
+				unsigned long r6, unsigned long r7,
+				struct pt_regs __regs)
+{
+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+	siginfo_t info;
+
+	switch (r4) {
+	case TRAP_DIVZERO_ERROR:
+		info.si_code = FPE_INTDIV;
+		break;
+	case TRAP_DIVOVF_ERROR:
+		info.si_code = FPE_INTOVF;
+		break;
+	}
+
+	force_sig_info(SIGFPE, &info, current);
+}
+#endif
+
 /* arch/sh/kernel/cpu/sh4/fpu.c */
 extern int do_fpu_inst(unsigned short, struct pt_regs *);
 extern asmlinkage void do_fpu_state_restore(unsigned long r4, unsigned long r5,
-		unsigned long r6, unsigned long r7, struct pt_regs regs);
+		unsigned long r6, unsigned long r7, struct pt_regs __regs);
 
 asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
 				unsigned long r6, unsigned long r7,
-				struct pt_regs regs)
+				struct pt_regs __regs)
 {
+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
 	unsigned long error_code;
 	struct task_struct *tsk = current;
 
 #ifdef CONFIG_SH_FPU_EMU
-	unsigned short inst;
+	unsigned short inst = 0;
 	int err;
 
-	get_user(inst, (unsigned short*)regs.pc);
+	get_user(inst, (unsigned short*)regs->pc);
 
-	err = do_fpu_inst(inst, &regs);
+	err = do_fpu_inst(inst, regs);
 	if (!err) {
-		regs.pc += 2;
+		regs->pc += 2;
 		return;
 	}
 	/* not a FPU inst. */
@@ -597,20 +673,19 @@
 
 #ifdef CONFIG_SH_DSP
 	/* Check if it's a DSP instruction */
- 	if (is_dsp_inst(&regs)) {
+	if (is_dsp_inst(regs)) {
 		/* Enable DSP mode, and restart instruction. */
-		regs.sr |= SR_DSP;
+		regs->sr |= SR_DSP;
 		return;
 	}
 #endif
 
-	asm volatile("stc	r2_bank, %0": "=r" (error_code));
+	lookup_exception_vector(error_code);
+
 	local_irq_enable();
-	tsk->thread.error_code = error_code;
-	tsk->thread.trap_no = TRAP_RESERVED_INST;
-	CHK_REMOTE_DEBUG(&regs);
+	CHK_REMOTE_DEBUG(regs);
 	force_sig(SIGILL, tsk);
-	die_if_no_fixup("reserved instruction", &regs, error_code);
+	die_if_no_fixup("reserved instruction", regs, error_code);
 }
 
 #ifdef CONFIG_SH_FPU_EMU
@@ -658,39 +733,41 @@
 
 asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
 				unsigned long r6, unsigned long r7,
-				struct pt_regs regs)
+				struct pt_regs __regs)
 {
+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
 	unsigned long error_code;
 	struct task_struct *tsk = current;
 #ifdef CONFIG_SH_FPU_EMU
-	unsigned short inst;
+	unsigned short inst = 0;
 
-	get_user(inst, (unsigned short *)regs.pc + 1);
-	if (!do_fpu_inst(inst, &regs)) {
-		get_user(inst, (unsigned short *)regs.pc);
-		if (!emulate_branch(inst, &regs))
+	get_user(inst, (unsigned short *)regs->pc + 1);
+	if (!do_fpu_inst(inst, regs)) {
+		get_user(inst, (unsigned short *)regs->pc);
+		if (!emulate_branch(inst, regs))
 			return;
 		/* fault in branch.*/
 	}
 	/* not a FPU inst. */
 #endif
 
-	asm volatile("stc	r2_bank, %0": "=r" (error_code));
+	lookup_exception_vector(error_code);
+
 	local_irq_enable();
-	tsk->thread.error_code = error_code;
-	tsk->thread.trap_no = TRAP_RESERVED_INST;
-	CHK_REMOTE_DEBUG(&regs);
+	CHK_REMOTE_DEBUG(regs);
 	force_sig(SIGILL, tsk);
-	die_if_no_fixup("illegal slot instruction", &regs, error_code);
+	die_if_no_fixup("illegal slot instruction", regs, error_code);
 }
 
 asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
 				   unsigned long r6, unsigned long r7,
-				   struct pt_regs regs)
+				   struct pt_regs __regs)
 {
+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
 	long ex;
-	asm volatile("stc	r2_bank, %0" : "=r" (ex));
-	die_if_kernel("exception", &regs, ex);
+
+	lookup_exception_vector(ex);
+	die_if_kernel("exception", regs, ex);
 }
 
 #if defined(CONFIG_SH_STANDARD_BIOS)
@@ -735,12 +812,16 @@
 {
 	extern void *exception_handling_table[];
 	void *old_handler;
-	
+
 	old_handler = exception_handling_table[vec];
 	exception_handling_table[vec] = handler;
 	return old_handler;
 }
 
+extern asmlinkage void address_error_handler(unsigned long r4, unsigned long r5,
+					     unsigned long r6, unsigned long r7,
+					     struct pt_regs __regs);
+
 void __init trap_init(void)
 {
 	set_exception_table_vec(TRAP_RESERVED_INST, do_reserved_inst);
@@ -759,7 +840,15 @@
 	set_exception_table_evt(0x800, do_fpu_state_restore);
 	set_exception_table_evt(0x820, do_fpu_state_restore);
 #endif
-		
+
+#ifdef CONFIG_CPU_SH2
+	set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_handler);
+#endif
+#ifdef CONFIG_CPU_SH2A
+	set_exception_table_vec(TRAP_DIVZERO_ERROR, do_divide_error);
+	set_exception_table_vec(TRAP_DIVOVF_ERROR, do_divide_error);
+#endif
+
 	/* Setup VBR for boot cpu */
 	per_cpu_trap_init();
 }
@@ -784,6 +873,11 @@
 	}
 
 	printk("\n");
+
+	if (!tsk)
+		tsk = current;
+
+	debug_show_held_locks(tsk);
 }
 
 void show_stack(struct task_struct *tsk, unsigned long *sp)
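trap_init() above wires per-vector handlers through set_exception_table_vec(). A hypothetical example of adding one more vector under the same scheme (the vector number, handler name, and handler body are made up for illustration; only the registration call mirrors the code above):

#define TRAP_MY_FAULT	20

asmlinkage void my_fault_handler(unsigned long r4, unsigned long r5,
				 unsigned long r6, unsigned long r7,
				 struct pt_regs __regs)
{
	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);

	printk(KERN_NOTICE "my fault trap at pc=%08lx\n", regs->pc);
	force_sig(SIGILL, current);
}

/* in trap_init(): */
	set_exception_table_vec(TRAP_MY_FAULT, my_fault_handler);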
diff --git a/arch/sh/kernel/vsyscall/vsyscall.c b/arch/sh/kernel/vsyscall/vsyscall.c
index 075d6cc..deb4694 100644
--- a/arch/sh/kernel/vsyscall/vsyscall.c
+++ b/arch/sh/kernel/vsyscall/vsyscall.c
@@ -97,7 +97,7 @@
 		goto up_fail;
 	}
 
-	vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (!vma) {
 		ret = -ENOMEM;
 		goto up_fail;
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 9dd6064..4e0362f 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -4,8 +4,12 @@
 # Processor families
 #
 config CPU_SH2
+	select SH_WRITETHROUGH if !CPU_SH2A
 	bool
-	select SH_WRITETHROUGH
+
+config CPU_SH2A
+	bool
+	select CPU_SH2
 
 config CPU_SH3
 	bool
@@ -16,6 +20,7 @@
 	bool
 	select CPU_HAS_INTEVT
 	select CPU_HAS_SR_RB
+	select CPU_HAS_PTEA if !CPU_SUBTYPE_ST40
 
 config CPU_SH4A
 	bool
@@ -40,6 +45,16 @@
 	bool "Support SH7604 processor"
 	select CPU_SH2
 
+config CPU_SUBTYPE_SH7619
+	bool "Support SH7619 processor"
+	select CPU_SH2
+
+comment "SH-2A Processor Support"
+
+config CPU_SUBTYPE_SH7206
+	bool "Support SH7206 processor"
+	select CPU_SH2A
+
 comment "SH-3 Processor Support"
 
 config CPU_SUBTYPE_SH7300
@@ -89,6 +104,7 @@
 config CPU_SUBTYPE_SH7750
 	bool "Support SH7750 processor"
 	select CPU_SH4
+	select CPU_HAS_IPR_IRQ
 	help
 	  Select SH7750 if you have a 200 Mhz SH-4 HD6417750 CPU.
 
@@ -104,15 +120,18 @@
 	bool "Support SH7750R processor"
 	select CPU_SH4
 	select CPU_SUBTYPE_SH7750
+	select CPU_HAS_IPR_IRQ
 
 config CPU_SUBTYPE_SH7750S
 	bool "Support SH7750S processor"
 	select CPU_SH4
 	select CPU_SUBTYPE_SH7750
+	select CPU_HAS_IPR_IRQ
 
 config CPU_SUBTYPE_SH7751
 	bool "Support SH7751 processor"
 	select CPU_SH4
+	select CPU_HAS_IPR_IRQ
 	help
 	  Select SH7751 if you have a 166 Mhz SH-4 HD6417751 CPU,
 	  or if you have a HD6417751R CPU.
@@ -121,6 +140,7 @@
 	bool "Support SH7751R processor"
 	select CPU_SH4
 	select CPU_SUBTYPE_SH7751
+	select CPU_HAS_IPR_IRQ
 
 config CPU_SUBTYPE_SH7760
 	bool "Support SH7760 processor"
@@ -157,6 +177,11 @@
 	select CPU_SH4A
 	select CPU_HAS_INTC2_IRQ
 
+config CPU_SUBTYPE_SH7785
+	bool "Support SH7785 processor"
+	select CPU_SH4A
+	select CPU_HAS_INTC2_IRQ
+
 comment "SH4AL-DSP Processor Support"
 
 config CPU_SUBTYPE_SH73180
@@ -216,13 +241,22 @@
 
 config 32BIT
 	bool "Support 32-bit physical addressing through PMB"
-	depends on CPU_SH4A && MMU
+	depends on CPU_SH4A && MMU && (!X2TLB || BROKEN)
 	default y
 	help
 	  If you say Y here, physical addressing will be extended to
 	  32-bits through the SH-4A PMB. If this is not set, legacy
 	  29-bit physical addressing will be used.
 
+config X2TLB
+	bool "Enable extended TLB mode"
+	depends on CPU_SUBTYPE_SH7785 && MMU && EXPERIMENTAL
+	help
+	  Selecting this option will enable the extended mode of the SH-X2
+	  TLB. For legacy SH-X behaviour and interoperability, say N. For
+	  all of the fun new features and a willingness to submit bug reports,
+	  say Y.
+
 config VSYSCALL
 	bool "Support vsyscall page"
 	depends on MMU
@@ -237,16 +271,52 @@
 	  (the default value) say Y.
 
 choice
+	prompt "Kernel page size"
+	default PAGE_SIZE_4KB
+
+config PAGE_SIZE_4KB
+	bool "4kB"
+	help
+	  This is the default page size used by all SuperH CPUs.
+
+config PAGE_SIZE_8KB
+	bool "8kB"
+	depends on EXPERIMENTAL && X2TLB
+	help
+	  This enables 8kB pages as supported by SH-X2 and later MMUs.
+
+config PAGE_SIZE_64KB
+	bool "64kB"
+	depends on EXPERIMENTAL && CPU_SH4
+	help
+	  This enables support for 64kB pages, possible on SH-4 and
+	  later CPUs. Highly experimental, not recommended.
+
+endchoice
+
+choice
 	prompt "HugeTLB page size"
 	depends on HUGETLB_PAGE && CPU_SH4 && MMU
 	default HUGETLB_PAGE_SIZE_64K
 
 config HUGETLB_PAGE_SIZE_64K
-	bool "64K"
+	bool "64kB"
+
+config HUGETLB_PAGE_SIZE_256K
+	bool "256kB"
+	depends on X2TLB
 
 config HUGETLB_PAGE_SIZE_1MB
 	bool "1MB"
 
+config HUGETLB_PAGE_SIZE_4MB
+	bool "4MB"
+	depends on X2TLB
+
+config HUGETLB_PAGE_SIZE_64MB
+	bool "64MB"
+	depends on X2TLB
+
 endchoice
 
 source "mm/Kconfig"
@@ -274,7 +344,6 @@
 
 config SH_WRITETHROUGH
 	bool "Use write-through caching"
-	default y if CPU_SH2
 	help
 	  Selecting this option will configure the caches in write-through
 	  mode, as opposed to the default write-back configuration.
diff --git a/arch/sh/mm/cache-sh2.c b/arch/sh/mm/cache-sh2.c
index 2689cb2..6614033 100644
--- a/arch/sh/mm/cache-sh2.c
+++ b/arch/sh/mm/cache-sh2.c
@@ -5,6 +5,7 @@
  *
  * Released under the terms of the GNU GPL v2.0.
  */
+
 #include <linux/init.h>
 #include <linux/mm.h>
 
@@ -14,37 +15,43 @@
 #include <asm/cacheflush.h>
 #include <asm/io.h>
 
-/*
- * Calculate the OC address and set the way bit on the SH-2.
- *
- * We must have already jump_to_P2()'ed prior to calling this
- * function, since we rely on CCR manipulation to do the
- * Right Thing(tm).
- */
-unsigned long __get_oc_addr(unsigned long set, unsigned long way)
+void __flush_wback_region(void *start, int size)
 {
-	unsigned long ccr;
+	unsigned long v;
+	unsigned long begin, end;
 
-	/*
-	 * On SH-2 the way bit isn't tracked in the address field
-	 * if we're doing address array access .. instead, we need
-	 * to manually switch out the way in the CCR.
-	 */
-	ccr = ctrl_inl(CCR);
-	ccr &= ~0x00c0;
-	ccr |= way << cpu_data->dcache.way_shift;
+	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
+	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
+		& ~(L1_CACHE_BYTES-1);
+	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
+		/* FIXME cache purge */
+		ctrl_outl((v & 0x1ffffc00), (v & 0x00000ff0) | 0x00000008);
+	}
+}
 
-	/*
-	 * Despite the number of sets being halved, we end up losing
-	 * the first 2 ways to OCRAM instead of the last 2 (if we're
-	 * 4-way). As a result, forcibly setting the W1 bit handily
-	 * bumps us up 2 ways.
-	 */
-	if (ccr & CCR_CACHE_ORA)
-		ccr |= 1 << (cpu_data->dcache.way_shift + 1);
+void __flush_purge_region(void *start, int size)
+{
+	unsigned long v;
+	unsigned long begin, end;
 
-	ctrl_outl(ccr, CCR);
+	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
+	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
+		& ~(L1_CACHE_BYTES-1);
+	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
+		ctrl_outl((v & 0x1ffffc00), (v & 0x00000ff0) | 0x00000008);
+	}
+}
 
-	return CACHE_OC_ADDRESS_ARRAY | (set << cpu_data->dcache.entry_shift);
+void __flush_invalidate_region(void *start, int size)
+{
+	unsigned long v;
+	unsigned long begin, end;
+
+	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
+	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
+		& ~(L1_CACHE_BYTES-1);
+	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
+		ctrl_outl((v & 0x1ffffc00), (v & 0x00000ff0) | 0x00000008);
+	}
 }
 
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index e48cc22..ae531af 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -11,12 +11,8 @@
  */
 #include <linux/init.h>
 #include <linux/mm.h>
-#include <asm/addrspace.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/cache.h>
-#include <asm/io.h>
-#include <asm/pgalloc.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
@@ -83,9 +79,9 @@
  */
 
 /* Worst case assumed to be 64k cache, direct-mapped i.e. 4 synonym bits. */
-#define MAX_P3_SEMAPHORES 16
+#define MAX_P3_MUTEXES 16
 
-struct semaphore p3map_sem[MAX_P3_SEMAPHORES];
+struct mutex p3map_mutex[MAX_P3_MUTEXES];
 
 void __init p3_cache_init(void)
 {
@@ -115,7 +111,7 @@
 		panic("%s failed.", __FUNCTION__);
 
 	for (i = 0; i < cpu_data->dcache.n_aliases; i++)
-		sema_init(&p3map_sem[i], 1);
+		mutex_init(&p3map_mutex[i]);
 }
 
 /*
@@ -229,7 +225,7 @@
 	 */
 	if ((cpu_data->flags & CPU_HAS_P2_FLUSH_BUG) ||
 	    (start < CACHE_OC_ADDRESS_ARRAY))
-	    	exec_offset = 0x20000000;
+		exec_offset = 0x20000000;
 
 	local_irq_save(flags);
 	__flush_cache_4096(start | SH_CACHE_ASSOC,
@@ -250,7 +246,7 @@
 
 		/* Loop all the D-cache */
 		n = cpu_data->dcache.n_aliases;
-		for (i = 0; i < n; i++, addr += PAGE_SIZE)
+		for (i = 0; i < n; i++, addr += 4096)
 			flush_cache_4096(addr, phys);
 	}
 
diff --git a/arch/sh/mm/clear_page.S b/arch/sh/mm/clear_page.S
index 7b96425..8a70613 100644
--- a/arch/sh/mm/clear_page.S
+++ b/arch/sh/mm/clear_page.S
@@ -1,12 +1,12 @@
-/* $Id: clear_page.S,v 1.13 2003/08/25 17:03:10 lethal Exp $
- *
+/*
  * __clear_user_page, __clear_user, clear_page implementation of SuperH
  *
  * Copyright (C) 2001  Kaz Kojima
  * Copyright (C) 2001, 2002  Niibe Yutaka
- *
+ * Copyright (C) 2006  Paul Mundt
  */
 #include <linux/linkage.h>
+#include <asm/page.h>
 
 /*
  * clear_page_slow
@@ -18,11 +18,11 @@
 /*
  * r0 --- scratch
  * r4 --- to
- * r5 --- to + 4096
+ * r5 --- to + PAGE_SIZE
  */
 ENTRY(clear_page_slow)
 	mov	r4,r5
-	mov.w	.Llimit,r0
+	mov.l	.Llimit,r0
 	add	r0,r5
 	mov	#0,r0
 	!
@@ -50,7 +50,7 @@
 	!
 	rts
 	 nop
-.Llimit:	.word	(4096-28)
+.Llimit:	.long	(PAGE_SIZE-28)
 
 ENTRY(__clear_user)
 	!
@@ -164,10 +164,10 @@
  * r0 --- scratch 
  * r4 --- to
  * r5 --- orig_to
- * r6 --- to + 4096
+ * r6 --- to + PAGE_SIZE
  */
 ENTRY(__clear_user_page)
-	mov.w	.L4096,r0
+	mov.l	.Lpsz,r0
 	mov	r4,r6
 	add	r0,r6
 	mov	#0,r0
@@ -191,7 +191,7 @@
 	!
 	rts
 	 nop
-.L4096:	.word	4096
+.Lpsz:	.long	PAGE_SIZE
 
 #endif
 
diff --git a/arch/sh/mm/copy_page.S b/arch/sh/mm/copy_page.S
index 1addffe..397c94c 100644
--- a/arch/sh/mm/copy_page.S
+++ b/arch/sh/mm/copy_page.S
@@ -1,12 +1,12 @@
-/* $Id: copy_page.S,v 1.8 2003/08/25 17:03:10 lethal Exp $
- *
+/*
  * copy_page, __copy_user_page, __copy_user implementation of SuperH
  *
  * Copyright (C) 2001  Niibe Yutaka & Kaz Kojima
  * Copyright (C) 2002  Toshinobu Sugioka
- *
+ * Copyright (C) 2006  Paul Mundt
  */
 #include <linux/linkage.h>
+#include <asm/page.h>
 
 /*
  * copy_page_slow
@@ -18,7 +18,7 @@
 
 /*
  * r0, r1, r2, r3, r4, r5, r6, r7 --- scratch 
- * r8 --- from + 4096
+ * r8 --- from + PAGE_SIZE
  * r9 --- not used
  * r10 --- to
  * r11 --- from
@@ -30,7 +30,7 @@
 	mov	r4,r10
 	mov	r5,r11
 	mov	r5,r8
-	mov.w	.L4096,r0
+	mov.l	.Lpsz,r0
 	add	r0,r8
 	!
 1:	mov.l	@r11+,r0
@@ -80,7 +80,7 @@
 
 /*
  * r0, r1, r2, r3, r4, r5, r6, r7 --- scratch 
- * r8 --- from + 4096
+ * r8 --- from + PAGE_SIZE
  * r9 --- orig_to
  * r10 --- to
  * r11 --- from
@@ -94,7 +94,7 @@
 	mov	r5,r11
 	mov	r6,r9
 	mov	r5,r8
-	mov.w	.L4096,r0
+	mov.l	.Lpsz,r0
 	add	r0,r8
 	!
 1:	ocbi	@r9
@@ -129,7 +129,7 @@
 	rts
 	 nop
 #endif
-.L4096:	.word	4096
+.Lpsz:	.long	PAGE_SIZE
 /*
  * __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
  * Return the number of bytes NOT copied
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 68663b8..716ebf5 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -26,13 +26,19 @@
  * and the problem, and then passes it off to one of the appropriate
  * routines.
  */
-asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
-			      unsigned long address)
+asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
+					unsigned long writeaccess,
+					unsigned long address)
 {
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	struct vm_area_struct * vma;
 	unsigned long page;
+	int si_code;
+	siginfo_t info;
+
+	trace_hardirqs_on();
+	local_irq_enable();
 
 #ifdef CONFIG_SH_KGDB
 	if (kgdb_nofault && kgdb_bus_err_hook)
@@ -41,6 +47,46 @@
 
 	tsk = current;
 	mm = tsk->mm;
+	si_code = SEGV_MAPERR;
+
+	if (unlikely(address >= TASK_SIZE)) {
+		/*
+		 * Synchronize this task's top level page-table
+		 * with the 'reference' page table.
+		 *
+		 * Do _not_ use "tsk" here. We might be inside
+		 * an interrupt in the middle of a task switch..
+		 */
+		int offset = pgd_index(address);
+		pgd_t *pgd, *pgd_k;
+		pud_t *pud, *pud_k;
+		pmd_t *pmd, *pmd_k;
+
+		pgd = get_TTB() + offset;
+		pgd_k = swapper_pg_dir + offset;
+
+		/* This will never happen with the folded page table. */
+		if (!pgd_present(*pgd)) {
+			if (!pgd_present(*pgd_k))
+				goto bad_area_nosemaphore;
+			set_pgd(pgd, *pgd_k);
+			return;
+		}
+
+		pud = pud_offset(pgd, address);
+		pud_k = pud_offset(pgd_k, address);
+		if (pud_present(*pud) || !pud_present(*pud_k))
+			goto bad_area_nosemaphore;
+		set_pud(pud, *pud_k);
+
+		pmd = pmd_offset(pud, address);
+		pmd_k = pmd_offset(pud_k, address);
+		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
+			goto bad_area_nosemaphore;
+		set_pmd(pmd, *pmd_k);
+
+		return;
+	}
 
 	/*
 	 * If we're in an interrupt or have no user
@@ -65,6 +111,7 @@
  * we can handle it..
  */
 good_area:
+	si_code = SEGV_ACCERR;
 	if (writeaccess) {
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
@@ -104,10 +151,13 @@
 bad_area:
 	up_read(&mm->mmap_sem);
 
+bad_area_nosemaphore:
 	if (user_mode(regs)) {
-		tsk->thread.address = address;
-		tsk->thread.error_code = writeaccess;
-		force_sig(SIGSEGV, tsk);
+		info.si_signo = SIGSEGV;
+		info.si_errno = 0;
+		info.si_code = si_code;
+		info.si_addr = (void *) address;
+		force_sig_info(SIGSEGV, &info, tsk);
 		return;
 	}
 
@@ -127,11 +177,9 @@
 		printk(KERN_ALERT "Unable to handle kernel paging request");
 	printk(" at virtual address %08lx\n", address);
 	printk(KERN_ALERT "pc = %08lx\n", regs->pc);
-	asm volatile("mov.l	%1, %0"
-		     : "=r" (page)
-		     : "m" (__m(MMU_TTB)));
+	page = (unsigned long)get_TTB();
 	if (page) {
-		page = ((unsigned long *) page)[address >> 22];
+		page = ((unsigned long *) page)[address >> PGDIR_SHIFT];
 		printk(KERN_ALERT "*pde = %08lx\n", page);
 		if (page & _PAGE_PRESENT) {
 			page &= PAGE_MASK;
@@ -166,98 +214,13 @@
 	 * Send a sigbus, regardless of whether we were in kernel
 	 * or user mode.
 	 */
-	tsk->thread.address = address;
-	tsk->thread.error_code = writeaccess;
-	tsk->thread.trap_no = 14;
-	force_sig(SIGBUS, tsk);
+	info.si_signo = SIGBUS;
+	info.si_errno = 0;
+	info.si_code = BUS_ADRERR;
+	info.si_addr = (void *)address;
+	force_sig_info(SIGBUS, &info, tsk);
 
 	/* Kernel mode? Handle exceptions or die */
 	if (!user_mode(regs))
 		goto no_context;
 }
-
-#ifdef CONFIG_SH_STORE_QUEUES
-/*
- * This is a special case for the SH-4 store queues, as pages for this
- * space still need to be faulted in before it's possible to flush the
- * store queue cache for writeout to the remapped region.
- */
-#define P3_ADDR_MAX		(P4SEG_STORE_QUE + 0x04000000)
-#else
-#define P3_ADDR_MAX		P4SEG
-#endif
-
-/*
- * Called with interrupts disabled.
- */
-asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
-					 unsigned long writeaccess,
-					 unsigned long address)
-{
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
-	pte_t entry;
-	struct mm_struct *mm = current->mm;
-	spinlock_t *ptl;
-	int ret = 1;
-
-#ifdef CONFIG_SH_KGDB
-	if (kgdb_nofault && kgdb_bus_err_hook)
-		kgdb_bus_err_hook();
-#endif
-
-	/*
-	 * We don't take page faults for P1, P2, and parts of P4, these
-	 * are always mapped, whether it be due to legacy behaviour in
-	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
-	 */
-	if (address >= P3SEG && address < P3_ADDR_MAX) {
-		pgd = pgd_offset_k(address);
-		mm = NULL;
-	} else {
-		if (unlikely(address >= TASK_SIZE || !mm))
-			return 1;
-
-		pgd = pgd_offset(mm, address);
-	}
-
-	pud = pud_offset(pgd, address);
-	if (pud_none_or_clear_bad(pud))
-		return 1;
-	pmd = pmd_offset(pud, address);
-	if (pmd_none_or_clear_bad(pmd))
-		return 1;
-
-	if (mm)
-		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
-	else
-		pte = pte_offset_kernel(pmd, address);
-
-	entry = *pte;
-	if (unlikely(pte_none(entry) || pte_not_present(entry)))
-		goto unlock;
-	if (unlikely(writeaccess && !pte_write(entry)))
-		goto unlock;
-
-	if (writeaccess)
-		entry = pte_mkdirty(entry);
-	entry = pte_mkyoung(entry);
-
-#ifdef CONFIG_CPU_SH4
-	/*
-	 * ITLB is not affected by "ldtlb" instruction.
-	 * So, we need to flush the entry by ourselves.
-	 */
-	__flush_tlb_page(get_asid(), address & PAGE_MASK);
-#endif
-
-	set_pte(pte, entry);
-	update_mmu_cache(NULL, address, entry);
-	ret = 0;
-unlock:
-	if (mm)
-		pte_unmap_unlock(pte, ptl);
-	return ret;
-}
diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c
index 329059d..cf2c2ee 100644
--- a/arch/sh/mm/hugetlbpage.c
+++ b/arch/sh/mm/hugetlbpage.c
@@ -63,6 +63,11 @@
 	return pte;
 }
 
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+	return 0;
+}
+
 struct page *follow_huge_addr(struct mm_struct *mm,
 			      unsigned long address, int write)
 {
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 7154d1c..59f4cc1 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -84,30 +84,22 @@
 	pmd_t *pmd;
 	pte_t *pte;
 
-	pgd = swapper_pg_dir + pgd_index(addr);
+	pgd = pgd_offset_k(addr);
 	if (pgd_none(*pgd)) {
 		pgd_ERROR(*pgd);
 		return;
 	}
 
-	pud = pud_offset(pgd, addr);
-	if (pud_none(*pud)) {
-		pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
-		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
-		if (pmd != pmd_offset(pud, 0)) {
-			pud_ERROR(*pud);
-			return;
-		}
+	pud = pud_alloc(NULL, pgd, addr);
+	if (unlikely(!pud)) {
+		pud_ERROR(*pud);
+		return;
 	}
 
-	pmd = pmd_offset(pud, addr);
-	if (pmd_none(*pmd)) {
-		pte = (pte_t *)get_zeroed_page(GFP_ATOMIC);
-		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
-		if (pte != pte_offset_kernel(pmd, 0)) {
-			pmd_ERROR(*pmd);
-			return;
-		}
+	pmd = pmd_alloc(NULL, pud, addr);
+	if (unlikely(!pmd)) {
+		pmd_ERROR(*pmd);
+		return;
 	}
 
 	pte = pte_offset_kernel(pmd, addr);
@@ -155,9 +147,6 @@
 
 /*
  * paging_init() sets up the page tables
- *
- * This routines also unmaps the page at virtual kernel address 0, so
- * that we can trap those pesky NULL-reference errors in the kernel.
  */
 void __init paging_init(void)
 {
@@ -180,14 +169,11 @@
 	 */
 	{
 		unsigned long max_dma, low, start_pfn;
-		pgd_t *pg_dir;
-		int i;
 
-		/* We don't need kernel mapping as hardware support that. */
-		pg_dir = swapper_pg_dir;
-
-		for (i = 0; i < PTRS_PER_PGD; i++)
-			pgd_val(pg_dir[i]) = 0;
+		/* We don't need to map the kernel through the TLB, as
+		 * it is permanently mapped using P1. So clear the
+		 * entire pgd. */
+		memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
 
 		/* Turn on the MMU */
 		enable_mmu();
@@ -206,6 +192,10 @@
 		}
 	}
 
+	/* Set an initial value for the MMU.TTB so we don't have to
+	 * check for a null value. */
+	set_TTB(swapper_pg_dir);
+
 #elif defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4)
 	/*
 	 * If we don't have CONFIG_MMU set and the processor in question
@@ -227,7 +217,6 @@
 
 void __init mem_init(void)
 {
-	extern unsigned long empty_zero_page[1024];
 	int codesize, reservedpages, datasize, initsize;
 	int tmp;
 	extern unsigned long memory_start;
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index a9fe80c..11d54c1 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -28,9 +28,7 @@
 {
 	unsigned long end;
 	unsigned long pfn;
-	pgprot_t pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW |
-				   _PAGE_DIRTY | _PAGE_ACCESSED |
-				   _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | flags);
+	pgprot_t pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
 
 	address &= ~PMD_MASK;
 	end = address + size;
diff --git a/arch/sh/mm/pg-dma.c b/arch/sh/mm/pg-dma.c
index 1406d2e..bb23679 100644
--- a/arch/sh/mm/pg-dma.c
+++ b/arch/sh/mm/pg-dma.c
@@ -39,8 +39,6 @@
 
 static void clear_page_dma(void *to)
 {
-	extern unsigned long empty_zero_page[1024];
-
 	/*
 	 * We get invoked quite early on, if the DMAC hasn't been initialized
 	 * yet, fall back on the slow manual implementation.
diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c
index 07371ed..3f98d2a 100644
--- a/arch/sh/mm/pg-sh4.c
+++ b/arch/sh/mm/pg-sh4.c
@@ -6,22 +6,12 @@
  *
  * Released under the terms of the GNU GPL v2.0.
  */
-#include <linux/init.h>
-#include <linux/mman.h>
 #include <linux/mm.h>
-#include <linux/threads.h>
-#include <asm/addrspace.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/cache.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
+#include <linux/mutex.h>
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
-extern struct semaphore p3map_sem[];
+extern struct mutex p3map_mutex[];
 
 #define CACHE_ALIAS (cpu_data->dcache.alias_mask)
 
@@ -37,10 +27,6 @@
 	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
 		clear_page(to);
 	else {
-		pgprot_t pgprot = __pgprot(_PAGE_PRESENT |
-					   _PAGE_RW | _PAGE_CACHABLE |
-					   _PAGE_DIRTY | _PAGE_ACCESSED |
-					   _PAGE_HW_SHARED | _PAGE_FLAGS_HARD);
 		unsigned long phys_addr = PHYSADDR(to);
 		unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
 		pgd_t *pgd = pgd_offset_k(p3_addr);
@@ -50,8 +36,8 @@
 		pte_t entry;
 		unsigned long flags;
 
-		entry = pfn_pte(phys_addr >> PAGE_SHIFT, pgprot);
-		down(&p3map_sem[(address & CACHE_ALIAS)>>12]);
+		entry = pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_KERNEL);
+		mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
 		set_pte(pte, entry);
 		local_irq_save(flags);
 		__flush_tlb_page(get_asid(), p3_addr);
@@ -59,7 +45,7 @@
 		update_mmu_cache(NULL, p3_addr, entry);
 		__clear_user_page((void *)p3_addr, to);
 		pte_clear(&init_mm, p3_addr, pte);
-		up(&p3map_sem[(address & CACHE_ALIAS)>>12]);
+		mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
 	}
 }
 
@@ -77,10 +63,6 @@
 	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
 		copy_page(to, from);
 	else {
-		pgprot_t pgprot = __pgprot(_PAGE_PRESENT |
-					   _PAGE_RW | _PAGE_CACHABLE |
-					   _PAGE_DIRTY | _PAGE_ACCESSED |
-					   _PAGE_HW_SHARED | _PAGE_FLAGS_HARD);
 		unsigned long phys_addr = PHYSADDR(to);
 		unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
 		pgd_t *pgd = pgd_offset_k(p3_addr);
@@ -90,8 +72,8 @@
 		pte_t entry;
 		unsigned long flags;
 
-		entry = pfn_pte(phys_addr >> PAGE_SHIFT, pgprot);
-		down(&p3map_sem[(address & CACHE_ALIAS)>>12]);
+		entry = pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_KERNEL);
+		mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
 		set_pte(pte, entry);
 		local_irq_save(flags);
 		__flush_tlb_page(get_asid(), p3_addr);
@@ -99,7 +81,7 @@
 		update_mmu_cache(NULL, p3_addr, entry);
 		__copy_user_page((void *)p3_addr, from, to);
 		pte_clear(&init_mm, p3_addr, pte);
-		up(&p3map_sem[(address & CACHE_ALIAS)>>12]);
+		mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
 	}
 }
 
@@ -122,4 +104,3 @@
 	}
 	return pte;
 }
-
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 92e7453..b60ad83 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -30,7 +30,7 @@
 
 #define NR_PMB_ENTRIES	16
 
-static kmem_cache_t *pmb_cache;
+static struct kmem_cache *pmb_cache;
 static unsigned long pmb_map;
 
 static struct pmb_entry pmb_init_map[] = {
@@ -283,7 +283,7 @@
 	} while (pmbe);
 }
 
-static void pmb_cache_ctor(void *pmb, kmem_cache_t *cachep, unsigned long flags)
+static void pmb_cache_ctor(void *pmb, struct kmem_cache *cachep, unsigned long flags)
 {
 	struct pmb_entry *pmbe = pmb;
 
@@ -297,7 +297,7 @@
 	spin_unlock_irq(&pmb_list_lock);
 }
 
-static void pmb_cache_dtor(void *pmb, kmem_cache_t *cachep, unsigned long flags)
+static void pmb_cache_dtor(void *pmb, struct kmem_cache *cachep, unsigned long flags)
 {
 	spin_lock_irq(&pmb_list_lock);
 	pmb_list_del(pmb);
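The pmb.c hunks above follow the tree-wide replacement of the kmem_cache_t typedef with struct kmem_cache. A minimal sketch of the updated spelling, with the cache and object names purely illustrative (the six-argument kmem_cache_create() of this kernel generation, including the dtor slot, is assumed):

struct my_entry {
	unsigned long vpn;
	unsigned long ppn;
};

static struct kmem_cache *my_cache;

static int __init my_cache_init(void)
{
	my_cache = kmem_cache_create("my_entry", sizeof(struct my_entry),
				     0, 0, NULL, NULL);
	return my_cache ? 0 : -ENOMEM;
}

static struct my_entry *my_entry_alloc(void)
{
	/* GFP_KERNEL rather than the removed SLAB_KERNEL alias */
	return kmem_cache_alloc(my_cache, GFP_KERNEL);
}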
diff --git a/arch/sh/tools/mach-types b/arch/sh/tools/mach-types
index ac57638..0571755 100644
--- a/arch/sh/tools/mach-types
+++ b/arch/sh/tools/mach-types
@@ -30,3 +30,5 @@
 TITAN			SH_TITAN
 SHMIN			SH_SHMIN
 7710VOIPGW		SH_7710VOIPGW
+7206SE			SH_7206_SOLUTION_ENGINE
+7619SE			SH_7619_SOLUTION_ENGINE
diff --git a/arch/sh64/kernel/setup.c b/arch/sh64/kernel/setup.c
index ffb310e..b9e7d54 100644
--- a/arch/sh64/kernel/setup.c
+++ b/arch/sh64/kernel/setup.c
@@ -243,9 +243,7 @@
 		if (INITRD_START + INITRD_SIZE <= (PFN_PHYS(last_pfn))) {
 		        reserve_bootmem_node(NODE_DATA(0), INITRD_START + __MEMORY_START, INITRD_SIZE);
 
-			initrd_start =
-			  (long) INITRD_START ? INITRD_START + PAGE_OFFSET +  __MEMORY_START : 0;
-
+			initrd_start = (long) INITRD_START + PAGE_OFFSET + __MEMORY_START;
 			initrd_end = initrd_start + INITRD_SIZE;
 		} else {
 			printk("initrd extends beyond end of memory "
diff --git a/arch/sh64/kernel/signal.c b/arch/sh64/kernel/signal.c
index 9e2ffc4..1666d3e 100644
--- a/arch/sh64/kernel/signal.c
+++ b/arch/sh64/kernel/signal.c
@@ -22,7 +22,7 @@
 #include <linux/errno.h>
 #include <linux/wait.h>
 #include <linux/personality.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <linux/ptrace.h>
 #include <linux/unistd.h>
 #include <linux/stddef.h>
diff --git a/arch/sh64/mm/fault.c b/arch/sh64/mm/fault.c
index 8e2f6c2..4f72ab3 100644
--- a/arch/sh64/mm/fault.c
+++ b/arch/sh64/mm/fault.c
@@ -154,7 +154,7 @@
 	 * If we're in an interrupt or have no user
 	 * context, we must not take the fault..
 	 */
-	if (in_interrupt() || !mm)
+	if (in_atomic() || !mm)
 		goto no_context;
 
 	/* TLB misses upon some cache flushes get done under cli() */
diff --git a/arch/sh64/mm/hugetlbpage.c b/arch/sh64/mm/hugetlbpage.c
index 187cf01..4b455f6 100644
--- a/arch/sh64/mm/hugetlbpage.c
+++ b/arch/sh64/mm/hugetlbpage.c
@@ -53,6 +53,11 @@
 	return pte;
 }
 
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+	return 0;
+}
+
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 		     pte_t *ptep, pte_t entry)
 {
diff --git a/arch/sparc/mm/highmem.c b/arch/sparc/mm/highmem.c
index 4d8ed9c..01fc6c2 100644
--- a/arch/sparc/mm/highmem.c
+++ b/arch/sparc/mm/highmem.c
@@ -35,7 +35,7 @@
 	unsigned long vaddr;
 
 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-	inc_preempt_count();
+	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
 
@@ -70,8 +70,7 @@
 	unsigned long idx = type + KM_TYPE_NR*smp_processor_id();
 
 	if (vaddr < FIXADDR_START) { // FIXME
-		dec_preempt_count();
-		preempt_check_resched();
+		pagefault_enable();
 		return;
 	}
 
@@ -97,8 +96,7 @@
 #endif
 #endif
 
-	dec_preempt_count();
-	preempt_check_resched();
+	pagefault_enable();
 }
 
 /* We may be fed a pagetable here by ptep_to_xxx and others. */
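The highmem.c change above replaces open-coded preempt-count manipulation with the pagefault_disable()/pagefault_enable() pair inside kmap_atomic()/kunmap_atomic(). From a caller's point of view nothing changes; a sketch of the usual pattern (function and argument names hypothetical):

static void copy_from_highpage(struct page *page, void *dst, size_t len)
{
	void *src = kmap_atomic(page, KM_USER0);

	/* page faults and preemption are disabled inside this window */
	memcpy(dst, src, len);
	kunmap_atomic(src, KM_USER0);
}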
diff --git a/arch/sparc64/kernel/binfmt_elf32.c b/arch/sparc64/kernel/binfmt_elf32.c
index a98f3ae..9ad84ff 100644
--- a/arch/sparc64/kernel/binfmt_elf32.c
+++ b/arch/sparc64/kernel/binfmt_elf32.c
@@ -141,7 +141,6 @@
 	value->tv_sec = jiffies / HZ;
 }
 
-#define elf_addr_t	u32
 #undef start_thread
 #define start_thread start_thread32
 #define init_elf_binfmt init_elf32_binfmt
diff --git a/arch/sparc64/mm/hugetlbpage.c b/arch/sparc64/mm/hugetlbpage.c
index 53b9b1f..33fd0b2 100644
--- a/arch/sparc64/mm/hugetlbpage.c
+++ b/arch/sparc64/mm/hugetlbpage.c
@@ -235,6 +235,11 @@
 	return pte;
 }
 
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+	return 0;
+}
+
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 		     pte_t *ptep, pte_t entry)
 {
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 09cb7fc..a8e8802 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -176,9 +176,9 @@
 
 int bigkernel = 0;
 
-kmem_cache_t *pgtable_cache __read_mostly;
+struct kmem_cache *pgtable_cache __read_mostly;
 
-static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
+static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
 {
 	clear_page(addr);
 }
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
index beaa028..236d02f 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -239,7 +239,7 @@
 	}
 }
 
-static kmem_cache_t *tsb_caches[8] __read_mostly;
+static struct kmem_cache *tsb_caches[8] __read_mostly;
 
 static const char *tsb_cache_names[8] = {
 	"tsb_8KB",
diff --git a/arch/um/drivers/chan_kern.c b/arch/um/drivers/chan_kern.c
index 3576b3c..7d4190e 100644
--- a/arch/um/drivers/chan_kern.c
+++ b/arch/um/drivers/chan_kern.c
@@ -638,7 +638,7 @@
 	return -1;
 }
 
-void chan_interrupt(struct list_head *chans, struct work_struct *task,
+void chan_interrupt(struct list_head *chans, struct delayed_work *task,
 		    struct tty_struct *tty, int irq)
 {
 	struct list_head *ele, *next;
diff --git a/arch/um/drivers/daemon_kern.c b/arch/um/drivers/daemon_kern.c
index 8243869..9c2e7a7 100644
--- a/arch/um/drivers/daemon_kern.c
+++ b/arch/um/drivers/daemon_kern.c
@@ -98,4 +98,4 @@
 	return 0;
 }
 
-__initcall(register_daemon);
+late_initcall(register_daemon);
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
index 426633e..aa3090d 100644
--- a/arch/um/drivers/line.c
+++ b/arch/um/drivers/line.c
@@ -31,9 +31,9 @@
 	return IRQ_HANDLED;
 }
 
-static void line_timer_cb(void *arg)
+static void line_timer_cb(struct work_struct *work)
 {
-	struct line *line = arg;
+	struct line *line = container_of(work, struct line, task.work);
 
 	if(!line->throttled)
 		chan_interrupt(&line->chan_list, &line->task, line->tty,
@@ -443,7 +443,7 @@
 		 * is registered.
 		 */
 		enable_chan(line);
-		INIT_WORK(&line->task, line_timer_cb, line);
+		INIT_DELAYED_WORK(&line->task, line_timer_cb);
 
 		if(!line->sigio){
 			chan_enable_winch(&line->chan_list, tty);
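The line.c conversion above follows the reworked workqueue API: the callback now receives the work_struct itself and recovers its container, and periodic work uses struct delayed_work. A sketch of the pattern with a hypothetical structure:

struct my_dev {
	int throttled;
	struct delayed_work task;
};

static void my_timer_cb(struct work_struct *work)
{
	/* 'work' points at dev->task.work; recover the enclosing object */
	struct my_dev *dev = container_of(work, struct my_dev, task.work);

	if (!dev->throttled)
		schedule_delayed_work(&dev->task, 1);
}

static void my_dev_setup(struct my_dev *dev)
{
	INIT_DELAYED_WORK(&dev->task, my_timer_cb);
	schedule_delayed_work(&dev->task, 1);
}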
diff --git a/arch/um/drivers/mcast_kern.c b/arch/um/drivers/mcast_kern.c
index c090fbd..52ccb7b 100644
--- a/arch/um/drivers/mcast_kern.c
+++ b/arch/um/drivers/mcast_kern.c
@@ -127,4 +127,4 @@
 	return 0;
 }
 
-__initcall(register_mcast);
+late_initcall(register_mcast);
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index 7b17216..96f0189 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -56,7 +56,7 @@
 
 static LIST_HEAD(mc_requests);
 
-static void mc_work_proc(void *unused)
+static void mc_work_proc(struct work_struct *unused)
 {
 	struct mconsole_entry *req;
 	unsigned long flags;
@@ -72,7 +72,7 @@
 	}
 }
 
-static DECLARE_WORK(mconsole_work, mc_work_proc, NULL);
+static DECLARE_WORK(mconsole_work, mc_work_proc);
 
 static irqreturn_t mconsole_interrupt(int irq, void *dev_id)
 {
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index ec9eb8b..286bc0b 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -99,6 +99,7 @@
 		 * same device, since it tests for (dev->flags & IFF_UP). So
 		 * there's no harm in delaying the device shutdown. */
 		schedule_work(&close_work);
+#error this is not permitted - close_work will go out of scope
 		goto out;
 	}
 	reactivate_fd(lp->fd, UM_ETH_IRQ);
diff --git a/arch/um/drivers/pcap_kern.c b/arch/um/drivers/pcap_kern.c
index 6e1ef85..e67362a 100644
--- a/arch/um/drivers/pcap_kern.c
+++ b/arch/um/drivers/pcap_kern.c
@@ -109,4 +109,4 @@
 	return 0;
 }
 
-__initcall(register_pcap);
+late_initcall(register_pcap);
diff --git a/arch/um/drivers/port_kern.c b/arch/um/drivers/port_kern.c
index ce9f373..6dfe632 100644
--- a/arch/um/drivers/port_kern.c
+++ b/arch/um/drivers/port_kern.c
@@ -132,7 +132,7 @@
 DECLARE_MUTEX(ports_sem);
 struct list_head ports = LIST_HEAD_INIT(ports);
 
-void port_work_proc(void *unused)
+void port_work_proc(struct work_struct *unused)
 {
 	struct port_list *port;
 	struct list_head *ele;
@@ -150,7 +150,7 @@
 	local_irq_restore(flags);
 }
 
-DECLARE_WORK(port_work, port_work_proc, NULL);
+DECLARE_WORK(port_work, port_work_proc);
 
 static irqreturn_t port_interrupt(int irq, void *data)
 {
diff --git a/arch/um/drivers/slip_kern.c b/arch/um/drivers/slip_kern.c
index 788da54..25634bd 100644
--- a/arch/um/drivers/slip_kern.c
+++ b/arch/um/drivers/slip_kern.c
@@ -95,4 +95,4 @@
 	return 0;
 }
 
-__initcall(register_slip);
+late_initcall(register_slip);
diff --git a/arch/um/drivers/slirp_kern.c b/arch/um/drivers/slirp_kern.c
index ae322e1..b3ed8fb 100644
--- a/arch/um/drivers/slirp_kern.c
+++ b/arch/um/drivers/slirp_kern.c
@@ -119,4 +119,4 @@
 	return 0;
 }
 
-__initcall(register_slirp);
+late_initcall(register_slirp);
diff --git a/arch/um/include/chan_kern.h b/arch/um/include/chan_kern.h
index 572d286..9003a34 100644
--- a/arch/um/include/chan_kern.h
+++ b/arch/um/include/chan_kern.h
@@ -27,7 +27,7 @@
 	void *data;
 };
 
-extern void chan_interrupt(struct list_head *chans, struct work_struct *task,
+extern void chan_interrupt(struct list_head *chans, struct delayed_work *task,
 			   struct tty_struct *tty, int irq);
 extern int parse_chan_pair(char *str, struct line *line, int device,
 			   const struct chan_opts *opts);
diff --git a/arch/um/include/line.h b/arch/um/include/line.h
index 7be2481..214ee76 100644
--- a/arch/um/include/line.h
+++ b/arch/um/include/line.h
@@ -51,7 +51,7 @@
 	char *tail;
 
 	int sigio;
-	struct work_struct task;
+	struct delayed_work task;
 	const struct line_driver *driver;
 	int have_irq;
 };
diff --git a/arch/um/include/sysdep-i386/ptrace.h b/arch/um/include/sysdep-i386/ptrace.h
index 6670cc9..52b398b 100644
--- a/arch/um/include/sysdep-i386/ptrace.h
+++ b/arch/um/include/sysdep-i386/ptrace.h
@@ -75,7 +75,7 @@
 #endif
 #ifdef UML_CONFIG_MODE_SKAS
 	struct skas_regs {
-		unsigned long regs[HOST_FRAME_SIZE];
+		unsigned long regs[MAX_REG_NR];
 		unsigned long fp[HOST_FP_SIZE];
 		unsigned long xfp[HOST_XFP_SIZE];
                 struct faultinfo faultinfo;
diff --git a/arch/um/include/sysdep-i386/stub.h b/arch/um/include/sysdep-i386/stub.h
index b492b12..4fffae7 100644
--- a/arch/um/include/sysdep-i386/stub.h
+++ b/arch/um/include/sysdep-i386/stub.h
@@ -9,6 +9,7 @@
 #include <sys/mman.h>
 #include <asm/ptrace.h>
 #include <asm/unistd.h>
+#include <asm/page.h>
 #include "stub-data.h"
 #include "kern_constants.h"
 #include "uml-config.h"
diff --git a/arch/um/include/sysdep-x86_64/ptrace.h b/arch/um/include/sysdep-x86_64/ptrace.h
index 617bb9ef..66cb400 100644
--- a/arch/um/include/sysdep-x86_64/ptrace.h
+++ b/arch/um/include/sysdep-x86_64/ptrace.h
@@ -108,7 +108,7 @@
 		 * file size, while i386 uses FRAME_SIZE.  Therefore, we need
 		 * to use UM_FRAME_SIZE here instead of HOST_FRAME_SIZE.
 		 */
-		unsigned long regs[UM_FRAME_SIZE];
+		unsigned long regs[MAX_REG_NR];
 		unsigned long fp[HOST_FP_SIZE];
                 struct faultinfo faultinfo;
 		long syscall;
diff --git a/arch/um/os-Linux/drivers/ethertap_kern.c b/arch/um/os-Linux/drivers/ethertap_kern.c
index 16385e2..7054182 100644
--- a/arch/um/os-Linux/drivers/ethertap_kern.c
+++ b/arch/um/os-Linux/drivers/ethertap_kern.c
@@ -105,4 +105,4 @@
 	return 0;
 }
 
-__initcall(register_ethertap);
+late_initcall(register_ethertap);
diff --git a/arch/um/os-Linux/drivers/tuntap_kern.c b/arch/um/os-Linux/drivers/tuntap_kern.c
index 0edbac6..76570a2 100644
--- a/arch/um/os-Linux/drivers/tuntap_kern.c
+++ b/arch/um/os-Linux/drivers/tuntap_kern.c
@@ -90,4 +90,4 @@
 	return 0;
 }
 
-__initcall(register_tuntap);
+late_initcall(register_tuntap);
diff --git a/arch/um/sys-i386/ldt.c b/arch/um/sys-i386/ldt.c
index e299ee5..49057d8 100644
--- a/arch/um/sys-i386/ldt.c
+++ b/arch/um/sys-i386/ldt.c
@@ -3,7 +3,6 @@
  * Licensed under the GPL
  */
 
-#include "linux/stddef.h"
 #include "linux/sched.h"
 #include "linux/slab.h"
 #include "linux/types.h"
diff --git a/arch/um/sys-i386/ptrace_user.c b/arch/um/sys-i386/ptrace_user.c
index 5f3cc66..01212c8 100644
--- a/arch/um/sys-i386/ptrace_user.c
+++ b/arch/um/sys-i386/ptrace_user.c
@@ -4,9 +4,9 @@
  */
 
 #include <stdio.h>
+#include <stddef.h>
 #include <errno.h>
 #include <unistd.h>
-#include <linux/stddef.h>
 #include "ptrace_user.h"
 /* Grr, asm/user.h includes asm/ptrace.h, so has to follow ptrace_user.h */
 #include <asm/user.h>
diff --git a/arch/um/sys-i386/user-offsets.c b/arch/um/sys-i386/user-offsets.c
index 6f4ef2b..447306b 100644
--- a/arch/um/sys-i386/user-offsets.c
+++ b/arch/um/sys-i386/user-offsets.c
@@ -2,7 +2,7 @@
 #include <signal.h>
 #include <asm/ptrace.h>
 #include <asm/user.h>
-#include <linux/stddef.h>
+#include <stddef.h>
 #include <sys/poll.h>
 
 #define DEFINE(sym, val) \
diff --git a/arch/x86_64/ia32/ia32_binfmt.c b/arch/x86_64/ia32/ia32_binfmt.c
index 82ef182..543ef4f 100644
--- a/arch/x86_64/ia32/ia32_binfmt.c
+++ b/arch/x86_64/ia32/ia32_binfmt.c
@@ -305,8 +305,6 @@
 #undef MODULE_DESCRIPTION
 #undef MODULE_AUTHOR
 
-#define elf_addr_t __u32
-
 static void elf32_init(struct pt_regs *);
 
 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
@@ -351,7 +349,7 @@
 		bprm->loader += stack_base;
 	bprm->exec += stack_base;
 
-	mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (!mpnt) 
 		return -ENOMEM; 
 
diff --git a/arch/x86_64/ia32/syscall32.c b/arch/x86_64/ia32/syscall32.c
index 3a01329..3e5ed20 100644
--- a/arch/x86_64/ia32/syscall32.c
+++ b/arch/x86_64/ia32/syscall32.c
@@ -49,7 +49,7 @@
 	struct mm_struct *mm = current->mm;
 	int ret;
 
-	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (!vma)
 		return -ENOMEM;
 
diff --git a/arch/x86_64/kernel/crash.c b/arch/x86_64/kernel/crash.c
index 3525f88..95a7a2c 100644
--- a/arch/x86_64/kernel/crash.c
+++ b/arch/x86_64/kernel/crash.c
@@ -28,71 +28,6 @@
 /* This keeps a track of which one is crashing cpu. */
 static int crashing_cpu;
 
-static u32 *append_elf_note(u32 *buf, char *name, unsigned type,
-						void *data, size_t data_len)
-{
-	struct elf_note note;
-
-	note.n_namesz = strlen(name) + 1;
-	note.n_descsz = data_len;
-	note.n_type   = type;
-	memcpy(buf, &note, sizeof(note));
-	buf += (sizeof(note) +3)/4;
-	memcpy(buf, name, note.n_namesz);
-	buf += (note.n_namesz + 3)/4;
-	memcpy(buf, data, note.n_descsz);
-	buf += (note.n_descsz + 3)/4;
-
-	return buf;
-}
-
-static void final_note(u32 *buf)
-{
-	struct elf_note note;
-
-	note.n_namesz = 0;
-	note.n_descsz = 0;
-	note.n_type   = 0;
-	memcpy(buf, &note, sizeof(note));
-}
-
-static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
-{
-	struct elf_prstatus prstatus;
-	u32 *buf;
-
-	if ((cpu < 0) || (cpu >= NR_CPUS))
-		return;
-
-	/* Using ELF notes here is opportunistic.
-	 * I need a well defined structure format
-	 * for the data I pass, and I need tags
-	 * on the data to indicate what information I have
-	 * squirrelled away.  ELF notes happen to provide
-	 * all of that, no need to invent something new.
-	 */
-
-	buf = (u32*)per_cpu_ptr(crash_notes, cpu);
-
-	if (!buf)
-		return;
-
-	memset(&prstatus, 0, sizeof(prstatus));
-	prstatus.pr_pid = current->pid;
-	elf_core_copy_regs(&prstatus.pr_reg, regs);
-	buf = append_elf_note(buf, "CORE", NT_PRSTATUS, &prstatus,
-					sizeof(prstatus));
-	final_note(buf);
-}
-
-static void crash_save_self(struct pt_regs *regs)
-{
-	int cpu;
-
-	cpu = smp_processor_id();
-	crash_save_this_cpu(regs, cpu);
-}
-
 #ifdef CONFIG_SMP
 static atomic_t waiting_for_crash_ipi;
 
@@ -117,7 +52,7 @@
 		return NOTIFY_STOP;
 	local_irq_disable();
 
-	crash_save_this_cpu(regs, cpu);
+	crash_save_cpu(regs, cpu);
 	disable_local_APIC();
 	atomic_dec(&waiting_for_crash_ipi);
 	/* Assume hlt works */
@@ -196,5 +131,5 @@
 
 	disable_IO_APIC();
 
-	crash_save_self(regs);
+	crash_save_cpu(regs, smp_processor_id());
 }
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index ac24156..209c8c0 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -224,7 +224,7 @@
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
 	mutex_lock(&kprobe_mutex);
-	free_insn_slot(p->ainsn.insn);
+	free_insn_slot(p->ainsn.insn, 0);
 	mutex_unlock(&kprobe_mutex);
 }
 
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index a7440cb..ac08503 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -306,8 +306,8 @@
  */
 
 static int check_interval = 5 * 60; /* 5 minutes */
-static void mcheck_timer(void *data);
-static DECLARE_WORK(mcheck_work, mcheck_timer, NULL);
+static void mcheck_timer(struct work_struct *work);
+static DECLARE_DELAYED_WORK(mcheck_work, mcheck_timer);
 
 static void mcheck_check_cpu(void *info)
 {
@@ -315,7 +315,7 @@
 		do_machine_check(NULL, 0);
 }
 
-static void mcheck_timer(void *data)
+static void mcheck_timer(struct work_struct *work)
 {
 	on_each_cpu(mcheck_check_cpu, NULL, 1, 1);
 	schedule_delayed_work(&mcheck_work, check_interval * HZ);
@@ -641,7 +641,6 @@
 	return err;
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static void mce_remove_device(unsigned int cpu)
 {
 	int i;
@@ -675,7 +674,6 @@
 static struct notifier_block mce_cpu_notifier = {
 	.notifier_call = mce_cpu_callback,
 };
-#endif
 
 static __init int mce_init_device(void)
 {
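
For statically defined periodic work such as mcheck_work above, the conversion pairs DECLARE_DELAYED_WORK() with a handler that simply re-arms itself. Roughly (the names and interval macro are illustrative):

	#include <linux/workqueue.h>

	#define EXAMPLE_INTERVAL	(5 * 60)	/* seconds, illustrative */

	static void example_poll(struct work_struct *work);
	static DECLARE_DELAYED_WORK(example_poll_work, example_poll);

	static void example_poll(struct work_struct *work)
	{
		/* ... do the periodic check ... */
		schedule_delayed_work(&example_poll_work, EXAMPLE_INTERVAL * HZ);
	}
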
diff --git a/arch/x86_64/kernel/mce_amd.c b/arch/x86_64/kernel/mce_amd.c
index 883fe74..fa09deb 100644
--- a/arch/x86_64/kernel/mce_amd.c
+++ b/arch/x86_64/kernel/mce_amd.c
@@ -551,7 +551,6 @@
 	return err;
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 /*
  * let's be hotplug friendly.
  * in case of multiple core processors, the first core always takes ownership
@@ -594,12 +593,14 @@
 
 	sprintf(name, "threshold_bank%i", bank);
 
+#ifdef CONFIG_SMP
 	/* sibling symlink */
 	if (shared_bank[bank] && b->blocks->cpu != cpu) {
 		sysfs_remove_link(&per_cpu(device_mce, cpu).kobj, name);
 		per_cpu(threshold_banks, cpu)[bank] = NULL;
 		return;
 	}
+#endif
 
 	/* remove all sibling symlinks before unregistering */
 	for_each_cpu_mask(i, b->cpus) {
@@ -656,7 +657,6 @@
 static struct notifier_block threshold_cpu_notifier = {
 	.notifier_call = threshold_cpu_callback,
 };
-#endif /* CONFIG_HOTPLUG_CPU */
 
 static __init int threshold_init_device(void)
 {
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index 6595a4e..af425a8 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -471,8 +471,7 @@
 	if (LOADER_TYPE && INITRD_START) {
 		if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
 			reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
-			initrd_start =
-				INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
+			initrd_start = INITRD_START + PAGE_OFFSET;
 			initrd_end = initrd_start+INITRD_SIZE;
 		}
 		else {
diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
index 1bf0e09..af1ec4d 100644
--- a/arch/x86_64/kernel/smp.c
+++ b/arch/x86_64/kernel/smp.c
@@ -379,6 +379,10 @@
 		put_cpu();
 		return 0;
 	}
+
+	/* Can deadlock when called with interrupts disabled */
+	WARN_ON(irqs_disabled());
+
 	spin_lock_bh(&call_lock);
 	__smp_call_function_single(cpu, func, info, nonatomic, wait);
 	spin_unlock_bh(&call_lock);
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index 4c161c2..daf1933 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -754,14 +754,16 @@
 }
 
 struct create_idle {
+	struct work_struct work;
 	struct task_struct *idle;
 	struct completion done;
 	int cpu;
 };
 
-void do_fork_idle(void *_c_idle)
+void do_fork_idle(struct work_struct *work)
 {
-	struct create_idle *c_idle = _c_idle;
+	struct create_idle *c_idle =
+		container_of(work, struct create_idle, work);
 
 	c_idle->idle = fork_idle(c_idle->cpu);
 	complete(&c_idle->done);
@@ -776,10 +778,10 @@
 	int timeout;
 	unsigned long start_rip;
 	struct create_idle c_idle = {
+		.work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
 		.cpu = cpu,
 		.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
 	};
-	DECLARE_WORK(work, do_fork_idle, &c_idle);
 
 	/* allocate memory for gdts of secondary cpus. Hotplug is considered */
 	if (!cpu_gdt_descr[cpu].address &&
@@ -826,9 +828,9 @@
 	 * thread.
 	 */
 	if (!keventd_up() || current_is_keventd())
-		work.func(work.data);
+		c_idle.work.func(&c_idle.work);
 	else {
-		schedule_work(&work);
+		schedule_work(&c_idle.work);
 		wait_for_completion(&c_idle.done);
 	}
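
The smpboot change above is the on-stack variant of the same conversion: since the handler no longer gets a data pointer, the work item is embedded in the on-stack context, initialised with __WORK_INITIALIZER(), and paired with a completion so the caller can wait for it. In outline (structure and function names are illustrative):

	#include <linux/workqueue.h>
	#include <linux/completion.h>

	struct example_ctx {
		struct work_struct work;
		struct completion done;
		int cpu;
	};

	static void example_fn(struct work_struct *work)
	{
		struct example_ctx *c = container_of(work, struct example_ctx, work);

		/* ... do the deferred job for c->cpu ... */
		complete(&c->done);
	}

	static void example_caller(int cpu)
	{
		struct example_ctx c = {
			.cpu  = cpu,
			.work = __WORK_INITIALIZER(c.work, example_fn),
			.done = COMPLETION_INITIALIZER_ONSTACK(c.done),
		};

		schedule_work(&c.work);
		wait_for_completion(&c.done);
	}
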
 
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index e3ef544..9f05bc9 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -563,7 +563,7 @@
 static unsigned int cpufreq_init = 0;
 static struct work_struct cpufreq_delayed_get_work;
 
-static void handle_cpufreq_delayed_get(void *v)
+static void handle_cpufreq_delayed_get(struct work_struct *v)
 {
 	unsigned int cpu;
 	for_each_online_cpu(cpu) {
@@ -639,7 +639,7 @@
 
 static int __init cpufreq_tsc(void)
 {
-	INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
+	INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get);
 	if (!cpufreq_register_notifier(&time_cpufreq_notifier_block,
 				       CPUFREQ_TRANSITION_NOTIFIER))
 		cpufreq_init = 1;
diff --git a/arch/x86_64/kernel/vsyscall.c b/arch/x86_64/kernel/vsyscall.c
index c3de9a0..4a673f5 100644
--- a/arch/x86_64/kernel/vsyscall.c
+++ b/arch/x86_64/kernel/vsyscall.c
@@ -42,6 +42,7 @@
 #include <asm/topology.h>
 
 #define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
+#define __syscall_clobber "r11","rcx","memory"
 
 int __sysctl_vsyscall __section_sysctl_vsyscall = 1;
 seqlock_t __xtime_lock __section_xtime_lock = SEQLOCK_UNLOCKED;
@@ -274,7 +275,6 @@
 	vsyscall_set_cpu(raw_smp_processor_id());
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int __cpuinit
 cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
 {
@@ -283,7 +283,6 @@
 		smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1);
 	return NOTIFY_DONE;
 }
-#endif
 
 static void __init map_vsyscall(void)
 {
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 0024211..5934c4b 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -1274,9 +1274,10 @@
  *
  * FIXME! dispatch queue is not a queue at all!
  */
-static void as_work_handler(void *data)
+static void as_work_handler(struct work_struct *work)
 {
-	struct request_queue *q = data;
+	struct as_data *ad = container_of(work, struct as_data, antic_work);
+	struct request_queue *q = ad->q;
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
@@ -1332,7 +1333,7 @@
 	ad->antic_timer.function = as_antic_timeout;
 	ad->antic_timer.data = (unsigned long)q;
 	init_timer(&ad->antic_timer);
-	INIT_WORK(&ad->antic_work, as_work_handler, q);
+	INIT_WORK(&ad->antic_work, as_work_handler);
 
 	INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]);
 	INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]);
diff --git a/block/blktrace.c b/block/blktrace.c
index 74e02c0..d3679dd 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -394,8 +394,7 @@
 	if (bt) {
 		if (bt->dropped_file)
 			debugfs_remove(bt->dropped_file);
-		if (bt->sequence)
-			free_percpu(bt->sequence);
+		free_percpu(bt->sequence);
 		if (bt->rchan)
 			relay_close(bt->rchan);
 		kfree(bt);
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index e9019ed..78c6b31 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -43,8 +43,8 @@
 #define RQ_CIC(rq)		((struct cfq_io_context*)(rq)->elevator_private)
 #define RQ_CFQQ(rq)		((rq)->elevator_private2)
 
-static kmem_cache_t *cfq_pool;
-static kmem_cache_t *cfq_ioc_pool;
+static struct kmem_cache *cfq_pool;
+static struct kmem_cache *cfq_ioc_pool;
 
 static DEFINE_PER_CPU(unsigned long, ioc_count);
 static struct completion *ioc_gone;
@@ -1840,9 +1840,11 @@
 	return 1;
 }
 
-static void cfq_kick_queue(void *data)
+static void cfq_kick_queue(struct work_struct *work)
 {
-	request_queue_t *q = data;
+	struct cfq_data *cfqd =
+		container_of(work, struct cfq_data, unplug_work);
+	request_queue_t *q = cfqd->queue;
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
@@ -1986,7 +1988,7 @@
 	cfqd->idle_class_timer.function = cfq_idle_class_timer;
 	cfqd->idle_class_timer.data = (unsigned long) cfqd;
 
-	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q);
+	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
 
 	cfqd->cfq_quantum = cfq_quantum;
 	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 0f82e12..31512cd 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -34,7 +34,7 @@
  */
 #include <scsi/scsi_cmnd.h>
 
-static void blk_unplug_work(void *data);
+static void blk_unplug_work(struct work_struct *work);
 static void blk_unplug_timeout(unsigned long data);
 static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
 static void init_request_from_bio(struct request *req, struct bio *bio);
@@ -44,17 +44,17 @@
 /*
  * For the allocated request tables
  */
-static kmem_cache_t *request_cachep;
+static struct kmem_cache *request_cachep;
 
 /*
  * For queue allocation
  */
-static kmem_cache_t *requestq_cachep;
+static struct kmem_cache *requestq_cachep;
 
 /*
  * For io context allocations
  */
-static kmem_cache_t *iocontext_cachep;
+static struct kmem_cache *iocontext_cachep;
 
 /*
  * Controlling structure to kblockd
@@ -227,7 +227,7 @@
 	if (q->unplug_delay == 0)
 		q->unplug_delay = 1;
 
-	INIT_WORK(&q->unplug_work, blk_unplug_work, q);
+	INIT_WORK(&q->unplug_work, blk_unplug_work);
 
 	q->unplug_timer.function = blk_unplug_timeout;
 	q->unplug_timer.data = (unsigned long)q;
@@ -1631,9 +1631,9 @@
 	}
 }
 
-static void blk_unplug_work(void *data)
+static void blk_unplug_work(struct work_struct *work)
 {
-	request_queue_t *q = data;
+	request_queue_t *q = container_of(work, request_queue_t, unplug_work);
 
 	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
 				q->rq.count[READ] + q->rq.count[WRITE]);
@@ -3459,8 +3459,6 @@
 	}
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-
 static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
 			  void *hcpu)
 {
@@ -3486,8 +3484,6 @@
 	.notifier_call	= blk_cpu_notify,
 };
 
-#endif /* CONFIG_HOTPLUG_CPU */
-
 /**
  * blk_complete_request - end I/O on a request
  * @req:      the request being processed
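
Alongside the workqueue changes, the block-layer hunks above drop the old kmem_cache_t typedef in favour of the plain struct kmem_cache; the allocation and free calls themselves are unchanged. For example (the cache pointer and helper names are illustrative):

	#include <linux/slab.h>

	static struct kmem_cache *example_cachep;	/* was: static kmem_cache_t *example_cachep; */

	static void *example_alloc(void)
	{
		return kmem_cache_alloc(example_cachep, GFP_KERNEL);
	}

	static void example_free(void *obj)
	{
		kmem_cache_free(example_cachep, obj);
	}
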
diff --git a/crypto/cryptomgr.c b/crypto/cryptomgr.c
index 9b5b156..2ebffb8 100644
--- a/crypto/cryptomgr.c
+++ b/crypto/cryptomgr.c
@@ -40,9 +40,10 @@
 	char template[CRYPTO_MAX_ALG_NAME];
 };
 
-static void cryptomgr_probe(void *data)
+static void cryptomgr_probe(struct work_struct *work)
 {
-	struct cryptomgr_param *param = data;
+	struct cryptomgr_param *param =
+		container_of(work, struct cryptomgr_param, work);
 	struct crypto_template *tmpl;
 	struct crypto_instance *inst;
 	int err;
@@ -112,7 +113,7 @@
 	param->larval.type = larval->alg.cra_flags;
 	param->larval.mask = larval->mask;
 
-	INIT_WORK(&param->work, cryptomgr_probe, param);
+	INIT_WORK(&param->work, cryptomgr_probe);
 	schedule_work(&param->work);
 
 	return NOTIFY_STOP;
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 068fe4f..02b30ae 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -50,6 +50,7 @@
 struct acpi_os_dpc {
 	acpi_osd_exec_callback function;
 	void *context;
+	struct work_struct work;
 };
 
 #ifdef CONFIG_ACPI_CUSTOM_DSDT
@@ -564,12 +565,9 @@
 	acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number);
 }
 
-static void acpi_os_execute_deferred(void *context)
+static void acpi_os_execute_deferred(struct work_struct *work)
 {
-	struct acpi_os_dpc *dpc = NULL;
-
-
-	dpc = (struct acpi_os_dpc *)context;
+	struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
 	if (!dpc) {
 		printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
 		return;
@@ -602,7 +600,6 @@
 {
 	acpi_status status = AE_OK;
 	struct acpi_os_dpc *dpc;
-	struct work_struct *task;
 
 	ACPI_FUNCTION_TRACE("os_queue_for_execution");
 
@@ -615,28 +612,22 @@
 
 	/*
 	 * Allocate/initialize DPC structure.  Note that this memory will be
-	 * freed by the callee.  The kernel handles the tq_struct list  in a
+	 * freed by the callee.  The kernel handles the work_struct list  in a
 	 * way that allows us to also free its memory inside the callee.
 	 * Because we may want to schedule several tasks with different
 	 * parameters we can't use the approach some kernel code uses of
-	 * having a static tq_struct.
-	 * We can save time and code by allocating the DPC and tq_structs
-	 * from the same memory.
+	 * having a static work_struct.
 	 */
 
-	dpc =
-	    kmalloc(sizeof(struct acpi_os_dpc) + sizeof(struct work_struct),
-		    GFP_ATOMIC);
+	dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
 	if (!dpc)
 		return_ACPI_STATUS(AE_NO_MEMORY);
 
 	dpc->function = function;
 	dpc->context = context;
 
-	task = (void *)(dpc + 1);
-	INIT_WORK(task, acpi_os_execute_deferred, (void *)dpc);
-
-	if (!queue_work(kacpid_wq, task)) {
+	INIT_WORK(&dpc->work, acpi_os_execute_deferred);
+	if (!queue_work(kacpid_wq, &dpc->work)) {
 		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
 				  "Call to queue_work() failed.\n"));
 		kfree(dpc);
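
The ACPI change above is the dynamically allocated case: rather than kmalloc()ing a work_struct next to the payload and handing the payload in as work data, the work item is now embedded in the payload structure, the handler recovers it with container_of(), and the handler stays responsible for freeing it. Roughly (the names and the workqueue are illustrative):

	#include <linux/workqueue.h>
	#include <linux/slab.h>
	#include <linux/errno.h>

	static struct workqueue_struct *example_wq;	/* created elsewhere, e.g. with create_workqueue() */

	struct example_dpc {
		void (*function)(void *context);
		void *context;
		struct work_struct work;	/* embedded; no separate allocation */
	};

	static void example_dpc_run(struct work_struct *work)
	{
		struct example_dpc *dpc = container_of(work, struct example_dpc, work);

		dpc->function(dpc->context);
		kfree(dpc);			/* the handler owns and frees the item */
	}

	static int example_defer(void (*fn)(void *), void *ctx)
	{
		struct example_dpc *dpc = kmalloc(sizeof(*dpc), GFP_ATOMIC);

		if (!dpc)
			return -ENOMEM;
		dpc->function = fn;
		dpc->context  = ctx;
		INIT_WORK(&dpc->work, example_dpc_run);
		if (!queue_work(example_wq, &dpc->work)) {
			kfree(dpc);
			return -EINVAL;
		}
		return 0;
	}
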
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index f8ec389..8816e30 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1081,7 +1081,7 @@
  *	ata_port_queue_task - Queue port_task
  *	@ap: The ata_port to queue port_task for
  *	@fn: workqueue function to be scheduled
- *	@data: data value to pass to workqueue function
+ *	@data: data for @fn to use
  *	@delay: delay time for workqueue function
  *
  *	Schedule @fn(@data) for execution after @delay jiffies using
@@ -1096,7 +1096,7 @@
  *	LOCKING:
  *	Inherited from caller.
  */
-void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
+void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
 			 unsigned long delay)
 {
 	int rc;
@@ -1104,12 +1104,10 @@
 	if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
 		return;
 
-	PREPARE_WORK(&ap->port_task, fn, data);
+	PREPARE_DELAYED_WORK(&ap->port_task, fn);
+	ap->port_task_data = data;
 
-	if (!delay)
-		rc = queue_work(ata_wq, &ap->port_task);
-	else
-		rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
+	rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
 
 	/* rc == 0 means that another user is using port task */
 	WARN_ON(rc == 0);
@@ -4588,10 +4586,11 @@
 	return poll_next;
 }
 
-static void ata_pio_task(void *_data)
+static void ata_pio_task(struct work_struct *work)
 {
-	struct ata_queued_cmd *qc = _data;
-	struct ata_port *ap = qc->ap;
+	struct ata_port *ap =
+		container_of(work, struct ata_port, port_task.work);
+	struct ata_queued_cmd *qc = ap->port_task_data;
 	u8 status;
 	int poll_next;
 
@@ -5635,9 +5634,9 @@
 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
 #endif
 
-	INIT_WORK(&ap->port_task, NULL, NULL);
-	INIT_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap);
-	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan, ap);
+	INIT_DELAYED_WORK(&ap->port_task, NULL);
+	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
+	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
 	INIT_LIST_HEAD(&ap->eh_done_q);
 	init_waitqueue_head(&ap->eh_wait_q);
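
Because struct delayed_work carries no data pointer, the libata conversion above stashes the handler's argument in a new ap->port_task_data field and always goes through queue_delayed_work() (a delay of 0 queues it straight away). The handler side then looks like this, reusing the names from the hunk (the function name itself is illustrative):

	static void example_port_task(struct work_struct *work)
	{
		struct ata_port *ap =
			container_of(work, struct ata_port, port_task.work);
		void *data = ap->port_task_data;

		/* ... use ap and data exactly as the old void-pointer handler did ... */
	}
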
 
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 76a85df..08ad44b 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -332,7 +332,7 @@
 	if (ap->pflags & ATA_PFLAG_LOADING)
 		ap->pflags &= ~ATA_PFLAG_LOADING;
 	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
-		queue_work(ata_aux_wq, &ap->hotplug_task);
+		queue_delayed_work(ata_aux_wq, &ap->hotplug_task, 0);
 
 	if (ap->pflags & ATA_PFLAG_RECOVERED)
 		ata_port_printk(ap, KERN_INFO, "EH complete\n");
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 8eaace9..664e137 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2963,7 +2963,7 @@
 
 /**
  *	ata_scsi_hotplug - SCSI part of hotplug
- *	@data: Pointer to ATA port to perform SCSI hotplug on
+ *	@work: Pointer to ATA port to perform SCSI hotplug on
  *
  *	Perform SCSI part of hotplug.  It's executed from a separate
  *	workqueue after EH completes.  This is necessary because SCSI
@@ -2973,9 +2973,10 @@
  *	LOCKING:
  *	Kernel thread context (may sleep).
  */
-void ata_scsi_hotplug(void *data)
+void ata_scsi_hotplug(struct work_struct *work)
 {
-	struct ata_port *ap = data;
+	struct ata_port *ap =
+		container_of(work, struct ata_port, hotplug_task.work);
 	int i;
 
 	if (ap->pflags & ATA_PFLAG_UNLOADING) {
@@ -3076,7 +3077,7 @@
 
 /**
  *	ata_scsi_dev_rescan - initiate scsi_rescan_device()
- *	@data: Pointer to ATA port to perform scsi_rescan_device()
+ *	@work: Pointer to ATA port to perform scsi_rescan_device()
  *
  *	After ATA pass thru (SAT) commands are executed successfully,
  *	libata need to propagate the changes to SCSI layer.  This
@@ -3086,9 +3087,10 @@
  *	LOCKING:
  *	Kernel thread context (may sleep).
  */
-void ata_scsi_dev_rescan(void *data)
+void ata_scsi_dev_rescan(struct work_struct *work)
 {
-	struct ata_port *ap = data;
+	struct ata_port *ap =
+		container_of(work, struct ata_port, scsi_rescan_task);
 	unsigned long flags;
 	unsigned int i;
 
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 107b2b5..81ae41d 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -94,7 +94,7 @@
 
 extern void ata_scsi_scan_host(struct ata_port *ap);
 extern int ata_scsi_offline_dev(struct ata_device *dev);
-extern void ata_scsi_hotplug(void *data);
+extern void ata_scsi_hotplug(struct work_struct *work);
 extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
 			       unsigned int buflen);
 
@@ -124,7 +124,7 @@
                         unsigned int (*actor) (struct ata_scsi_args *args,
                                            u8 *rbuf, unsigned int buflen));
 extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
-extern void ata_scsi_dev_rescan(void *data);
+extern void ata_scsi_dev_rescan(struct work_struct *work);
 extern int ata_bus_probe(struct ata_port *ap);
 
 /* libata-eh.c */
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index c7314a7..7d9b4e5 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -820,7 +820,7 @@
 		void *cpuaddr;
 
 #ifdef USE_RBPS_POOL 
-		cpuaddr = pci_pool_alloc(he_dev->rbps_pool, SLAB_KERNEL|SLAB_DMA, &dma_handle);
+		cpuaddr = pci_pool_alloc(he_dev->rbps_pool, GFP_KERNEL|GFP_DMA, &dma_handle);
 		if (cpuaddr == NULL)
 			return -ENOMEM;
 #else
@@ -884,7 +884,7 @@
 		void *cpuaddr;
 
 #ifdef USE_RBPL_POOL
-		cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, SLAB_KERNEL|SLAB_DMA, &dma_handle);
+		cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &dma_handle);
 		if (cpuaddr == NULL)
 			return -ENOMEM;
 #else
@@ -1724,7 +1724,7 @@
 	struct he_tpd *tpd;
 	dma_addr_t dma_handle; 
 
-	tpd = pci_pool_alloc(he_dev->tpd_pool, SLAB_ATOMIC|SLAB_DMA, &dma_handle);              
+	tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &dma_handle);
 	if (tpd == NULL)
 		return NULL;
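
The SLAB_KERNEL/SLAB_ATOMIC/SLAB_DMA aliases are retired throughout this series; allocators now take the gfp_t flags directly. For the PCI DMA pools touched here that looks like the following sketch (pool and variable names are illustrative):

	#include <linux/pci.h>

	static int example_fill(struct pci_pool *example_pool)
	{
		void *cpuaddr;
		dma_addr_t dma_handle;

		cpuaddr = pci_pool_alloc(example_pool, GFP_KERNEL | GFP_DMA, &dma_handle);
		if (cpuaddr == NULL)
			return -ENOMEM;
		/* ... set up the buffer ... */
		pci_pool_free(example_pool, cpuaddr, dma_handle);
		return 0;
	}
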
 			
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 87b17c3..f407861 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -135,7 +135,7 @@
 			       int flags);
 static int idt77252_proc_read(struct atm_dev *dev, loff_t * pos,
 			      char *page);
-static void idt77252_softint(void *dev_id);
+static void idt77252_softint(struct work_struct *work);
 
 
 static struct atmdev_ops idt77252_ops =
@@ -2866,9 +2866,10 @@
 }
 
 static void
-idt77252_softint(void *dev_id)
+idt77252_softint(struct work_struct *work)
 {
-	struct idt77252_dev *card = dev_id;
+	struct idt77252_dev *card =
+		container_of(work, struct idt77252_dev, tqueue);
 	u32 stat;
 	int done;
 
@@ -3697,7 +3698,7 @@
 	card->pcidev = pcidev;
 	sprintf(card->name, "idt77252-%d", card->index);
 
-	INIT_WORK(&card->tqueue, idt77252_softint, (void *)card);
+	INIT_WORK(&card->tqueue, idt77252_softint);
 
 	membase = pci_resource_start(pcidev, 1);
 	srambase = pci_resource_start(pcidev, 2);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index e4b530e..67b79a7 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -386,6 +386,7 @@
 	INIT_LIST_HEAD(&dev->node);
 	init_MUTEX(&dev->sem);
 	device_init_wakeup(dev, 0);
+	set_dev_node(dev, -1);
 }
 
 #ifdef CONFIG_SYSFS_DEPRECATED
diff --git a/drivers/base/dmapool.c b/drivers/base/dmapool.c
index b2efbd4..dbe0735 100644
--- a/drivers/base/dmapool.c
+++ b/drivers/base/dmapool.c
@@ -126,7 +126,7 @@
 	} else if (allocation < size)
 		return NULL;
 
-	if (!(retval = kmalloc (sizeof *retval, SLAB_KERNEL)))
+	if (!(retval = kmalloc (sizeof *retval, GFP_KERNEL)))
 		return retval;
 
 	strlcpy (retval->name, name, sizeof retval->name);
@@ -297,7 +297,7 @@
 			}
 		}
 	}
-	if (!(page = pool_alloc_page (pool, SLAB_ATOMIC))) {
+	if (!(page = pool_alloc_page (pool, GFP_ATOMIC))) {
 		if (mem_flags & __GFP_WAIT) {
 			DECLARE_WAITQUEUE (wait, current);
 
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index c6b7d9c..74b9679 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -290,9 +290,8 @@
 
 static int block_size_init(void)
 {
-	sysfs_create_file(&memory_sysdev_class.kset.kobj,
-		&class_attr_block_size_bytes.attr);
-	return 0;
+	return sysfs_create_file(&memory_sysdev_class.kset.kobj,
+				&class_attr_block_size_bytes.attr);
 }
 
 /*
@@ -323,12 +322,14 @@
 
 static int memory_probe_init(void)
 {
-	sysfs_create_file(&memory_sysdev_class.kset.kobj,
-		&class_attr_probe.attr);
-	return 0;
+	return sysfs_create_file(&memory_sysdev_class.kset.kobj,
+				&class_attr_probe.attr);
 }
 #else
-#define memory_probe_init(...)	do {} while (0)
+static inline int memory_probe_init(void)
+{
+	return 0;
+}
 #endif
 
 /*
@@ -431,9 +432,12 @@
 {
 	unsigned int i;
 	int ret;
+	int err;
 
 	memory_sysdev_class.kset.uevent_ops = &memory_uevent_ops;
 	ret = sysdev_class_register(&memory_sysdev_class);
+	if (ret)
+		goto out;
 
 	/*
 	 * Create entries for memory sections that were found
@@ -442,11 +446,19 @@
 	for (i = 0; i < NR_MEM_SECTIONS; i++) {
 		if (!valid_section_nr(i))
 			continue;
-		add_memory_block(0, __nr_to_section(i), MEM_ONLINE, 0);
+		err = add_memory_block(0, __nr_to_section(i), MEM_ONLINE, 0);
+		if (!ret)
+			ret = err;
 	}
 
-	memory_probe_init();
-	block_size_init();
-
+	err = memory_probe_init();
+	if (!ret)
+		ret = err;
+	err = block_size_init();
+	if (!ret)
+		ret = err;
+out:
+	if (ret)
+		printk(KERN_ERR "%s() failed: %d\n", __FUNCTION__, ret);
 	return ret;
 }
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index 3d12b85..067a9e8 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -108,7 +108,6 @@
 	return rc;
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static void __cpuinit topology_remove_dev(unsigned int cpu)
 {
 	struct sys_device *sys_dev = get_cpu_sysdev(cpu);
@@ -136,7 +135,6 @@
 	}
 	return rc ? NOTIFY_BAD : NOTIFY_OK;
 }
-#endif
 
 static int __cpuinit topology_sysfs_init(void)
 {
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 742d074..8d81a3a 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -324,13 +324,13 @@
       Command->Next = Controller->FreeCommands;
       Controller->FreeCommands = Command;
       Controller->Commands[CommandIdentifier-1] = Command;
-      ScatterGatherCPU = pci_pool_alloc(ScatterGatherPool, SLAB_ATOMIC,
+      ScatterGatherCPU = pci_pool_alloc(ScatterGatherPool, GFP_ATOMIC,
 							&ScatterGatherDMA);
       if (ScatterGatherCPU == NULL)
 	  return DAC960_Failure(Controller, "AUXILIARY STRUCTURE CREATION");
 
       if (RequestSensePool != NULL) {
-  	  RequestSenseCPU = pci_pool_alloc(RequestSensePool, SLAB_ATOMIC,
+  	  RequestSenseCPU = pci_pool_alloc(RequestSensePool, GFP_ATOMIC,
 						&RequestSenseDMA);
   	  if (RequestSenseCPU == NULL) {
                 pci_pool_free(ScatterGatherPool, ScatterGatherCPU,
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 17dc222..432c17e 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -168,7 +168,8 @@
 
 config CISS_SCSI_TAPE
 	bool "SCSI tape drive support for Smart Array 5xxx"
-	depends on BLK_CPQ_CISS_DA && SCSI && PROC_FS
+	depends on BLK_CPQ_CISS_DA && PROC_FS
+	depends on SCSI=y || SCSI=BLK_CPQ_CISS_DA
 	help
 	  When enabled (Y), this option allows SCSI tape drives and SCSI medium
 	  changers (tape robots) to be accessed via a Compaq 5xxx array 
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 6d11122..2308e83 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -159,7 +159,7 @@
 void aoecmd_cfg(ushort aoemajor, unsigned char aoeminor);
 void aoecmd_ata_rsp(struct sk_buff *);
 void aoecmd_cfg_rsp(struct sk_buff *);
-void aoecmd_sleepwork(void *vp);
+void aoecmd_sleepwork(struct work_struct *);
 struct sk_buff *new_skb(ulong);
 
 int aoedev_init(void);
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index aa25f8b..478489c 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -12,7 +12,7 @@
 #include <linux/netdevice.h>
 #include "aoe.h"
 
-static kmem_cache_t *buf_pool_cache;
+static struct kmem_cache *buf_pool_cache;
 
 static ssize_t aoedisk_show_state(struct gendisk * disk, char *page)
 {
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 8a13b1a..97f7f53 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -408,9 +408,9 @@
 /* this function performs work that has been deferred until sleeping is OK
  */
 void
-aoecmd_sleepwork(void *vp)
+aoecmd_sleepwork(struct work_struct *work)
 {
-	struct aoedev *d = (struct aoedev *) vp;
+	struct aoedev *d = container_of(work, struct aoedev, work);
 
 	if (d->flags & DEVFL_GDALLOC)
 		aoeblk_gdalloc(d);
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index 6125921..05a9719 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -88,7 +88,7 @@
  			kfree(d);
 		return NULL;
 	}
-	INIT_WORK(&d->work, aoecmd_sleepwork, d);
+	INIT_WORK(&d->work, aoecmd_sleepwork);
 	spin_lock_init(&d->lock);
 	init_timer(&d->timer);
 	d->timer.data = (ulong) d;
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 4105c3b..892e092 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -47,14 +47,15 @@
 #include <linux/completion.h>
 
 #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
-#define DRIVER_NAME "HP CISS Driver (v 3.6.10)"
-#define DRIVER_VERSION CCISS_DRIVER_VERSION(3,6,10)
+#define DRIVER_NAME "HP CISS Driver (v 3.6.14)"
+#define DRIVER_VERSION CCISS_DRIVER_VERSION(3,6,14)
 
 /* Embedded module documentation macros - see modules.h */
 MODULE_AUTHOR("Hewlett-Packard Company");
-MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 3.6.10");
+MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 3.6.14");
 MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
 			" SA6i P600 P800 P400 P400i E200 E200i E500");
+MODULE_VERSION("3.6.14");
 MODULE_LICENSE("GPL");
 
 #include "cciss_cmd.h"
@@ -81,7 +82,9 @@
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSD,     0x103C, 0x3213},
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSD,     0x103C, 0x3214},
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSD,     0x103C, 0x3215},
-	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSC,     0x103C, 0x3233},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSC,     0x103C, 0x3237},
+	{PCI_VENDOR_ID_HP,     PCI_ANY_ID,	PCI_ANY_ID, PCI_ANY_ID,
+		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
 	{0,}
 };
 
@@ -90,27 +93,29 @@
 /*  board_id = Subsystem Device ID & Vendor ID
  *  product = Marketing Name for the board
  *  access = Address of the struct of function pointers
+ *  nr_cmds = Number of commands supported by controller
  */
 static struct board_type products[] = {
-	{0x40700E11, "Smart Array 5300", &SA5_access},
-	{0x40800E11, "Smart Array 5i", &SA5B_access},
-	{0x40820E11, "Smart Array 532", &SA5B_access},
-	{0x40830E11, "Smart Array 5312", &SA5B_access},
-	{0x409A0E11, "Smart Array 641", &SA5_access},
-	{0x409B0E11, "Smart Array 642", &SA5_access},
-	{0x409C0E11, "Smart Array 6400", &SA5_access},
-	{0x409D0E11, "Smart Array 6400 EM", &SA5_access},
-	{0x40910E11, "Smart Array 6i", &SA5_access},
-	{0x3225103C, "Smart Array P600", &SA5_access},
-	{0x3223103C, "Smart Array P800", &SA5_access},
-	{0x3234103C, "Smart Array P400", &SA5_access},
-	{0x3235103C, "Smart Array P400i", &SA5_access},
-	{0x3211103C, "Smart Array E200i", &SA5_access},
-	{0x3212103C, "Smart Array E200", &SA5_access},
-	{0x3213103C, "Smart Array E200i", &SA5_access},
-	{0x3214103C, "Smart Array E200i", &SA5_access},
-	{0x3215103C, "Smart Array E200i", &SA5_access},
-	{0x3233103C, "Smart Array E500", &SA5_access},
+	{0x40700E11, "Smart Array 5300", &SA5_access, 512},
+	{0x40800E11, "Smart Array 5i", &SA5B_access, 512},
+	{0x40820E11, "Smart Array 532", &SA5B_access, 512},
+	{0x40830E11, "Smart Array 5312", &SA5B_access, 512},
+	{0x409A0E11, "Smart Array 641", &SA5_access, 512},
+	{0x409B0E11, "Smart Array 642", &SA5_access, 512},
+	{0x409C0E11, "Smart Array 6400", &SA5_access, 512},
+	{0x409D0E11, "Smart Array 6400 EM", &SA5_access, 512},
+	{0x40910E11, "Smart Array 6i", &SA5_access, 512},
+	{0x3225103C, "Smart Array P600", &SA5_access, 512},
+	{0x3223103C, "Smart Array P800", &SA5_access, 512},
+	{0x3234103C, "Smart Array P400", &SA5_access, 512},
+	{0x3235103C, "Smart Array P400i", &SA5_access, 512},
+	{0x3211103C, "Smart Array E200i", &SA5_access, 120},
+	{0x3212103C, "Smart Array E200", &SA5_access, 120},
+	{0x3213103C, "Smart Array E200i", &SA5_access, 120},
+	{0x3214103C, "Smart Array E200i", &SA5_access, 120},
+	{0x3215103C, "Smart Array E200i", &SA5_access, 120},
+	{0x3237103C, "Smart Array E500", &SA5_access, 512},
+	{0xFFFF103C, "Unknown Smart Array", &SA5_access, 120},
 };
 
 /* How long to wait (in milliseconds) for board to go into simple mode */
@@ -121,7 +126,6 @@
 #define MAX_CMD_RETRIES 3
 
 #define READ_AHEAD 	 1024
-#define NR_CMDS		 384	/* #commands that can be outstanding */
 #define MAX_CTLR	32
 
 /* Originally cciss driver only supports 8 major numbers */
@@ -137,7 +141,6 @@
 		       unsigned int cmd, unsigned long arg);
 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
 
-static int revalidate_allvol(ctlr_info_t *host);
 static int cciss_revalidate(struct gendisk *disk);
 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk);
 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
@@ -265,6 +268,7 @@
 		       "Firmware Version: %c%c%c%c\n"
 		       "IRQ: %d\n"
 		       "Logical drives: %d\n"
+		       "Max sectors: %d\n"
 		       "Current Q depth: %d\n"
 		       "Current # commands on controller: %d\n"
 		       "Max Q depth since init: %d\n"
@@ -275,7 +279,9 @@
 		       (unsigned long)h->board_id,
 		       h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
 		       h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT],
-		       h->num_luns, h->Qdepth, h->commands_outstanding,
+		       h->num_luns,
+		       h->cciss_max_sectors,
+		       h->Qdepth, h->commands_outstanding,
 		       h->maxQsinceinit, h->max_outstanding, h->maxSG);
 
 	pos += size;
@@ -400,8 +406,8 @@
 	} else {		/* get it out of the controllers pool */
 
 		do {
-			i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
-			if (i == NR_CMDS)
+			i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
+			if (i == h->nr_cmds)
 				return NULL;
 		} while (test_and_set_bit
 			 (i & (BITS_PER_LONG - 1),
@@ -487,7 +493,7 @@
 	 * but I'm already using way to many device nodes to claim another one
 	 * for "raw controller".
 	 */
-	if (drv->nr_blocks == 0) {
+	if (drv->heads == 0) {
 		if (iminor(inode) != 0) {	/* not node 0? */
 			/* if not node 0 make sure it is a partition = 0 */
 			if (iminor(inode) & 0x0f) {
@@ -850,9 +856,7 @@
 		}
 
 	case CCISS_REVALIDVOLS:
-		if (bdev != bdev->bd_contains || drv != host->drv)
-			return -ENXIO;
-		return revalidate_allvol(host);
+		return rebuild_lun_table(host, NULL);
 
 	case CCISS_GETLUNINFO:{
 			LogvolInfo_struct luninfo;
@@ -1152,75 +1156,6 @@
 	}
 }
 
-/*
- * revalidate_allvol is for online array config utilities.  After a
- * utility reconfigures the drives in the array, it can use this function
- * (through an ioctl) to make the driver zap any previous disk structs for
- * that controller and get new ones.
- *
- * Right now I'm using the getgeometry() function to do this, but this
- * function should probably be finer grained and allow you to revalidate one
- * particular logical volume (instead of all of them on a particular
- * controller).
- */
-static int revalidate_allvol(ctlr_info_t *host)
-{
-	int ctlr = host->ctlr, i;
-	unsigned long flags;
-
-	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
-	if (host->usage_count > 1) {
-		spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
-		printk(KERN_WARNING "cciss: Device busy for volume"
-		       " revalidation (usage=%d)\n", host->usage_count);
-		return -EBUSY;
-	}
-	host->usage_count++;
-	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
-
-	for (i = 0; i < NWD; i++) {
-		struct gendisk *disk = host->gendisk[i];
-		if (disk) {
-			request_queue_t *q = disk->queue;
-
-			if (disk->flags & GENHD_FL_UP)
-				del_gendisk(disk);
-			if (q)
-				blk_cleanup_queue(q);
-		}
-	}
-
-	/*
-	 * Set the partition and block size structures for all volumes
-	 * on this controller to zero.  We will reread all of this data
-	 */
-	memset(host->drv, 0, sizeof(drive_info_struct)
-	       * CISS_MAX_LUN);
-	/*
-	 * Tell the array controller not to give us any interrupts while
-	 * we check the new geometry.  Then turn interrupts back on when
-	 * we're done.
-	 */
-	host->access.set_intr_mask(host, CCISS_INTR_OFF);
-	cciss_getgeometry(ctlr);
-	host->access.set_intr_mask(host, CCISS_INTR_ON);
-
-	/* Loop through each real device */
-	for (i = 0; i < NWD; i++) {
-		struct gendisk *disk = host->gendisk[i];
-		drive_info_struct *drv = &(host->drv[i]);
-		/* we must register the controller even if no disks exist */
-		/* this is for the online array utilities */
-		if (!drv->heads && i)
-			continue;
-		blk_queue_hardsect_size(drv->queue, drv->block_size);
-		set_capacity(disk, drv->nr_blocks);
-		add_disk(disk);
-	}
-	host->usage_count--;
-	return 0;
-}
-
 static inline void complete_buffers(struct bio *bio, int status)
 {
 	while (bio) {
@@ -1243,7 +1178,7 @@
 	 * in case the interrupt we serviced was from an ioctl and did not
 	 * free any new commands.
 	 */
-	if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
+	if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds)
 		return;
 
 	/* We have room on the queue for more commands.  Now we need to queue
@@ -1262,7 +1197,7 @@
 		/* check to see if we have maxed out the number of commands
 		 * that can be placed on the queue.
 		 */
-		if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS) {
+		if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds) {
 			if (curr_queue == start_queue) {
 				h->next_to_run =
 				    (start_queue + 1) % (h->highest_lun + 1);
@@ -1380,6 +1315,11 @@
 	/* if it's the controller it's already added */
 	if (drv_index) {
 		disk->queue = blk_init_queue(do_cciss_request, &h->lock);
+		sprintf(disk->disk_name, "cciss/c%dd%d", ctlr, drv_index);
+		disk->major = h->major;
+		disk->first_minor = drv_index << NWD_SHIFT;
+		disk->fops = &cciss_fops;
+		disk->private_data = &h->drv[drv_index];
 
 		/* Set up queue information */
 		disk->queue->backing_dev_info.ra_pages = READ_AHEAD;
@@ -1391,7 +1331,7 @@
 		/* This is a limit in the driver and could be eliminated. */
 		blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);
 
-		blk_queue_max_sectors(disk->queue, 512);
+		blk_queue_max_sectors(disk->queue, h->cciss_max_sectors);
 
 		blk_queue_softirq_done(disk->queue, cciss_softirq_done);
 
@@ -1458,11 +1398,6 @@
 
 	/* Set busy_configuring flag for this operation */
 	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
-	if (h->num_luns >= CISS_MAX_LUN) {
-		spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
-		return -EINVAL;
-	}
-
 	if (h->busy_configuring) {
 		spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
 		return -EBUSY;
@@ -1495,17 +1430,8 @@
 					      0, 0, TYPE_CMD);
 
 		if (return_code == IO_OK) {
-			listlength |=
-			    (0xff & (unsigned int)(ld_buff->LUNListLength[0]))
-			    << 24;
-			listlength |=
-			    (0xff & (unsigned int)(ld_buff->LUNListLength[1]))
-			    << 16;
-			listlength |=
-			    (0xff & (unsigned int)(ld_buff->LUNListLength[2]))
-			    << 8;
-			listlength |=
-			    0xff & (unsigned int)(ld_buff->LUNListLength[3]);
+			listlength =
+				be32_to_cpu(*(__u32 *) ld_buff->LUNListLength);
 		} else {	/* reading number of logical volumes failed */
 			printk(KERN_WARNING "cciss: report logical volume"
 			       " command failed\n");
@@ -1556,6 +1482,14 @@
 				if (drv_index == -1)
 					goto freeret;
 
+				/* Check if the gendisk needs to be allocated */
+				if (!h->gendisk[drv_index]){
+					h->gendisk[drv_index] = alloc_disk(1 << NWD_SHIFT);
+					if (!h->gendisk[drv_index]){
+						printk(KERN_ERR "cciss: could not allocate new disk %d\n", drv_index);
+						goto mem_msg;
+					}
+				}
 			}
 			h->drv[drv_index].LunID = lunid;
 			cciss_update_drive_info(ctlr, drv_index);
@@ -1593,6 +1527,7 @@
 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
 			   int clear_all)
 {
+	int i;
 	ctlr_info_t *h = get_host(disk);
 
 	if (!capable(CAP_SYS_RAWIO))
@@ -1616,9 +1551,35 @@
 				del_gendisk(disk);
 			if (q) {
 				blk_cleanup_queue(q);
+				/* Set drv->queue to NULL so that we do not try
+				 * to call blk_start_queue on this queue in the
+				 * interrupt handler
+				 */
 				drv->queue = NULL;
 			}
+			/* If clear_all is set then we are deleting the logical
+			 * drive, not just refreshing its info.  For drives
+			 * other than disk 0 we will call put_disk.  We do not
+			 * do this for disk 0 as we need it to be able to
+			 * configure the controller.
+			*/
+			if (clear_all){
+				/* This isn't pretty, but we need to find the
+				 * disk in our array and NULL out the pointer.
+				 * This is so that we will call alloc_disk if
+				 * this index is used again later.
+				*/
+				for (i=0; i < CISS_MAX_LUN; i++){
+					if(h->gendisk[i] == disk){
+						h->gendisk[i] = NULL;
+						break;
+					}
+				}
+				put_disk(disk);
+			}
 		}
+	} else {
+		set_capacity(disk, 0);
 	}
 
 	--h->num_luns;
@@ -2136,7 +2097,7 @@
 
 	/* We've sent down an abort or reset, but something else
 	   has completed */
-	if (srl->ncompletions >= (NR_CMDS + 2)) {
+	if (srl->ncompletions >= (hba[ctlr]->nr_cmds + 2)) {
 		/* Uh oh.  No room to save it for later... */
 		printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
 		       "reject list overflow, command lost!\n", ctlr);
@@ -2673,7 +2634,7 @@
 			a1 = a;
 			if ((a & 0x04)) {
 				a2 = (a >> 3);
-				if (a2 >= NR_CMDS) {
+				if (a2 >= h->nr_cmds) {
 					printk(KERN_WARNING
 					       "cciss: controller cciss%d failed, stopping.\n",
 					       h->ctlr);
@@ -2827,23 +2788,21 @@
 		if (err > 0) {
 			printk(KERN_WARNING "cciss: only %d MSI-X vectors "
 			       "available\n", err);
+			goto default_int_mode;
 		} else {
 			printk(KERN_WARNING "cciss: MSI-X init failed %d\n",
 			       err);
+			goto default_int_mode;
 		}
 	}
 	if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
 		if (!pci_enable_msi(pdev)) {
-			c->intr[SIMPLE_MODE_INT] = pdev->irq;
 			c->msi_vector = 1;
-			return;
 		} else {
 			printk(KERN_WARNING "cciss: MSI init failed\n");
-			c->intr[SIMPLE_MODE_INT] = pdev->irq;
-			return;
 		}
 	}
-      default_int_mode:
+default_int_mode:
 #endif				/* CONFIG_PCI_MSI */
 	/* if we get here we're going to use the default interrupt mode */
 	c->intr[SIMPLE_MODE_INT] = pdev->irq;
@@ -2956,16 +2915,10 @@
 		if (board_id == products[i].board_id) {
 			c->product_name = products[i].product_name;
 			c->access = *(products[i].access);
+			c->nr_cmds = products[i].nr_cmds;
 			break;
 		}
 	}
-	if (i == ARRAY_SIZE(products)) {
-		printk(KERN_WARNING "cciss: Sorry, I don't know how"
-		       " to access the Smart Array controller %08lx\n",
-		       (unsigned long)board_id);
-		err = -ENODEV;
-		goto err_out_free_res;
-	}
 	if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
 	    (readb(&c->cfgtable->Signature[1]) != 'I') ||
 	    (readb(&c->cfgtable->Signature[2]) != 'S') ||
@@ -2974,6 +2927,27 @@
 		err = -ENODEV;
 		goto err_out_free_res;
 	}
+	/* We didn't find the controller in our list. We know the
+	 * signature is valid. If it's an HP device let's try to
+	 * bind to the device and fire it up. Otherwise we bail.
+	 */
+	if (i == ARRAY_SIZE(products)) {
+		if (subsystem_vendor_id == PCI_VENDOR_ID_HP) {
+			c->product_name = products[i-1].product_name;
+			c->access = *(products[i-1].access);
+			c->nr_cmds = products[i-1].nr_cmds;
+			printk(KERN_WARNING "cciss: This is an unknown "
+				"Smart Array controller.\n"
+				"cciss: Please update to the latest driver "
+				"available from www.hp.com.\n");
+		} else {
+			printk(KERN_WARNING "cciss: Sorry, I don't know how"
+				" to access the Smart Array controller %08lx\n"
+					, (unsigned long)board_id);
+			err = -ENODEV;
+			goto err_out_free_res;
+		}
+	}
 #ifdef CONFIG_X86
 	{
 		/* Need to enable prefetch in the SCSI core for 6400 in x86 */
@@ -2984,6 +2958,17 @@
 	}
 #endif
 
+	/* Disabling DMA prefetch for the P600
+	 * An ASIC bug may result in a prefetch beyond
+	 * physical memory.
+	 */
+	if(board_id == 0x3225103C) {
+		__u32 dma_prefetch;
+		dma_prefetch = readl(c->vaddr + I2O_DMA1_CFG);
+		dma_prefetch |= 0x8000;
+		writel(dma_prefetch, c->vaddr + I2O_DMA1_CFG);
+	}
+
 #ifdef CCISS_DEBUG
 	printk("Trying to put board into Simple mode\n");
 #endif				/* CCISS_DEBUG */
@@ -3158,13 +3143,7 @@
 /* Returns -1 if no free entries are left.  */
 static int alloc_cciss_hba(void)
 {
-	struct gendisk *disk[NWD];
-	int i, n;
-	for (n = 0; n < NWD; n++) {
-		disk[n] = alloc_disk(1 << NWD_SHIFT);
-		if (!disk[n])
-			goto out;
-	}
+	int i;
 
 	for (i = 0; i < MAX_CTLR; i++) {
 		if (!hba[i]) {
@@ -3172,20 +3151,18 @@
 			p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
 			if (!p)
 				goto Enomem;
-			for (n = 0; n < NWD; n++)
-				p->gendisk[n] = disk[n];
+			p->gendisk[0] = alloc_disk(1 << NWD_SHIFT);
+			if (!p->gendisk[0])
+				goto Enomem;
 			hba[i] = p;
 			return i;
 		}
 	}
 	printk(KERN_WARNING "cciss: This driver supports a maximum"
 	       " of %d controllers.\n", MAX_CTLR);
-	goto out;
-      Enomem:
+	return -1;
+Enomem:
 	printk(KERN_ERR "cciss: out of memory.\n");
-      out:
-	while (n--)
-		put_disk(disk[n]);
 	return -1;
 }
 
@@ -3195,7 +3172,7 @@
 	int n;
 
 	hba[i] = NULL;
-	for (n = 0; n < NWD; n++)
+	for (n = 0; n < CISS_MAX_LUN; n++)
 		put_disk(p->gendisk[n]);
 	kfree(p);
 }
@@ -3208,9 +3185,8 @@
 static int __devinit cciss_init_one(struct pci_dev *pdev,
 				    const struct pci_device_id *ent)
 {
-	request_queue_t *q;
 	int i;
-	int j;
+	int j = 0;
 	int rc;
 	int dac;
 
@@ -3269,15 +3245,15 @@
 	       hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not");
 
 	hba[i]->cmd_pool_bits =
-	    kmalloc(((NR_CMDS + BITS_PER_LONG -
+	    kmalloc(((hba[i]->nr_cmds + BITS_PER_LONG -
 		      1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
 	hba[i]->cmd_pool = (CommandList_struct *)
 	    pci_alloc_consistent(hba[i]->pdev,
-		    NR_CMDS * sizeof(CommandList_struct),
+		    hba[i]->nr_cmds * sizeof(CommandList_struct),
 		    &(hba[i]->cmd_pool_dhandle));
 	hba[i]->errinfo_pool = (ErrorInfo_struct *)
 	    pci_alloc_consistent(hba[i]->pdev,
-		    NR_CMDS * sizeof(ErrorInfo_struct),
+		    hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
 		    &(hba[i]->errinfo_pool_dhandle));
 	if ((hba[i]->cmd_pool_bits == NULL)
 	    || (hba[i]->cmd_pool == NULL)
@@ -3288,7 +3264,7 @@
 #ifdef CONFIG_CISS_SCSI_TAPE
 	hba[i]->scsi_rejects.complete =
 	    kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
-		    (NR_CMDS + 5), GFP_KERNEL);
+		    (hba[i]->nr_cmds + 5), GFP_KERNEL);
 	if (hba[i]->scsi_rejects.complete == NULL) {
 		printk(KERN_ERR "cciss: out of memory");
 		goto clean4;
@@ -3302,7 +3278,7 @@
 	/* command and error info recs zeroed out before
 	   they are used */
 	memset(hba[i]->cmd_pool_bits, 0,
-	       ((NR_CMDS + BITS_PER_LONG -
+	       ((hba[i]->nr_cmds + BITS_PER_LONG -
 		 1) / BITS_PER_LONG) * sizeof(unsigned long));
 
 #ifdef CCISS_DEBUG
@@ -3317,18 +3293,34 @@
 	hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
 
 	cciss_procinit(i);
+
+	hba[i]->cciss_max_sectors = 2048;
+
 	hba[i]->busy_initializing = 0;
 
-	for (j = 0; j < NWD; j++) {	/* mfm */
+	do {
 		drive_info_struct *drv = &(hba[i]->drv[j]);
 		struct gendisk *disk = hba[i]->gendisk[j];
+		request_queue_t *q;
+
+		/* Check if the disk was allocated already */
+		if (!disk){
+			hba[i]->gendisk[j] = alloc_disk(1 << NWD_SHIFT);
+			disk = hba[i]->gendisk[j];
+		}
+
+		/* Check that the disk was able to be allocated */
+		if (!disk) {
+			printk(KERN_ERR "cciss: unable to allocate memory for disk %d\n", j);
+			goto clean4;
+		}
 
 		q = blk_init_queue(do_cciss_request, &hba[i]->lock);
 		if (!q) {
 			printk(KERN_ERR
 			       "cciss:  unable to allocate queue for disk %d\n",
 			       j);
-			break;
+			goto clean4;
 		}
 		drv->queue = q;
 
@@ -3341,7 +3333,7 @@
 		/* This is a limit in the driver and could be eliminated. */
 		blk_queue_max_phys_segments(q, MAXSGENTRIES);
 
-		blk_queue_max_sectors(q, 512);
+		blk_queue_max_sectors(q, hba[i]->cciss_max_sectors);
 
 		blk_queue_softirq_done(q, cciss_softirq_done);
 
@@ -3360,7 +3352,8 @@
 		blk_queue_hardsect_size(q, drv->block_size);
 		set_capacity(disk, drv->nr_blocks);
 		add_disk(disk);
-	}
+		j++;
+	} while (j <= hba[i]->highest_lun);
 
 	return 1;
 
@@ -3371,11 +3364,11 @@
 	kfree(hba[i]->cmd_pool_bits);
 	if (hba[i]->cmd_pool)
 		pci_free_consistent(hba[i]->pdev,
-				    NR_CMDS * sizeof(CommandList_struct),
+				    hba[i]->nr_cmds * sizeof(CommandList_struct),
 				    hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
 	if (hba[i]->errinfo_pool)
 		pci_free_consistent(hba[i]->pdev,
-				    NR_CMDS * sizeof(ErrorInfo_struct),
+				    hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
 				    hba[i]->errinfo_pool,
 				    hba[i]->errinfo_pool_dhandle);
 	free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
@@ -3383,6 +3376,15 @@
 	unregister_blkdev(hba[i]->major, hba[i]->devname);
       clean1:
 	hba[i]->busy_initializing = 0;
+	/* cleanup any queues that may have been initialized */
+	for (j=0; j <= hba[i]->highest_lun; j++){
+		drive_info_struct *drv = &(hba[i]->drv[j]);
+		if (drv->queue)
+			blk_cleanup_queue(drv->queue);
+	}
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
 	free_hba(i);
 	return -1;
 }
@@ -3430,7 +3432,7 @@
 	remove_proc_entry(hba[i]->devname, proc_cciss);
 
 	/* remove it from the disk list */
-	for (j = 0; j < NWD; j++) {
+	for (j = 0; j < CISS_MAX_LUN; j++) {
 		struct gendisk *disk = hba[i]->gendisk[j];
 		if (disk) {
 			request_queue_t *q = disk->queue;
@@ -3442,9 +3444,9 @@
 		}
 	}
 
-	pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
+	pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(CommandList_struct),
 			    hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
-	pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(ErrorInfo_struct),
+	pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
 			    hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
 	kfree(hba[i]->cmd_pool_bits);
 #ifdef CONFIG_CISS_SCSI_TAPE
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index 562235c..b70988d 100644
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -6,7 +6,6 @@
 #include "cciss_cmd.h"
 
 
-#define NWD		16
 #define NWD_SHIFT	4
 #define MAX_PART	(1 << NWD_SHIFT)
 
@@ -60,6 +59,7 @@
 	__u32	board_id;
 	void __iomem *vaddr;
 	unsigned long paddr;
+	int 	nr_cmds; /* Number of commands allowed on this controller */
 	CfgTable_struct __iomem *cfgtable;
 	int	interrupts_enabled;
 	int	major;
@@ -76,6 +76,7 @@
 	unsigned int intr[4];
 	unsigned int msix_vector;
 	unsigned int msi_vector;
+	int 	cciss_max_sectors;
 	BYTE	cciss_read;
 	BYTE	cciss_write;
 	BYTE	cciss_read_capacity;
@@ -110,7 +111,7 @@
 	int			next_to_run;
 
 	// Disk structures we need to pass back
-	struct gendisk   *gendisk[NWD];
+	struct gendisk   *gendisk[CISS_MAX_LUN];
 #ifdef CONFIG_CISS_SCSI_TAPE
 	void *scsi_ctlr; /* ptr to structure containing scsi related stuff */
 	/* list of block side commands the scsi error handling sucked up */
@@ -282,6 +283,7 @@
 	__u32	board_id;
 	char	*product_name;
 	struct access_method *access;
+	int nr_cmds; /* Max cmds this kind of ctlr can handle. */
 };
 
 #define CCISS_LOCK(i)	(&hba[i]->lock)
diff --git a/drivers/block/cciss_cmd.h b/drivers/block/cciss_cmd.h
index 4af7c4c..43bf559 100644
--- a/drivers/block/cciss_cmd.h
+++ b/drivers/block/cciss_cmd.h
@@ -55,6 +55,7 @@
 #define I2O_INT_MASK            0x34
 #define I2O_IBPOST_Q            0x40
 #define I2O_OBPOST_Q            0x44
+#define I2O_DMA1_CFG		0x214
 
 //Configuration Table
 #define CFGTBL_ChangeReq        0x00000001l
@@ -88,7 +89,7 @@
 //###########################################################################
 //STRUCTURES
 //###########################################################################
-#define CISS_MAX_LUN	16	
+#define CISS_MAX_LUN	1024
 #define CISS_MAX_PHYS_LUN	1024
 // SCSI-3 Cmmands 
 
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 9e6d3a8..3f1b382 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -992,11 +992,11 @@
 {
 }
 
-static DECLARE_WORK(floppy_work, NULL, NULL);
+static DECLARE_WORK(floppy_work, NULL);
 
 static void schedule_bh(void (*handler) (void))
 {
-	PREPARE_WORK(&floppy_work, (void (*)(void *))handler, NULL);
+	PREPARE_WORK(&floppy_work, (work_func_t)handler);
 	schedule_work(&floppy_work);
 }
 
@@ -1008,7 +1008,7 @@
 
 	spin_lock_irqsave(&floppy_lock, flags);
 	do_floppy = NULL;
-	PREPARE_WORK(&floppy_work, (void *)empty, NULL);
+	PREPARE_WORK(&floppy_work, (work_func_t)empty);
 	del_timer(&fd_timer);
 	spin_unlock_irqrestore(&floppy_lock, flags);
 }
@@ -1868,7 +1868,7 @@
 	printk("fdc_busy=%lu\n", fdc_busy);
 	if (do_floppy)
 		printk("do_floppy=%p\n", do_floppy);
-	if (floppy_work.pending)
+	if (work_pending(&floppy_work))
 		printk("floppy_work.func=%p\n", floppy_work.func);
 	if (timer_pending(&fd_timer))
 		printk("fd_timer.function=%p\n", fd_timer.function);
@@ -4498,7 +4498,7 @@
 		printk("floppy timer still active:%s\n", timeout_message);
 	if (timer_pending(&fd_timer))
 		printk("auxiliary floppy timer still active\n");
-	if (floppy_work.pending)
+	if (work_pending(&floppy_work))
 		printk("work still pending\n");
 #endif
 	old_fdc = fdc;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 9d1035e..7bf2cfb 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -355,14 +355,30 @@
 	return NULL;
 }
 
+static ssize_t pid_show(struct gendisk *disk, char *page)
+{
+	return sprintf(page, "%ld\n",
+		(long) ((struct nbd_device *)disk->private_data)->pid);
+}
+
+static struct disk_attribute pid_attr = {
+	.attr = { .name = "pid", .mode = S_IRUGO },
+	.show = pid_show,
+};
+
 static void nbd_do_it(struct nbd_device *lo)
 {
 	struct request *req;
 
 	BUG_ON(lo->magic != LO_MAGIC);
 
+	lo->pid = current->pid;
+	sysfs_create_file(&lo->disk->kobj, &pid_attr.attr);
+
 	while ((req = nbd_read_stat(lo)) != NULL)
 		nbd_end_request(req);
+
+	sysfs_remove_file(&lo->disk->kobj, &pid_attr.attr);
 	return;
 }
 
diff --git a/drivers/block/paride/aten.c b/drivers/block/paride/aten.c
index c4d696d..2695465 100644
--- a/drivers/block/paride/aten.c
+++ b/drivers/block/paride/aten.c
@@ -149,12 +149,12 @@
 
 static int __init aten_init(void)
 {
-	return pi_register(&aten)-1;
+	return paride_register(&aten);
 }
 
 static void __exit aten_exit(void)
 {
-	pi_unregister( &aten );
+	paride_unregister( &aten );
 }
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/block/paride/bpck.c b/drivers/block/paride/bpck.c
index d462ff6..4f27e73 100644
--- a/drivers/block/paride/bpck.c
+++ b/drivers/block/paride/bpck.c
@@ -464,12 +464,12 @@
 
 static int __init bpck_init(void)
 {
-	return pi_register(&bpck)-1;
+	return paride_register(&bpck);
 }
 
 static void __exit bpck_exit(void)
 {
-	pi_unregister(&bpck);
+	paride_unregister(&bpck);
 }
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/block/paride/bpck6.c b/drivers/block/paride/bpck6.c
index 41a237c..ad12452 100644
--- a/drivers/block/paride/bpck6.c
+++ b/drivers/block/paride/bpck6.c
@@ -31,10 +31,7 @@
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <asm/io.h>
-
-#if defined(CONFIG_PARPORT_MODULE)||defined(CONFIG_PARPORT)
 #include <linux/parport.h>
-#endif
 
 #include "ppc6lnx.c"
 #include "paride.h"
@@ -139,11 +136,6 @@
 	PPCSTRUCT(pi)->ppc_id=pi->unit;
 	PPCSTRUCT(pi)->lpt_addr=pi->port;
 
-#ifdef CONFIG_PARPORT_PC_MODULE
-#define CONFIG_PARPORT_PC
-#endif
-
-#ifdef CONFIG_PARPORT_PC
 	/* look at the parport device to see what modes we can use */
 	if(((struct pardevice *)(pi->pardev))->port->modes & 
 		(PARPORT_MODE_EPP)
@@ -161,11 +153,6 @@
 	{
 		return 1;
 	}
-#else
-	/* there is no way of knowing what kind of port we have
-	   default to the highest mode possible */
-	return 5;
-#endif
 }
 
 static int bpck6_probe_unit ( PIA *pi )
@@ -265,12 +252,12 @@
 	printk(KERN_INFO "bpck6: Copyright 2001 by Micro Solutions, Inc., DeKalb IL. USA\n");
 	if(verbose)
 		printk(KERN_DEBUG "bpck6: verbose debug enabled.\n");
-	return pi_register(&bpck6) - 1;  
+	return paride_register(&bpck6);
 }
 
 static void __exit bpck6_exit(void)
 {
-	pi_unregister(&bpck6);
+	paride_unregister(&bpck6);
 }
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/block/paride/comm.c b/drivers/block/paride/comm.c
index 43d6135..9bcd354 100644
--- a/drivers/block/paride/comm.c
+++ b/drivers/block/paride/comm.c
@@ -205,12 +205,12 @@
 
 static int __init comm_init(void)
 {
-	return pi_register(&comm)-1;
+	return paride_register(&comm);
 }
 
 static void __exit comm_exit(void)
 {
-	pi_unregister(&comm);
+	paride_unregister(&comm);
 }
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/block/paride/dstr.c b/drivers/block/paride/dstr.c
index 04d53bf..accc5c7 100644
--- a/drivers/block/paride/dstr.c
+++ b/drivers/block/paride/dstr.c
@@ -220,12 +220,12 @@
 
 static int __init dstr_init(void)
 {
-	return pi_register(&dstr)-1;
+	return paride_register(&dstr);
 }
 
 static void __exit dstr_exit(void)
 {
-	pi_unregister(&dstr);
+	paride_unregister(&dstr);
 }
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/block/paride/epat.c b/drivers/block/paride/epat.c
index 55d1c0a..1bcdff7 100644
--- a/drivers/block/paride/epat.c
+++ b/drivers/block/paride/epat.c
@@ -327,12 +327,12 @@
 #ifdef CONFIG_PARIDE_EPATC8
 	epatc8 = 1;
 #endif
-	return pi_register(&epat)-1;
+	return paride_register(&epat);
 }
 
 static void __exit epat_exit(void)
 {
-	pi_unregister(&epat);
+	paride_unregister(&epat);
 }
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/block/paride/epia.c b/drivers/block/paride/epia.c
index 0f2e0c2..fb0e782 100644
--- a/drivers/block/paride/epia.c
+++ b/drivers/block/paride/epia.c
@@ -303,12 +303,12 @@
 
 static int __init epia_init(void)
 {
-	return pi_register(&epia)-1;
+	return paride_register(&epia);
 }
 
 static void __exit epia_exit(void)
 {
-	pi_unregister(&epia);
+	paride_unregister(&epia);
 }
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/block/paride/fit2.c b/drivers/block/paride/fit2.c
index e0f0691..3812837 100644
--- a/drivers/block/paride/fit2.c
+++ b/drivers/block/paride/fit2.c
@@ -138,12 +138,12 @@
 
 static int __init fit2_init(void)
 {
-	return pi_register(&fit2)-1;
+	return paride_register(&fit2);
 }
 
 static void __exit fit2_exit(void)
 {
-	pi_unregister(&fit2);
+	paride_unregister(&fit2);
 }
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/block/paride/fit3.c b/drivers/block/paride/fit3.c
index 15400e7..275d269 100644
--- a/drivers/block/paride/fit3.c
+++ b/drivers/block/paride/fit3.c
@@ -198,12 +198,12 @@
 
 static int __init fit3_init(void)
 {
-	return pi_register(&fit3)-1;
+	return paride_register(&fit3);
 }
 
 static void __exit fit3_exit(void)
 {
-	pi_unregister(&fit3);
+	paride_unregister(&fit3);
 }
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/block/paride/friq.c b/drivers/block/paride/friq.c
index 5ea2904..4f2ba24 100644
--- a/drivers/block/paride/friq.c
+++ b/drivers/block/paride/friq.c
@@ -263,12 +263,12 @@
 
 static int __init friq_init(void)
 {
-	return pi_register(&friq)-1;
+	return paride_register(&friq);
 }
 
 static void __exit friq_exit(void)
 {
-	pi_unregister(&friq);
+	paride_unregister(&friq);
 }
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/block/paride/frpw.c b/drivers/block/paride/frpw.c
index 56b3824..c3cde36 100644
--- a/drivers/block/paride/frpw.c
+++ b/drivers/block/paride/frpw.c
@@ -300,12 +300,12 @@
 
 static int __init frpw_init(void)
 {
-	return pi_register(&frpw)-1;
+	return paride_register(&frpw);
 }
 
 static void __exit frpw_exit(void)
 {
-	pi_unregister(&frpw);
+	paride_unregister(&frpw);
 }
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/block/paride/jumbo b/drivers/block/paride/jumbo
deleted file mode 100644
index e793b9c..0000000
--- a/drivers/block/paride/jumbo
+++ /dev/null
@@ -1,70 +0,0 @@
-#!/bin/sh
-#
-# This script can be used to build "jumbo" modules that contain the
-# base PARIDE support, one protocol module and one high-level driver.
-#
-echo -n "High level driver [pcd] : "
-read X
-HLD=${X:-pcd}
-#
-echo -n "Protocol module [bpck] : "
-read X
-PROTO=${X:-bpck}
-#
-echo -n "Use MODVERSIONS [y] ? "
-read X
-UMODV=${X:-y}
-#
-echo -n "For SMP kernel [n] ? "
-read X
-USMP=${X:-n}
-#
-echo -n "Support PARPORT [n] ? "
-read X
-UPARP=${X:-n}
-#
-echo
-#
-case $USMP in
-	y* | Y* ) FSMP="-DCONFIG_SMP"
-		  ;;
-	*)	  FSMP=""
-		  ;;
-esac
-#
-MODI="-include ../../../include/linux/modversions.h"
-#
-case $UMODV in
-	y* | Y* ) FMODV="-DMODVERSIONS $MODI"
-		  ;;
-	*)	  FMODV=""
-		  ;;
-esac
-#
-case $UPARP in
-	y* | Y* ) FPARP="-DCONFIG_PARPORT"
-		  ;;
-	*)	  FPARP=""
-		  ;;
-esac
-#
-TARG=$HLD-$PROTO.o
-FPROTO=-DCONFIG_PARIDE_`echo "$PROTO" | tr [a-z] [A-Z]`
-FK="-D__KERNEL__ -I ../../../include"
-FLCH=-D_LINUX_CONFIG_H
-#
-echo cc $FK $FSMP $FLCH $FPARP $FPROTO $FMODV -Wall -O2 -o Jb.o -c paride.c
-cc $FK $FSMP $FLCH $FPARP $FPROTO $FMODV -Wall -O2 -o Jb.o -c paride.c
-#
-echo cc $FK $FSMP $FMODV -Wall -O2 -o Jp.o -c $PROTO.c
-cc $FK $FSMP $FMODV -Wall -O2 -o Jp.o -c $PROTO.c
-#
-echo cc $FK $FSMP $FMODV -DMODULE -DPARIDE_JUMBO -Wall -O2 -o Jd.o -c $HLD.c
-cc $FK $FSMP $FMODV -DMODULE -DPARIDE_JUMBO -Wall -O2 -o Jd.o -c $HLD.c
-#
-echo ld -r -o $TARG Jp.o Jb.o Jd.o
-ld -r -o $TARG Jp.o Jb.o Jd.o
-#
-#
-rm Jp.o Jb.o Jd.o
-#
diff --git a/drivers/block/paride/kbic.c b/drivers/block/paride/kbic.c
index d983bce..35999c4 100644
--- a/drivers/block/paride/kbic.c
+++ b/drivers/block/paride/kbic.c
@@ -283,13 +283,21 @@
 
 static int __init kbic_init(void)
 {
-	return (pi_register(&k951)||pi_register(&k971))-1;
+	int rv;
+
+	rv = paride_register(&k951);
+	if (rv < 0)
+		return rv;
+	rv = paride_register(&k971);
+	if (rv < 0)
+		paride_unregister(&k951);
+	return rv;
 }
 
 static void __exit kbic_exit(void)
 {
-	pi_unregister(&k951);
-	pi_unregister(&k971);
+	paride_unregister(&k951);
+	paride_unregister(&k971);
 }
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/block/paride/ktti.c b/drivers/block/paride/ktti.c
index 6c7edbf..117ab0e 100644
--- a/drivers/block/paride/ktti.c
+++ b/drivers/block/paride/ktti.c
@@ -115,12 +115,12 @@
 
 static int __init ktti_init(void)
 {
-	return pi_register(&ktti)-1;
+	return paride_register(&ktti);
 }
 
 static void __exit ktti_exit(void)
 {
-	pi_unregister(&ktti);
+	paride_unregister(&ktti);
 }
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/block/paride/on20.c b/drivers/block/paride/on20.c
index 9f8e010..0173697 100644
--- a/drivers/block/paride/on20.c
+++ b/drivers/block/paride/on20.c
@@ -140,12 +140,12 @@
 
 static int __init on20_init(void)
 {
-	return pi_register(&on20)-1;
+	return paride_register(&on20);
 }
 
 static void __exit on20_exit(void)
 {
-	pi_unregister(&on20);
+	paride_unregister(&on20);
 }
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/block/paride/on26.c b/drivers/block/paride/on26.c
index 0f833ca..95ba256 100644
--- a/drivers/block/paride/on26.c
+++ b/drivers/block/paride/on26.c
@@ -306,12 +306,12 @@
 
 static int __init on26_init(void)
 {
-	return pi_register(&on26)-1;
+	return paride_register(&on26);
 }
 
 static void __exit on26_exit(void)
 {
-	pi_unregister(&on26);
+	paride_unregister(&on26);
 }
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/block/paride/paride.c b/drivers/block/paride/paride.c
index 4b258f7..48c50f1 100644
--- a/drivers/block/paride/paride.c
+++ b/drivers/block/paride/paride.c
@@ -29,14 +29,7 @@
 #include <linux/spinlock.h>
 #include <linux/wait.h>
 #include <linux/sched.h>	/* TASK_* */
-
-#ifdef CONFIG_PARPORT_MODULE
-#define CONFIG_PARPORT
-#endif
-
-#ifdef CONFIG_PARPORT
 #include <linux/parport.h>
-#endif
 
 #include "paride.h"
 
@@ -76,8 +69,6 @@
 
 EXPORT_SYMBOL(pi_read_block);
 
-#ifdef CONFIG_PARPORT
-
 static void pi_wake_up(void *p)
 {
 	PIA *pi = (PIA *) p;
@@ -100,11 +91,8 @@
 		cont();
 }
 
-#endif
-
 int pi_schedule_claimed(PIA * pi, void (*cont) (void))
 {
-#ifdef CONFIG_PARPORT
 	unsigned long flags;
 
 	spin_lock_irqsave(&pi_spinlock, flags);
@@ -115,7 +103,6 @@
 	}
 	pi->claimed = 1;
 	spin_unlock_irqrestore(&pi_spinlock, flags);
-#endif
 	return 1;
 }
 EXPORT_SYMBOL(pi_schedule_claimed);
@@ -133,20 +120,16 @@
 	if (pi->claimed)
 		return;
 	pi->claimed = 1;
-#ifdef CONFIG_PARPORT
 	if (pi->pardev)
 		wait_event(pi->parq,
 			   !parport_claim((struct pardevice *) pi->pardev));
-#endif
 }
 
 static void pi_unclaim(PIA * pi)
 {
 	pi->claimed = 0;
-#ifdef CONFIG_PARPORT
 	if (pi->pardev)
 		parport_release((struct pardevice *) (pi->pardev));
-#endif
 }
 
 void pi_connect(PIA * pi)
@@ -167,21 +150,15 @@
 
 static void pi_unregister_parport(PIA * pi)
 {
-#ifdef CONFIG_PARPORT
 	if (pi->pardev) {
 		parport_unregister_device((struct pardevice *) (pi->pardev));
 		pi->pardev = NULL;
 	}
-#endif
 }
 
 void pi_release(PIA * pi)
 {
 	pi_unregister_parport(pi);
-#ifndef CONFIG_PARPORT
-	if (pi->reserved)
-		release_region(pi->port, pi->reserved);
-#endif				/* !CONFIG_PARPORT */
 	if (pi->proto->release_proto)
 		pi->proto->release_proto(pi);
 	module_put(pi->proto->owner);
@@ -229,7 +206,7 @@
 	return res;
 }
 
-int pi_register(PIP * pr)
+int paride_register(PIP * pr)
 {
 	int k;
 
@@ -237,24 +214,24 @@
 		if (protocols[k] && !strcmp(pr->name, protocols[k]->name)) {
 			printk("paride: %s protocol already registered\n",
 			       pr->name);
-			return 0;
+			return -1;
 		}
 	k = 0;
 	while ((k < MAX_PROTOS) && (protocols[k]))
 		k++;
 	if (k == MAX_PROTOS) {
 		printk("paride: protocol table full\n");
-		return 0;
+		return -1;
 	}
 	protocols[k] = pr;
 	pr->index = k;
 	printk("paride: %s registered as protocol %d\n", pr->name, k);
-	return 1;
+	return 0;
 }
 
-EXPORT_SYMBOL(pi_register);
+EXPORT_SYMBOL(paride_register);
 
-void pi_unregister(PIP * pr)
+void paride_unregister(PIP * pr)
 {
 	if (!pr)
 		return;
@@ -265,12 +242,10 @@
 	protocols[pr->index] = NULL;
 }
 
-EXPORT_SYMBOL(pi_unregister);
+EXPORT_SYMBOL(paride_unregister);
 
 static int pi_register_parport(PIA * pi, int verbose)
 {
-#ifdef CONFIG_PARPORT
-
 	struct parport *port;
 
 	port = parport_find_base(pi->port);
@@ -290,7 +265,6 @@
 		printk("%s: 0x%x is %s\n", pi->device, pi->port, port->name);
 
 	pi->parname = (char *) port->name;
-#endif
 
 	return 1;
 }
@@ -447,13 +421,6 @@
 			printk("%s: Adapter not found\n", device);
 		return 0;
 	}
-#ifndef CONFIG_PARPORT
-	if (!request_region(pi->port, pi->reserved, pi->device)) {
-		printk(KERN_WARNING "paride: Unable to request region 0x%x\n",
-		       pi->port);
-		return 0;
-	}
-#endif				/* !CONFIG_PARPORT */
 
 	if (pi->parname)
 		printk("%s: Sharing %s at 0x%x\n", pi->device,
diff --git a/drivers/block/paride/paride.h b/drivers/block/paride/paride.h
index c6d98ef..2bddbf4 100644
--- a/drivers/block/paride/paride.h
+++ b/drivers/block/paride/paride.h
@@ -163,8 +163,8 @@
 
 typedef struct pi_protocol PIP;
 
-extern int pi_register( PIP * );
-extern void pi_unregister ( PIP * );
+extern int paride_register( PIP * );
+extern void paride_unregister ( PIP * );
 
 #endif /* __DRIVERS_PARIDE_H__ */
 /* end of paride.h */
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index ac5ba46..c852eed 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -912,12 +912,12 @@
 	int unit;
 
 	if (disable)
-		return -1;
+		return -EINVAL;
 
 	pcd_init_units();
 
 	if (pcd_detect())
-		return -1;
+		return -ENODEV;
 
 	/* get the atapi capabilities page */
 	pcd_probe_capabilities();
@@ -925,7 +925,7 @@
 	if (register_blkdev(major, name)) {
 		for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++)
 			put_disk(cd->disk);
-		return -1;
+		return -EBUSY;
 	}
 
 	pcd_queue = blk_init_queue(do_pcd_request, &pcd_lock);
@@ -933,7 +933,7 @@
 		unregister_blkdev(major, name);
 		for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++)
 			put_disk(cd->disk);
-		return -1;
+		return -ENOMEM;
 	}
 
 	for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 40a11e5..9d9bff2 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -352,19 +352,19 @@
 
 static void run_fsm(void);
 
-static void ps_tq_int( void *data);
+static void ps_tq_int(struct work_struct *work);
 
-static DECLARE_WORK(fsm_tq, ps_tq_int, NULL);
+static DECLARE_DELAYED_WORK(fsm_tq, ps_tq_int);
 
 static void schedule_fsm(void)
 {
 	if (!nice)
-		schedule_work(&fsm_tq);
+		schedule_delayed_work(&fsm_tq, 0);
 	else
 		schedule_delayed_work(&fsm_tq, nice-1);
 }
 
-static void ps_tq_int(void *data)
+static void ps_tq_int(struct work_struct *work)
 {
 	run_fsm();
 }
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index 1a9dee1..7cdaa19 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -933,25 +933,25 @@
 	int unit;
 
 	if (disable)
-		return -1;
+		return -EINVAL;
 
 	pf_init_units();
 
 	if (pf_detect())
-		return -1;
+		return -ENODEV;
 	pf_busy = 0;
 
 	if (register_blkdev(major, name)) {
 		for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++)
 			put_disk(pf->disk);
-		return -1;
+		return -EBUSY;
 	}
 	pf_queue = blk_init_queue(do_pf_request, &pf_spin_lock);
 	if (!pf_queue) {
 		unregister_blkdev(major, name);
 		for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++)
 			put_disk(pf->disk);
-		return -1;
+		return -ENOMEM;
 	}
 
 	blk_queue_max_phys_segments(pf_queue, cluster);
diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c
index 13f998a..9970aed 100644
--- a/drivers/block/paride/pg.c
+++ b/drivers/block/paride/pg.c
@@ -646,14 +646,14 @@
 	int err;
 
 	if (disable){
-		err = -1;
+		err = -EINVAL;
 		goto out;
 	}
 
 	pg_init_units();
 
 	if (pg_detect()) {
-		err = -1;
+		err = -ENODEV;
 		goto out;
 	}
 
diff --git a/drivers/block/paride/pseudo.h b/drivers/block/paride/pseudo.h
index 932342d..bc37032 100644
--- a/drivers/block/paride/pseudo.h
+++ b/drivers/block/paride/pseudo.h
@@ -35,7 +35,7 @@
 #include <linux/sched.h>
 #include <linux/workqueue.h>
 
-static void ps_tq_int( void *data);
+static void ps_tq_int(struct work_struct *work);
 
 static void (* ps_continuation)(void);
 static int (* ps_ready)(void);
@@ -45,7 +45,7 @@
 
 static DEFINE_SPINLOCK(ps_spinlock __attribute__((unused)));
 
-static DECLARE_WORK(ps_tq, ps_tq_int, NULL);
+static DECLARE_DELAYED_WORK(ps_tq, ps_tq_int);
 
 static void ps_set_intr(void (*continuation)(void), 
 			int (*ready)(void),
@@ -63,14 +63,14 @@
 	if (!ps_tq_active) {
 		ps_tq_active = 1;
 		if (!ps_nice)
-			schedule_work(&ps_tq);
+			schedule_delayed_work(&ps_tq, 0);
 		else
 			schedule_delayed_work(&ps_tq, ps_nice-1);
 	}
 	spin_unlock_irqrestore(&ps_spinlock,flags);
 }
 
-static void ps_tq_int(void *data)
+static void ps_tq_int(struct work_struct *work)
 {
 	void (*con)(void);
 	unsigned long flags;
@@ -92,7 +92,7 @@
 	}
 	ps_tq_active = 1;
 	if (!ps_nice)
-		schedule_work(&ps_tq);
+		schedule_delayed_work(&ps_tq, 0);
 	else
 		schedule_delayed_work(&ps_tq, ps_nice-1);
 	spin_unlock_irqrestore(&ps_spinlock,flags);
diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
index 35fb266..c902b25 100644
--- a/drivers/block/paride/pt.c
+++ b/drivers/block/paride/pt.c
@@ -946,12 +946,12 @@
 	int err;
 
 	if (disable) {
-		err = -1;
+		err = -EINVAL;
 		goto out;
 	}
 
 	if (pt_detect()) {
-		err = -1;
+		err = -ENODEV;
 		goto out;
 	}
 
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index f2904f6..e45eaa2 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -54,7 +54,7 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/miscdevice.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <linux/mutex.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_ioctl.h>
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 47d6975..54509eb 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -1244,9 +1244,10 @@
 	return IRQ_RETVAL(handled);
 }
 
-static void carm_fsm_task (void *_data)
+static void carm_fsm_task (struct work_struct *work)
 {
-	struct carm_host *host = _data;
+	struct carm_host *host =
+		container_of(work, struct carm_host, fsm_task);
 	unsigned long flags;
 	unsigned int state;
 	int rc, i, next_dev;
@@ -1619,7 +1620,7 @@
 	host->pdev = pdev;
 	host->flags = pci_dac ? FL_DAC : 0;
 	spin_lock_init(&host->lock);
-	INIT_WORK(&host->fsm_task, carm_fsm_task, host);
+	INIT_WORK(&host->fsm_task, carm_fsm_task);
 	init_completion(&host->probe_comp);
 
 	for (i = 0; i < ARRAY_SIZE(host->req); i++)
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index 0d5c73f..2098eff 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -376,7 +376,7 @@
     int stalled_pipe);
 static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd);
 static void ub_reset_enter(struct ub_dev *sc, int try);
-static void ub_reset_task(void *arg);
+static void ub_reset_task(struct work_struct *work);
 static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun);
 static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
     struct ub_capacity *ret);
@@ -1558,9 +1558,9 @@
 	schedule_work(&sc->reset_work);
 }
 
-static void ub_reset_task(void *arg)
+static void ub_reset_task(struct work_struct *work)
 {
-	struct ub_dev *sc = arg;
+	struct ub_dev *sc = container_of(work, struct ub_dev, reset_work);
 	unsigned long flags;
 	struct list_head *p;
 	struct ub_lun *lun;
@@ -2179,7 +2179,7 @@
 	usb_init_urb(&sc->work_urb);
 	tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc);
 	atomic_set(&sc->poison, 0);
-	INIT_WORK(&sc->reset_work, ub_reset_task, sc);
+	INIT_WORK(&sc->reset_work, ub_reset_task);
 	init_waitqueue_head(&sc->reset_wait);
 
 	init_timer(&sc->work_timer);
diff --git a/drivers/bluetooth/bcm203x.c b/drivers/bluetooth/bcm203x.c
index 5167517..9256985 100644
--- a/drivers/bluetooth/bcm203x.c
+++ b/drivers/bluetooth/bcm203x.c
@@ -157,9 +157,10 @@
 	}
 }
 
-static void bcm203x_work(void *user_data)
+static void bcm203x_work(struct work_struct *work)
 {
-	struct bcm203x_data *data = user_data;
+	struct bcm203x_data *data =
+		container_of(work, struct bcm203x_data, work);
 
 	if (usb_submit_urb(data->urb, GFP_ATOMIC) < 0)
 		BT_ERR("Can't submit URB");
@@ -246,7 +247,7 @@
 
 	release_firmware(firmware);
 
-	INIT_WORK(&data->work, bcm203x_work, (void *) data);
+	INIT_WORK(&data->work, bcm203x_work);
 
 	usb_set_intfdata(intf, data);
 
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index d0cface..5e2c318 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -330,7 +330,7 @@
 	   reliable packet if the number of packets sent but not yet ack'ed
 	   is less than the winsize */
 
-	spin_lock_irqsave(&bcsp->unack.lock, flags);
+	spin_lock_irqsave_nested(&bcsp->unack.lock, flags, SINGLE_DEPTH_NESTING);
 
 	if (bcsp->unack.qlen < BCSP_TXWINSIZE && (skb = skb_dequeue(&bcsp->rel)) != NULL) {
 		struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len, bt_cb(skb)->pkt_type);
@@ -696,7 +696,7 @@
 
 	BT_DBG("hu %p retransmitting %u pkts", hu, bcsp->unack.qlen);
 
-	spin_lock_irqsave(&bcsp->unack.lock, flags);
+	spin_lock_irqsave_nested(&bcsp->unack.lock, flags, SINGLE_DEPTH_NESTING);
 
 	while ((skb = __skb_dequeue_tail(&bcsp->unack)) != NULL) {
 		bcsp->msgq_txseq = (bcsp->msgq_txseq - 1) & 0x07;
diff --git a/drivers/cdrom/optcd.c b/drivers/cdrom/optcd.c
index 25032d7..3541690 100644
--- a/drivers/cdrom/optcd.c
+++ b/drivers/cdrom/optcd.c
@@ -101,7 +101,7 @@
 		return;
 
 	va_start(args, fmt);
-	vsprintf(s, fmt, args);
+	vsnprintf(s, sizeof(s), fmt, args);
 	printk(KERN_DEBUG "optcd: %s\n", s);
 	va_end(args);
 }
diff --git a/drivers/cdrom/sbpcd.c b/drivers/cdrom/sbpcd.c
index ba50e5a..a1283b1 100644
--- a/drivers/cdrom/sbpcd.c
+++ b/drivers/cdrom/sbpcd.c
@@ -770,11 +770,10 @@
 	
 	msgnum++;
 	if (msgnum>99) msgnum=0;
-	sprintf(buf, MSG_LEVEL "%s-%d [%02d]:  ", major_name, current_drive - D_S, msgnum);
 	va_start(args, fmt);
-	vsprintf(&buf[18], fmt, args);
+	vsnprintf(buf, sizeof(buf), fmt, args);
 	va_end(args);
-	printk(buf);
+	printk(MSG_LEVEL "%s-%d [%02d]:  %s", major_name, current_drive - D_S, msgnum, buf);
 #if KLOGD_PAUSE
 	sbp_sleep(KLOGD_PAUSE); /* else messages get lost */
 #endif /* KLOGD_PAUSE */ 
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 00b17ae..2f2c4ef 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -459,7 +459,7 @@
 
 /* Handle shadow device of the Nvidia NForce3 */
 /* CHECK-ME original 2.4 version set up some IORRs. Check if that is needed. */
-static int __devinit nforce3_agp_init(struct pci_dev *pdev)
+static int nforce3_agp_init(struct pci_dev *pdev)
 {
 	u32 tmp, apbase, apbar, aplimit;
 	struct pci_dev *dev1;
diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c
index e608dad..acb2de5 100644
--- a/drivers/char/cyclades.c
+++ b/drivers/char/cyclades.c
@@ -926,9 +926,10 @@
  * had to poll every port to see if that port needed servicing.
  */
 static void
-do_softint(void *private_)
+do_softint(struct work_struct *work)
 {
-  struct cyclades_port *info = (struct cyclades_port *) private_;
+	struct cyclades_port *info =
+		container_of(work, struct cyclades_port, tqueue);
   struct tty_struct    *tty;
 
     tty = info->tty;
@@ -5328,7 +5329,7 @@
                     info->blocked_open = 0;
                     info->default_threshold = 0;
                     info->default_timeout = 0;
-		    INIT_WORK(&info->tqueue, do_softint, info);
+		    INIT_WORK(&info->tqueue, do_softint);
 		    init_waitqueue_head(&info->open_wait);
 		    init_waitqueue_head(&info->close_wait);
 		    init_waitqueue_head(&info->shutdown_wait);
@@ -5403,7 +5404,7 @@
                     info->blocked_open = 0;
                     info->default_threshold = 0;
                     info->default_timeout = 0;
-		    INIT_WORK(&info->tqueue, do_softint, info);
+		    INIT_WORK(&info->tqueue, do_softint);
 		    init_waitqueue_head(&info->open_wait);
 		    init_waitqueue_head(&info->close_wait);
 		    init_waitqueue_head(&info->shutdown_wait);
diff --git a/drivers/char/decserial.c b/drivers/char/decserial.c
index 85f404e..8ea2bea 100644
--- a/drivers/char/decserial.c
+++ b/drivers/char/decserial.c
@@ -23,20 +23,12 @@
 extern int zs_init(void);
 #endif
 
-#ifdef CONFIG_DZ
-extern int dz_init(void);
-#endif
-
 #ifdef CONFIG_SERIAL_CONSOLE
 
 #ifdef CONFIG_ZS
 extern void zs_serial_console_init(void);
 #endif
 
-#ifdef CONFIG_DZ
-extern void dz_serial_console_init(void);
-#endif
-
 #endif
 
 /* rs_init - starts up the serial interface -
@@ -46,23 +38,11 @@
 
 int __init rs_init(void)
 {
-
-#if defined(CONFIG_ZS) && defined(CONFIG_DZ)
+#ifdef CONFIG_ZS
     if (IOASIC)
 	return zs_init();
-    else
-	return dz_init();
-#else
-
-#ifdef CONFIG_ZS
-    return zs_init();
 #endif
-
-#ifdef CONFIG_DZ
-    return dz_init();
-#endif
-
-#endif
+    return -ENXIO;
 }
 
 __initcall(rs_init);
@@ -76,21 +56,9 @@
  */
 static int __init decserial_console_init(void)
 {
-#if defined(CONFIG_ZS) && defined(CONFIG_DZ)
+#ifdef CONFIG_ZS
     if (IOASIC)
 	zs_serial_console_init();
-    else
-	dz_serial_console_init();
-#else
-
-#ifdef CONFIG_ZS
-    zs_serial_console_init();
-#endif
-
-#ifdef CONFIG_DZ
-    dz_serial_console_init();
-#endif
-
 #endif
     return 0;
 }
diff --git a/drivers/char/drm/drm_sman.c b/drivers/char/drm/drm_sman.c
index 425c823..19c81d2 100644
--- a/drivers/char/drm/drm_sman.c
+++ b/drivers/char/drm/drm_sman.c
@@ -162,6 +162,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(drm_sman_set_manager);
 
 static drm_owner_item_t *drm_sman_get_owner_item(drm_sman_t * sman,
 						 unsigned long owner)
diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c
index b40ae43..ae26919 100644
--- a/drivers/char/drm/drm_vm.c
+++ b/drivers/char/drm/drm_vm.c
@@ -147,14 +147,14 @@
 	if (address > vma->vm_end)
 		return NOPAGE_SIGBUS;	/* Disallow mremap */
 	if (!map)
-		return NOPAGE_OOM;	/* Nothing allocated */
+		return NOPAGE_SIGBUS;	/* Nothing allocated */
 
 	offset = address - vma->vm_start;
 	i = (unsigned long)map->handle + offset;
 	page = (map->type == _DRM_CONSISTENT) ?
 		virt_to_page((void *)i) : vmalloc_to_page((void *)i);
 	if (!page)
-		return NOPAGE_OOM;
+		return NOPAGE_SIGBUS;
 	get_page(page);
 
 	DRM_DEBUG("shm_nopage 0x%lx\n", address);
@@ -272,7 +272,7 @@
 	if (address > vma->vm_end)
 		return NOPAGE_SIGBUS;	/* Disallow mremap */
 	if (!dma->pagelist)
-		return NOPAGE_OOM;	/* Nothing allocated */
+		return NOPAGE_SIGBUS;	/* Nothing allocated */
 
 	offset = address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
 	page_nr = offset >> PAGE_SHIFT;
@@ -310,7 +310,7 @@
 	if (address > vma->vm_end)
 		return NOPAGE_SIGBUS;	/* Disallow mremap */
 	if (!entry->pagelist)
-		return NOPAGE_OOM;	/* Nothing allocated */
+		return NOPAGE_SIGBUS;	/* Nothing allocated */
 
 	offset = address - vma->vm_start;
 	map_offset = map->offset - (unsigned long)dev->sg->virtual;
diff --git a/drivers/char/drm/via_dmablit.c b/drivers/char/drm/via_dmablit.c
index 60c1695..806f9ce 100644
--- a/drivers/char/drm/via_dmablit.c
+++ b/drivers/char/drm/via_dmablit.c
@@ -500,9 +500,9 @@
 
 
 static void 
-via_dmablit_workqueue(void *data)
+via_dmablit_workqueue(struct work_struct *work)
 {
-	drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
+	drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
 	drm_device_t *dev = blitq->dev;
 	unsigned long irqsave;
 	drm_via_sg_info_t *cur_sg;
@@ -571,7 +571,7 @@
 			DRM_INIT_WAITQUEUE(blitq->blit_queue + j);
 		}
 		DRM_INIT_WAITQUEUE(&blitq->busy_queue);
-		INIT_WORK(&blitq->wq, via_dmablit_workqueue, blitq);
+		INIT_WORK(&blitq->wq, via_dmablit_workqueue);
 		init_timer(&blitq->poll_timer);
 		blitq->poll_timer.function = &via_dmablit_timer;
 		blitq->poll_timer.data = (unsigned long) blitq;
diff --git a/drivers/char/epca.c b/drivers/char/epca.c
index 706733c..7c71eb7 100644
--- a/drivers/char/epca.c
+++ b/drivers/char/epca.c
@@ -200,7 +200,7 @@
 static int info_ioctl(struct tty_struct *, struct file *,
                     unsigned int, unsigned long);
 static void pc_set_termios(struct tty_struct *, struct termios *);
-static void do_softint(void *);
+static void do_softint(struct work_struct *work);
 static void pc_stop(struct tty_struct *);
 static void pc_start(struct tty_struct *);
 static void pc_throttle(struct tty_struct * tty);
@@ -1505,7 +1505,7 @@
 
 		ch->brdchan        = bc;
 		ch->mailbox        = gd; 
-		INIT_WORK(&ch->tqueue, do_softint, ch);
+		INIT_WORK(&ch->tqueue, do_softint);
 		ch->board          = &boards[crd];
 
 		spin_lock_irqsave(&epca_lock, flags);
@@ -2566,9 +2566,9 @@
 
 /* --------------------- Begin do_softint  ----------------------- */
 
-static void do_softint(void *private_)
+static void do_softint(struct work_struct *work)
 { /* Begin do_softint */
-	struct channel *ch = (struct channel *) private_;
+	struct channel *ch = container_of(work, struct channel, tqueue);
 	/* Called in response to a modem change event */
 	if (ch && ch->magic == EPCA_MAGIC)  { /* Begin EPCA_MAGIC */
 		struct tty_struct *tty = ch->tty;
diff --git a/drivers/char/esp.c b/drivers/char/esp.c
index 15a4ea8..93b5519 100644
--- a/drivers/char/esp.c
+++ b/drivers/char/esp.c
@@ -723,9 +723,10 @@
  * -------------------------------------------------------------------
  */
 
-static void do_softint(void *private_)
+static void do_softint(struct work_struct *work)
 {
-	struct esp_struct	*info = (struct esp_struct *) private_;
+	struct esp_struct	*info =
+		container_of(work, struct esp_struct, tqueue);
 	struct tty_struct	*tty;
 	
 	tty = info->tty;
@@ -746,9 +747,10 @@
  * 	do_serial_hangup() -> tty->hangup() -> esp_hangup()
  * 
  */
-static void do_serial_hangup(void *private_)
+static void do_serial_hangup(struct work_struct *work)
 {
-	struct esp_struct	*info = (struct esp_struct *) private_;
+	struct esp_struct	*info =
+		container_of(work, struct esp_struct, tqueue_hangup);
 	struct tty_struct	*tty;
 	
 	tty = info->tty;
@@ -2501,8 +2503,8 @@
 		info->magic = ESP_MAGIC;
 		info->close_delay = 5*HZ/10;
 		info->closing_wait = 30*HZ;
-		INIT_WORK(&info->tqueue, do_softint, info);
-		INIT_WORK(&info->tqueue_hangup, do_serial_hangup, info);
+		INIT_WORK(&info->tqueue, do_softint);
+		INIT_WORK(&info->tqueue_hangup, do_serial_hangup);
 		info->config.rx_timeout = rx_timeout;
 		info->config.flow_on = flow_on;
 		info->config.flow_off = flow_off;
diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
index 817dc40..23b25ad 100644
--- a/drivers/char/genrtc.c
+++ b/drivers/char/genrtc.c
@@ -102,7 +102,7 @@
  * Routine to poll RTC seconds field for change as often as possible,
  * after first RTC_UIE use timer to reduce polling
  */
-static void genrtc_troutine(void *data)
+static void genrtc_troutine(struct work_struct *work)
 {
 	unsigned int tmp = get_rtc_ss();
 	
@@ -255,7 +255,7 @@
 		irq_active = 1;
 		stop_rtc_timers = 0;
 		lostint = 0;
-		INIT_WORK(&genrtc_task, genrtc_troutine, NULL);
+		INIT_WORK(&genrtc_task, genrtc_troutine);
 		oldsecs = get_rtc_ss();
 		init_timer(&timer_task);
 
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index 9902ffa..cc2cd46 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -38,6 +38,7 @@
 #include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <linux/delay.h>
+#include <linux/freezer.h>
 
 #include <asm/uaccess.h>
 
diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
index 8728255..d090622 100644
--- a/drivers/char/hvcs.c
+++ b/drivers/char/hvcs.c
@@ -337,11 +337,6 @@
 static void hvcs_close(struct tty_struct *tty, struct file *filp);
 static void hvcs_hangup(struct tty_struct * tty);
 
-static void hvcs_create_device_attrs(struct hvcs_struct *hvcsd);
-static void hvcs_remove_device_attrs(struct vio_dev *vdev);
-static void hvcs_create_driver_attrs(void);
-static void hvcs_remove_driver_attrs(void);
-
 static int __devinit hvcs_probe(struct vio_dev *dev,
 		const struct vio_device_id *id);
 static int __devexit hvcs_remove(struct vio_dev *dev);
@@ -353,6 +348,172 @@
 #define HVCS_TRY_WRITE	0x00000004
 #define HVCS_READ_MASK	(HVCS_SCHED_READ | HVCS_QUICK_READ)
 
+static inline struct hvcs_struct *from_vio_dev(struct vio_dev *viod)
+{
+	return viod->dev.driver_data;
+}
+/* The sysfs interface for the driver and devices */
+
+static ssize_t hvcs_partner_vtys_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct vio_dev *viod = to_vio_dev(dev);
+	struct hvcs_struct *hvcsd = from_vio_dev(viod);
+	unsigned long flags;
+	int retval;
+
+	spin_lock_irqsave(&hvcsd->lock, flags);
+	retval = sprintf(buf, "%X\n", hvcsd->p_unit_address);
+	spin_unlock_irqrestore(&hvcsd->lock, flags);
+	return retval;
+}
+static DEVICE_ATTR(partner_vtys, S_IRUGO, hvcs_partner_vtys_show, NULL);
+
+static ssize_t hvcs_partner_clcs_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct vio_dev *viod = to_vio_dev(dev);
+	struct hvcs_struct *hvcsd = from_vio_dev(viod);
+	unsigned long flags;
+	int retval;
+
+	spin_lock_irqsave(&hvcsd->lock, flags);
+	retval = sprintf(buf, "%s\n", &hvcsd->p_location_code[0]);
+	spin_unlock_irqrestore(&hvcsd->lock, flags);
+	return retval;
+}
+static DEVICE_ATTR(partner_clcs, S_IRUGO, hvcs_partner_clcs_show, NULL);
+
+static ssize_t hvcs_current_vty_store(struct device *dev, struct device_attribute *attr, const char * buf,
+		size_t count)
+{
+	/*
+	 * Don't need this feature at the present time because firmware doesn't
+	 * yet support multiple partners.
+	 */
+	printk(KERN_INFO "HVCS: Denied current_vty change: -EPERM.\n");
+	return -EPERM;
+}
+
+static ssize_t hvcs_current_vty_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct vio_dev *viod = to_vio_dev(dev);
+	struct hvcs_struct *hvcsd = from_vio_dev(viod);
+	unsigned long flags;
+	int retval;
+
+	spin_lock_irqsave(&hvcsd->lock, flags);
+	retval = sprintf(buf, "%s\n", &hvcsd->p_location_code[0]);
+	spin_unlock_irqrestore(&hvcsd->lock, flags);
+	return retval;
+}
+
+static DEVICE_ATTR(current_vty,
+	S_IRUGO | S_IWUSR, hvcs_current_vty_show, hvcs_current_vty_store);
+
+static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribute *attr, const char *buf,
+		size_t count)
+{
+	struct vio_dev *viod = to_vio_dev(dev);
+	struct hvcs_struct *hvcsd = from_vio_dev(viod);
+	unsigned long flags;
+
+	/* writing a '0' to this sysfs entry will result in the disconnect. */
+	if (simple_strtol(buf, NULL, 0) != 0)
+		return -EINVAL;
+
+	spin_lock_irqsave(&hvcsd->lock, flags);
+
+	if (hvcsd->open_count > 0) {
+		spin_unlock_irqrestore(&hvcsd->lock, flags);
+		printk(KERN_INFO "HVCS: vterm state unchanged.  "
+				"The hvcs device node is still in use.\n");
+		return -EPERM;
+	}
+
+	if (hvcsd->connected == 0) {
+		spin_unlock_irqrestore(&hvcsd->lock, flags);
+		printk(KERN_INFO "HVCS: vterm state unchanged. The"
+				" vty-server is not connected to a vty.\n");
+		return -EPERM;
+	}
+
+	hvcs_partner_free(hvcsd);
+	printk(KERN_INFO "HVCS: Closed vty-server@%X and"
+			" partner vty@%X:%d connection.\n",
+			hvcsd->vdev->unit_address,
+			hvcsd->p_unit_address,
+			(uint32_t)hvcsd->p_partition_ID);
+
+	spin_unlock_irqrestore(&hvcsd->lock, flags);
+	return count;
+}
+
+static ssize_t hvcs_vterm_state_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct vio_dev *viod = to_vio_dev(dev);
+	struct hvcs_struct *hvcsd = from_vio_dev(viod);
+	unsigned long flags;
+	int retval;
+
+	spin_lock_irqsave(&hvcsd->lock, flags);
+	retval = sprintf(buf, "%d\n", hvcsd->connected);
+	spin_unlock_irqrestore(&hvcsd->lock, flags);
+	return retval;
+}
+static DEVICE_ATTR(vterm_state, S_IRUGO | S_IWUSR,
+		hvcs_vterm_state_show, hvcs_vterm_state_store);
+
+static ssize_t hvcs_index_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct vio_dev *viod = to_vio_dev(dev);
+	struct hvcs_struct *hvcsd = from_vio_dev(viod);
+	unsigned long flags;
+	int retval;
+
+	spin_lock_irqsave(&hvcsd->lock, flags);
+	retval = sprintf(buf, "%d\n", hvcsd->index);
+	spin_unlock_irqrestore(&hvcsd->lock, flags);
+	return retval;
+}
+
+static DEVICE_ATTR(index, S_IRUGO, hvcs_index_show, NULL);
+
+static struct attribute *hvcs_attrs[] = {
+	&dev_attr_partner_vtys.attr,
+	&dev_attr_partner_clcs.attr,
+	&dev_attr_current_vty.attr,
+	&dev_attr_vterm_state.attr,
+	&dev_attr_index.attr,
+	NULL,
+};
+
+static struct attribute_group hvcs_attr_group = {
+	.attrs = hvcs_attrs,
+};
+
+static ssize_t hvcs_rescan_show(struct device_driver *ddp, char *buf)
+{
+	/* A 1 means it is updating, a 0 means it is done updating */
+	return snprintf(buf, PAGE_SIZE, "%d\n", hvcs_rescan_status);
+}
+
+static ssize_t hvcs_rescan_store(struct device_driver *ddp, const char * buf,
+		size_t count)
+{
+	if ((simple_strtol(buf, NULL, 0) != 1)
+		&& (hvcs_rescan_status != 0))
+		return -EINVAL;
+
+	hvcs_rescan_status = 1;
+	printk(KERN_INFO "HVCS: rescanning partner info for all"
+		" vty-servers.\n");
+	hvcs_rescan_devices_list();
+	hvcs_rescan_status = 0;
+	return count;
+}
+
+static DRIVER_ATTR(rescan,
+	S_IRUGO | S_IWUSR, hvcs_rescan_show, hvcs_rescan_store);
+
 static void hvcs_kick(void)
 {
 	hvcs_kicked = 1;
@@ -575,7 +736,7 @@
 	spin_unlock_irqrestore(&hvcsd->lock, flags);
 	spin_unlock(&hvcs_structs_lock);
 
-	hvcs_remove_device_attrs(vdev);
+	sysfs_remove_group(&vdev->dev.kobj, &hvcs_attr_group);
 
 	kfree(hvcsd);
 }
@@ -608,6 +769,7 @@
 {
 	struct hvcs_struct *hvcsd;
 	int index;
+	int retval;
 
 	if (!dev || !id) {
 		printk(KERN_ERR "HVCS: probed with invalid parameter.\n");
@@ -658,14 +820,16 @@
 	 * the hvcs_struct has been added to the devices list then the user app
 	 * will get -ENODEV.
 	 */
-
 	spin_lock(&hvcs_structs_lock);
-
 	list_add_tail(&(hvcsd->next), &hvcs_structs);
-
 	spin_unlock(&hvcs_structs_lock);
 
-	hvcs_create_device_attrs(hvcsd);
+	retval = sysfs_create_group(&dev->dev.kobj, &hvcs_attr_group);
+	if (retval) {
+		printk(KERN_ERR "HVCS: Can't create sysfs attrs for vty-server@%X\n",
+		       hvcsd->vdev->unit_address);
+		return retval;
+	}
 
 	printk(KERN_INFO "HVCS: vty-server@%X added to the vio bus.\n", dev->unit_address);
 
@@ -1354,8 +1518,10 @@
 	if (!hvcs_tty_driver)
 		return -ENOMEM;
 
-	if (hvcs_alloc_index_list(num_ttys_to_alloc))
-		return -ENOMEM;
+	if (hvcs_alloc_index_list(num_ttys_to_alloc)) {
+		rc = -ENOMEM;
+		goto index_fail;
+	}
 
 	hvcs_tty_driver->owner = THIS_MODULE;
 
@@ -1385,41 +1551,57 @@
 	 * dynamically assigned major and minor numbers for our devices.
 	 */
 	if (tty_register_driver(hvcs_tty_driver)) {
-		printk(KERN_ERR "HVCS: registration "
-			" as a tty driver failed.\n");
-		hvcs_free_index_list();
-		put_tty_driver(hvcs_tty_driver);
-		return -EIO;
+		printk(KERN_ERR "HVCS: registration as a tty driver failed.\n");
+		rc = -EIO;
+		goto register_fail;
 	}
 
 	hvcs_pi_buff = kmalloc(PAGE_SIZE, GFP_KERNEL);
 	if (!hvcs_pi_buff) {
-		tty_unregister_driver(hvcs_tty_driver);
-		hvcs_free_index_list();
-		put_tty_driver(hvcs_tty_driver);
-		return -ENOMEM;
+		rc = -ENOMEM;
+		goto buff_alloc_fail;
 	}
 
 	hvcs_task = kthread_run(khvcsd, NULL, "khvcsd");
 	if (IS_ERR(hvcs_task)) {
 		printk(KERN_ERR "HVCS: khvcsd creation failed.  Driver not loaded.\n");
-		kfree(hvcs_pi_buff);
-		tty_unregister_driver(hvcs_tty_driver);
-		hvcs_free_index_list();
-		put_tty_driver(hvcs_tty_driver);
-		return -EIO;
+		rc = -EIO;
+		goto kthread_fail;
 	}
 
 	rc = vio_register_driver(&hvcs_vio_driver);
+	if (rc) {
+		printk(KERN_ERR "HVCS: can't register vio driver\n");
+		goto vio_fail;
+	}
 
 	/*
 	 * This needs to be done AFTER the vio_register_driver() call or else
 	 * the kobjects won't be initialized properly.
 	 */
-	hvcs_create_driver_attrs();
+	rc = driver_create_file(&(hvcs_vio_driver.driver), &driver_attr_rescan);
+	if (rc) {
+		printk(KERN_ERR "HVCS: sysfs attr create failed\n");
+		goto attr_fail;
+	}
 
 	printk(KERN_INFO "HVCS: driver module inserted.\n");
 
+	return 0;
+
+attr_fail:
+	vio_unregister_driver(&hvcs_vio_driver);
+vio_fail:
+	kthread_stop(hvcs_task);
+kthread_fail:
+	kfree(hvcs_pi_buff);
+buff_alloc_fail:
+	tty_unregister_driver(hvcs_tty_driver);
+register_fail:
+	hvcs_free_index_list();
+index_fail:
+	put_tty_driver(hvcs_tty_driver);
+	hvcs_tty_driver = NULL;
 	return rc;
 }
 
@@ -1441,7 +1623,7 @@
 	hvcs_pi_buff = NULL;
 	spin_unlock(&hvcs_pi_lock);
 
-	hvcs_remove_driver_attrs();
+	driver_remove_file(&hvcs_vio_driver.driver, &driver_attr_rescan);
 
 	vio_unregister_driver(&hvcs_vio_driver);
 
@@ -1456,191 +1638,3 @@
 
 module_init(hvcs_module_init);
 module_exit(hvcs_module_exit);
-
-static inline struct hvcs_struct *from_vio_dev(struct vio_dev *viod)
-{
-	return viod->dev.driver_data;
-}
-/* The sysfs interface for the driver and devices */
-
-static ssize_t hvcs_partner_vtys_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
-	struct vio_dev *viod = to_vio_dev(dev);
-	struct hvcs_struct *hvcsd = from_vio_dev(viod);
-	unsigned long flags;
-	int retval;
-
-	spin_lock_irqsave(&hvcsd->lock, flags);
-	retval = sprintf(buf, "%X\n", hvcsd->p_unit_address);
-	spin_unlock_irqrestore(&hvcsd->lock, flags);
-	return retval;
-}
-static DEVICE_ATTR(partner_vtys, S_IRUGO, hvcs_partner_vtys_show, NULL);
-
-static ssize_t hvcs_partner_clcs_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
-	struct vio_dev *viod = to_vio_dev(dev);
-	struct hvcs_struct *hvcsd = from_vio_dev(viod);
-	unsigned long flags;
-	int retval;
-
-	spin_lock_irqsave(&hvcsd->lock, flags);
-	retval = sprintf(buf, "%s\n", &hvcsd->p_location_code[0]);
-	spin_unlock_irqrestore(&hvcsd->lock, flags);
-	return retval;
-}
-static DEVICE_ATTR(partner_clcs, S_IRUGO, hvcs_partner_clcs_show, NULL);
-
-static ssize_t hvcs_current_vty_store(struct device *dev, struct device_attribute *attr, const char * buf,
-		size_t count)
-{
-	/*
-	 * Don't need this feature at the present time because firmware doesn't
-	 * yet support multiple partners.
-	 */
-	printk(KERN_INFO "HVCS: Denied current_vty change: -EPERM.\n");
-	return -EPERM;
-}
-
-static ssize_t hvcs_current_vty_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
-	struct vio_dev *viod = to_vio_dev(dev);
-	struct hvcs_struct *hvcsd = from_vio_dev(viod);
-	unsigned long flags;
-	int retval;
-
-	spin_lock_irqsave(&hvcsd->lock, flags);
-	retval = sprintf(buf, "%s\n", &hvcsd->p_location_code[0]);
-	spin_unlock_irqrestore(&hvcsd->lock, flags);
-	return retval;
-}
-
-static DEVICE_ATTR(current_vty,
-	S_IRUGO | S_IWUSR, hvcs_current_vty_show, hvcs_current_vty_store);
-
-static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribute *attr, const char *buf,
-		size_t count)
-{
-	struct vio_dev *viod = to_vio_dev(dev);
-	struct hvcs_struct *hvcsd = from_vio_dev(viod);
-	unsigned long flags;
-
-	/* writing a '0' to this sysfs entry will result in the disconnect. */
-	if (simple_strtol(buf, NULL, 0) != 0)
-		return -EINVAL;
-
-	spin_lock_irqsave(&hvcsd->lock, flags);
-
-	if (hvcsd->open_count > 0) {
-		spin_unlock_irqrestore(&hvcsd->lock, flags);
-		printk(KERN_INFO "HVCS: vterm state unchanged.  "
-				"The hvcs device node is still in use.\n");
-		return -EPERM;
-	}
-
-	if (hvcsd->connected == 0) {
-		spin_unlock_irqrestore(&hvcsd->lock, flags);
-		printk(KERN_INFO "HVCS: vterm state unchanged. The"
-				" vty-server is not connected to a vty.\n");
-		return -EPERM;
-	}
-
-	hvcs_partner_free(hvcsd);
-	printk(KERN_INFO "HVCS: Closed vty-server@%X and"
-			" partner vty@%X:%d connection.\n",
-			hvcsd->vdev->unit_address,
-			hvcsd->p_unit_address,
-			(uint32_t)hvcsd->p_partition_ID);
-
-	spin_unlock_irqrestore(&hvcsd->lock, flags);
-	return count;
-}
-
-static ssize_t hvcs_vterm_state_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
-	struct vio_dev *viod = to_vio_dev(dev);
-	struct hvcs_struct *hvcsd = from_vio_dev(viod);
-	unsigned long flags;
-	int retval;
-
-	spin_lock_irqsave(&hvcsd->lock, flags);
-	retval = sprintf(buf, "%d\n", hvcsd->connected);
-	spin_unlock_irqrestore(&hvcsd->lock, flags);
-	return retval;
-}
-static DEVICE_ATTR(vterm_state, S_IRUGO | S_IWUSR,
-		hvcs_vterm_state_show, hvcs_vterm_state_store);
-
-static ssize_t hvcs_index_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
-	struct vio_dev *viod = to_vio_dev(dev);
-	struct hvcs_struct *hvcsd = from_vio_dev(viod);
-	unsigned long flags;
-	int retval;
-
-	spin_lock_irqsave(&hvcsd->lock, flags);
-	retval = sprintf(buf, "%d\n", hvcsd->index);
-	spin_unlock_irqrestore(&hvcsd->lock, flags);
-	return retval;
-}
-
-static DEVICE_ATTR(index, S_IRUGO, hvcs_index_show, NULL);
-
-static struct attribute *hvcs_attrs[] = {
-	&dev_attr_partner_vtys.attr,
-	&dev_attr_partner_clcs.attr,
-	&dev_attr_current_vty.attr,
-	&dev_attr_vterm_state.attr,
-	&dev_attr_index.attr,
-	NULL,
-};
-
-static struct attribute_group hvcs_attr_group = {
-	.attrs = hvcs_attrs,
-};
-
-static void hvcs_create_device_attrs(struct hvcs_struct *hvcsd)
-{
-	struct vio_dev *vdev = hvcsd->vdev;
-	sysfs_create_group(&vdev->dev.kobj, &hvcs_attr_group);
-}
-
-static void hvcs_remove_device_attrs(struct vio_dev *vdev)
-{
-	sysfs_remove_group(&vdev->dev.kobj, &hvcs_attr_group);
-}
-
-static ssize_t hvcs_rescan_show(struct device_driver *ddp, char *buf)
-{
-	/* A 1 means it is updating, a 0 means it is done updating */
-	return snprintf(buf, PAGE_SIZE, "%d\n", hvcs_rescan_status);
-}
-
-static ssize_t hvcs_rescan_store(struct device_driver *ddp, const char * buf,
-		size_t count)
-{
-	if ((simple_strtol(buf, NULL, 0) != 1)
-		&& (hvcs_rescan_status != 0))
-		return -EINVAL;
-
-	hvcs_rescan_status = 1;
-	printk(KERN_INFO "HVCS: rescanning partner info for all"
-		" vty-servers.\n");
-	hvcs_rescan_devices_list();
-	hvcs_rescan_status = 0;
-	return count;
-}
-static DRIVER_ATTR(rescan,
-	S_IRUGO | S_IWUSR, hvcs_rescan_show, hvcs_rescan_store);
-
-static void hvcs_create_driver_attrs(void)
-{
-	struct device_driver *driverfs = &(hvcs_vio_driver.driver);
-	driver_create_file(driverfs, &driver_attr_rescan);
-}
-
-static void hvcs_remove_driver_attrs(void)
-{
-	struct device_driver *driverfs = &(hvcs_vio_driver.driver);
-	driver_remove_file(driverfs, &driver_attr_rescan);
-}
diff --git a/drivers/char/hvsi.c b/drivers/char/hvsi.c
index 2cf63e7..82a41d5 100644
--- a/drivers/char/hvsi.c
+++ b/drivers/char/hvsi.c
@@ -69,7 +69,7 @@
 #define __ALIGNED__	__attribute__((__aligned__(sizeof(long))))
 
 struct hvsi_struct {
-	struct work_struct writer;
+	struct delayed_work writer;
 	struct work_struct handshaker;
 	wait_queue_head_t emptyq; /* woken when outbuf is emptied */
 	wait_queue_head_t stateq; /* woken when HVSI state changes */
@@ -744,9 +744,10 @@
 	return 0;
 }
 
-static void hvsi_handshaker(void *arg)
+static void hvsi_handshaker(struct work_struct *work)
 {
-	struct hvsi_struct *hp = (struct hvsi_struct *)arg;
+	struct hvsi_struct *hp =
+		container_of(work, struct hvsi_struct, handshaker);
 
 	if (hvsi_handshake(hp) >= 0)
 		return;
@@ -951,9 +952,10 @@
 }
 
 /* hvsi_write_worker will keep rescheduling itself until outbuf is empty */
-static void hvsi_write_worker(void *arg)
+static void hvsi_write_worker(struct work_struct *work)
 {
-	struct hvsi_struct *hp = (struct hvsi_struct *)arg;
+	struct hvsi_struct *hp =
+		container_of(work, struct hvsi_struct, writer.work);
 	unsigned long flags;
 #ifdef DEBUG
 	static long start_j = 0;
@@ -1287,8 +1289,8 @@
 		}
 
 		hp = &hvsi_ports[hvsi_count];
-		INIT_WORK(&hp->writer, hvsi_write_worker, hp);
-		INIT_WORK(&hp->handshaker, hvsi_handshaker, hp);
+		INIT_DELAYED_WORK(&hp->writer, hvsi_write_worker);
+		INIT_WORK(&hp->handshaker, hvsi_handshaker);
 		init_waitqueue_head(&hp->emptyq);
 		init_waitqueue_head(&hp->stateq);
 		spin_lock_init(&hp->lock);
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 9f7635f..5f3acd8 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -3,17 +3,20 @@
 #
 
 config HW_RANDOM
-	bool "Hardware Random Number Generator Core support"
-	default y
+	tristate "Hardware Random Number Generator Core support"
+	default m
 	---help---
 	  Hardware Random Number Generator Core infrastructure.
 
+	  To compile this driver as a module, choose M here: the
+	  module will be called rng-core.
+
 	  If unsure, say Y.
 
 config HW_RANDOM_INTEL
 	tristate "Intel HW Random Number Generator support"
 	depends on HW_RANDOM && (X86 || IA64) && PCI
-	default y
+	default HW_RANDOM
 	---help---
 	  This driver provides kernel-side support for the Random Number
 	  Generator hardware found on Intel i8xx-based motherboards.
@@ -26,7 +29,7 @@
 config HW_RANDOM_AMD
 	tristate "AMD HW Random Number Generator support"
 	depends on HW_RANDOM && X86 && PCI
-	default y
+	default HW_RANDOM
 	---help---
 	  This driver provides kernel-side support for the Random Number
 	  Generator hardware found on AMD 76x-based motherboards.
@@ -39,7 +42,7 @@
 config HW_RANDOM_GEODE
 	tristate "AMD Geode HW Random Number Generator support"
 	depends on HW_RANDOM && X86 && PCI
-	default y
+	default HW_RANDOM
 	---help---
 	  This driver provides kernel-side support for the Random Number
 	  Generator hardware found on the AMD Geode LX.
@@ -52,7 +55,7 @@
 config HW_RANDOM_VIA
 	tristate "VIA HW Random Number Generator support"
 	depends on HW_RANDOM && X86_32
-	default y
+	default HW_RANDOM
 	---help---
 	  This driver provides kernel-side support for the Random Number
 	  Generator hardware found on VIA based motherboards.
@@ -65,7 +68,7 @@
 config HW_RANDOM_IXP4XX
 	tristate "Intel IXP4xx NPU HW Random Number Generator support"
 	depends on HW_RANDOM && ARCH_IXP4XX
-	default y
+	default HW_RANDOM
 	---help---
 	  This driver provides kernel-side support for the Random
 	  Number Generator hardware found on the Intel IXP4xx NPU.
@@ -78,7 +81,7 @@
 config HW_RANDOM_OMAP
 	tristate "OMAP Random Number Generator support"
 	depends on HW_RANDOM && (ARCH_OMAP16XX || ARCH_OMAP24XX)
-	default y
+	default HW_RANDOM
  	---help---
  	  This driver provides kernel-side support for the Random Number
 	  Generator hardware found on OMAP16xx and OMAP24xx multimedia
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index e263ae9..c41fa19 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -2,7 +2,8 @@
 # Makefile for HW Random Number Generator (RNG) device drivers.
 #
 
-obj-$(CONFIG_HW_RANDOM) += core.o
+obj-$(CONFIG_HW_RANDOM) += rng-core.o
+rng-core-y := core.o
 obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o
 obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o
 obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o
diff --git a/drivers/char/ip2/i2cmd.h b/drivers/char/ip2/i2cmd.h
index baa4e72..29277ec 100644
--- a/drivers/char/ip2/i2cmd.h
+++ b/drivers/char/ip2/i2cmd.h
@@ -367,11 +367,6 @@
 #define CSE_NULL  3  // Replace with a null
 #define CSE_MARK  4  // Replace with a 3-character sequence (as Unix)
 
-#define  CMD_SET_REPLACEMENT(arg,ch)   \
-			(((cmdSyntaxPtr)(ct36a))->cmd[1] = (arg), \
-			(((cmdSyntaxPtr)(ct36a))->cmd[2] = (ch),  \
-			(cmdSyntaxPtr)(ct36a))
-
 #define CSE_REPLACE  0x8	// Replace the errored character with the
 							// replacement character defined here
 
diff --git a/drivers/char/ip2/i2lib.c b/drivers/char/ip2/i2lib.c
index 54d93f0..7804576 100644
--- a/drivers/char/ip2/i2lib.c
+++ b/drivers/char/ip2/i2lib.c
@@ -84,8 +84,8 @@
 static void serviceOutgoingFifo(i2eBordStrPtr);
 
 // Functions defined in ip2.c as part of interrupt handling
-static void do_input(void *);
-static void do_status(void *);
+static void do_input(struct work_struct *);
+static void do_status(struct work_struct *);
 
 //***************
 //* Debug  Data *
@@ -331,8 +331,8 @@
 		pCh->ClosingWaitTime  = 30*HZ;
 
 		// Initialize task queue objects
-		INIT_WORK(&pCh->tqueue_input, do_input, pCh);
-		INIT_WORK(&pCh->tqueue_status, do_status, pCh);
+		INIT_WORK(&pCh->tqueue_input, do_input);
+		INIT_WORK(&pCh->tqueue_status, do_status);
 
 #ifdef IP2DEBUG_TRACE
 		pCh->trace = ip2trace;
@@ -1016,7 +1016,6 @@
 	unsigned short channel;
 	unsigned short stuffIndex;
 	unsigned long flags;
-	int rc = 0;
 
 	int bailout = 10;
 
@@ -1573,7 +1572,7 @@
 #ifdef USE_IQ
 			schedule_work(&pCh->tqueue_input);
 #else
-			do_input(pCh);
+			do_input(&pCh->tqueue_input);
 #endif
 
 			// Note we do not need to maintain any flow-control credits at this
@@ -1810,7 +1809,7 @@
 #ifdef USE_IQ
 						schedule_work(&pCh->tqueue_status);
 #else
-						do_status(pCh);
+						do_status(&pCh->tqueue_status);
 #endif
 					}
 				}
diff --git a/drivers/char/ip2/ip2main.c b/drivers/char/ip2/ip2main.c
index a3f32d4..cda2459 100644
--- a/drivers/char/ip2/ip2main.c
+++ b/drivers/char/ip2/ip2main.c
@@ -189,12 +189,12 @@
 			 unsigned int set, unsigned int clear);
 
 static void set_irq(int, int);
-static void ip2_interrupt_bh(i2eBordStrPtr pB);
+static void ip2_interrupt_bh(struct work_struct *work);
 static irqreturn_t ip2_interrupt(int irq, void *dev_id);
 static void ip2_poll(unsigned long arg);
 static inline void service_all_boards(void);
-static void do_input(void *p);
-static void do_status(void *p);
+static void do_input(struct work_struct *);
+static void do_status(struct work_struct *);
 
 static void ip2_wait_until_sent(PTTY,int);
 
@@ -918,7 +918,7 @@
 		pCh++;
 	}
 ex_exit:
-	INIT_WORK(&pB->tqueue_interrupt, (void(*)(void*)) ip2_interrupt_bh, pB);
+	INIT_WORK(&pB->tqueue_interrupt, ip2_interrupt_bh);
 	return;
 
 err_release_region:
@@ -1125,8 +1125,8 @@
 
 
 /******************************************************************************/
-/* Function:   ip2_interrupt_bh(pB)                                           */
-/* Parameters: pB - pointer to the board structure                            */
+/* Function:   ip2_interrupt_bh(work)                                         */
+/* Parameters: work - pointer to the board structure                          */
 /* Returns:    Nothing                                                        */
 /*                                                                            */
 /* Description:                                                               */
@@ -1135,8 +1135,9 @@
 /*                                                                            */
 /******************************************************************************/
 static void
-ip2_interrupt_bh(i2eBordStrPtr pB)
+ip2_interrupt_bh(struct work_struct *work)
 {
+	i2eBordStrPtr pB = container_of(work, i2eBordStr, tqueue_interrupt);
 //	pB better well be set or we have a problem!  We can only get
 //	here from the IMMEDIATE queue.  Here, we process the boards.
 //	Checking pB doesn't cost much and it saves us from the sanity checkers.
@@ -1245,9 +1246,9 @@
 	ip2trace (ITRC_NO_PORT, ITRC_INTR, ITRC_RETURN, 0 );
 }
 
-static void do_input(void *p)
+static void do_input(struct work_struct *work)
 {
-	i2ChanStrPtr pCh = p;
+	i2ChanStrPtr pCh = container_of(work, i2ChanStr, tqueue_input);
 	unsigned long flags;
 
 	ip2trace(CHANN, ITRC_INPUT, 21, 0 );
@@ -1279,9 +1280,9 @@
 	}
 }
 
-static void do_status(void *p)
+static void do_status(struct work_struct *work)
 {
-	i2ChanStrPtr pCh = p;
+	i2ChanStrPtr pCh = container_of(work, i2ChanStr, tqueue_status);
 	int status;
 
 	status =  i2GetStatus( pCh, (I2_BRK|I2_PAR|I2_FRA|I2_OVR) );
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
index 0030cd8..6c59baa 100644
--- a/drivers/char/ipmi/ipmi_bt_sm.c
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -33,11 +33,13 @@
 #include <linux/ipmi_msgdefs.h>		/* for completion codes */
 #include "ipmi_si_sm.h"
 
-static int bt_debug = 0x00;	/* Production value 0, see following flags */
+#define BT_DEBUG_OFF	0	/* Used in production */
+#define BT_DEBUG_ENABLE	1	/* Generic messages */
+#define BT_DEBUG_MSG	2	/* Prints all request/response buffers */
+#define BT_DEBUG_STATES	4	/* Verbose look at state changes */
 
-#define	BT_DEBUG_ENABLE	1
-#define BT_DEBUG_MSG	2
-#define BT_DEBUG_STATES	4
+static int bt_debug = BT_DEBUG_OFF;
+
 module_param(bt_debug, int, 0644);
 MODULE_PARM_DESC(bt_debug, "debug bitmask, 1=enable, 2=messages, 4=states");
 
@@ -47,38 +49,54 @@
    Since the Open IPMI architecture is single-message oriented at this
    stage, the queue depth of BT is of no concern. */
 
-#define BT_NORMAL_TIMEOUT	5000000	/* seconds in microseconds */
-#define BT_RETRY_LIMIT		2
-#define BT_RESET_DELAY		6000000	/* 6 seconds after warm reset */
+#define BT_NORMAL_TIMEOUT	5	/* seconds */
+#define BT_NORMAL_RETRY_LIMIT	2
+#define BT_RESET_DELAY		6	/* seconds after warm reset */
+
+/* States are written in chronological order and usually cover
+   multiple rows of the state table discussion in the IPMI spec. */
 
 enum bt_states {
-	BT_STATE_IDLE,
+	BT_STATE_IDLE = 0,	/* Order is critical in this list */
 	BT_STATE_XACTION_START,
 	BT_STATE_WRITE_BYTES,
-	BT_STATE_WRITE_END,
 	BT_STATE_WRITE_CONSUME,
-	BT_STATE_B2H_WAIT,
-	BT_STATE_READ_END,
-	BT_STATE_RESET1,		/* These must come last */
+	BT_STATE_READ_WAIT,
+	BT_STATE_CLEAR_B2H,
+	BT_STATE_READ_BYTES,
+	BT_STATE_RESET1,	/* These must come last */
 	BT_STATE_RESET2,
 	BT_STATE_RESET3,
 	BT_STATE_RESTART,
-	BT_STATE_HOSED
+	BT_STATE_PRINTME,
+	BT_STATE_CAPABILITIES_BEGIN,
+	BT_STATE_CAPABILITIES_END,
+	BT_STATE_LONG_BUSY	/* BT doesn't get hosed :-) */
 };
 
+/* Macros seen at the end of state "case" blocks.  They help with legibility
+   and debugging. */
+
+#define BT_STATE_CHANGE(X,Y) { bt->state = X; return Y; }
+
+#define BT_SI_SM_RETURN(Y)   { last_printed = BT_STATE_PRINTME; return Y; }
+
 struct si_sm_data {
 	enum bt_states	state;
-	enum bt_states	last_state;	/* assist printing and resets */
 	unsigned char	seq;		/* BT sequence number */
 	struct si_sm_io	*io;
-        unsigned char	write_data[IPMI_MAX_MSG_LENGTH];
-        int		write_count;
-        unsigned char	read_data[IPMI_MAX_MSG_LENGTH];
-        int		read_count;
-        int		truncated;
-        long		timeout;
-        unsigned int	error_retries;	/* end of "common" fields */
+	unsigned char	write_data[IPMI_MAX_MSG_LENGTH];
+	int		write_count;
+	unsigned char	read_data[IPMI_MAX_MSG_LENGTH];
+	int		read_count;
+	int		truncated;
+	long		timeout;	/* microseconds countdown */
+	int		error_retries;	/* end of "common" fields */
 	int		nonzero_status;	/* hung BMCs stay all 0 */
+	enum bt_states	complete;	/* to divert the state machine */
+	int		BT_CAP_outreqs;
+	long		BT_CAP_req2rsp;
+	int		BT_CAP_retries;	/* Recommended retries */
 };
 
 #define BT_CLR_WR_PTR	0x01	/* See IPMI 1.5 table 11.6.4 */
@@ -111,86 +129,118 @@
 static char *state2txt(unsigned char state)
 {
 	switch (state) {
-		case BT_STATE_IDLE:		return("IDLE");
-		case BT_STATE_XACTION_START:	return("XACTION");
-		case BT_STATE_WRITE_BYTES:	return("WR_BYTES");
-		case BT_STATE_WRITE_END:	return("WR_END");
-		case BT_STATE_WRITE_CONSUME:	return("WR_CONSUME");
-		case BT_STATE_B2H_WAIT:		return("B2H_WAIT");
-		case BT_STATE_READ_END:		return("RD_END");
-		case BT_STATE_RESET1:		return("RESET1");
-		case BT_STATE_RESET2:		return("RESET2");
-		case BT_STATE_RESET3:		return("RESET3");
-		case BT_STATE_RESTART:		return("RESTART");
-		case BT_STATE_HOSED:		return("HOSED");
+	case BT_STATE_IDLE:		return("IDLE");
+	case BT_STATE_XACTION_START:	return("XACTION");
+	case BT_STATE_WRITE_BYTES:	return("WR_BYTES");
+	case BT_STATE_WRITE_CONSUME:	return("WR_CONSUME");
+	case BT_STATE_READ_WAIT:	return("RD_WAIT");
+	case BT_STATE_CLEAR_B2H:	return("CLEAR_B2H");
+	case BT_STATE_READ_BYTES:	return("RD_BYTES");
+	case BT_STATE_RESET1:		return("RESET1");
+	case BT_STATE_RESET2:		return("RESET2");
+	case BT_STATE_RESET3:		return("RESET3");
+	case BT_STATE_RESTART:		return("RESTART");
+	case BT_STATE_LONG_BUSY:	return("LONG_BUSY");
+	case BT_STATE_CAPABILITIES_BEGIN: return("CAP_BEGIN");
+	case BT_STATE_CAPABILITIES_END:	return("CAP_END");
 	}
 	return("BAD STATE");
 }
 #define STATE2TXT state2txt(bt->state)
 
-static char *status2txt(unsigned char status, char *buf)
+static char *status2txt(unsigned char status)
 {
+	/*
+	 * This cannot be called by two threads at the same time and
+	 * the buffer is always consumed immediately, so the static is
+	 * safe to use.
+	 */
+	static char buf[40];
+
 	strcpy(buf, "[ ");
-	if (status & BT_B_BUSY) strcat(buf, "B_BUSY ");
-	if (status & BT_H_BUSY) strcat(buf, "H_BUSY ");
-	if (status & BT_OEM0) strcat(buf, "OEM0 ");
-	if (status & BT_SMS_ATN) strcat(buf, "SMS ");
-	if (status & BT_B2H_ATN) strcat(buf, "B2H ");
-	if (status & BT_H2B_ATN) strcat(buf, "H2B ");
+	if (status & BT_B_BUSY)
+		strcat(buf, "B_BUSY ");
+	if (status & BT_H_BUSY)
+		strcat(buf, "H_BUSY ");
+	if (status & BT_OEM0)
+		strcat(buf, "OEM0 ");
+	if (status & BT_SMS_ATN)
+		strcat(buf, "SMS ");
+	if (status & BT_B2H_ATN)
+		strcat(buf, "B2H ");
+	if (status & BT_H2B_ATN)
+		strcat(buf, "H2B ");
 	strcat(buf, "]");
 	return buf;
 }
-#define STATUS2TXT(buf) status2txt(status, buf)
+#define STATUS2TXT status2txt(status)
 
-/* This will be called from within this module on a hosed condition */
-#define FIRST_SEQ	0
+/* called externally at insmod time, and internally on cleanup */
+
 static unsigned int bt_init_data(struct si_sm_data *bt, struct si_sm_io *io)
 {
-	bt->state = BT_STATE_IDLE;
-	bt->last_state = BT_STATE_IDLE;
-	bt->seq = FIRST_SEQ;
-	bt->io = io;
-	bt->write_count = 0;
-	bt->read_count = 0;
-	bt->error_retries = 0;
-	bt->nonzero_status = 0;
-	bt->truncated = 0;
-	bt->timeout = BT_NORMAL_TIMEOUT;
+	memset(bt, 0, sizeof(struct si_sm_data));
+	if (bt->io != io) {		/* external: one-time only things */
+		bt->io = io;
+		bt->seq = 0;
+	}
+	bt->state = BT_STATE_IDLE;	/* start here */
+	bt->complete = BT_STATE_IDLE;	/* end here */
+	bt->BT_CAP_req2rsp = BT_NORMAL_TIMEOUT * 1000000;
+	bt->BT_CAP_retries = BT_NORMAL_RETRY_LIMIT;
+	/* BT_CAP_outreqs == zero is a flag to read BT Capabilities */
 	return 3; /* We claim 3 bytes of space; ought to check SPMI table */
 }
 
+/* Jam a completion code (probably an error) into a response */
+
+static void force_result(struct si_sm_data *bt, unsigned char completion_code)
+{
+	bt->read_data[0] = 4;				/* # following bytes */
+	bt->read_data[1] = bt->write_data[1] | 4;	/* Odd NetFn/LUN */
+	bt->read_data[2] = bt->write_data[2];		/* seq (ignored) */
+	bt->read_data[3] = bt->write_data[3];		/* Command */
+	bt->read_data[4] = completion_code;
+	bt->read_count = 5;
+}
+
+/* The upper state machine starts here */
+
 static int bt_start_transaction(struct si_sm_data *bt,
 				unsigned char *data,
 				unsigned int size)
 {
 	unsigned int i;
 
-	if ((size < 2) || (size > (IPMI_MAX_MSG_LENGTH - 2)))
-	       return -1;
+	if (size < 2)
+		return IPMI_REQ_LEN_INVALID_ERR;
+	if (size > IPMI_MAX_MSG_LENGTH)
+		return IPMI_REQ_LEN_EXCEEDED_ERR;
 
-	if ((bt->state != BT_STATE_IDLE) && (bt->state != BT_STATE_HOSED))
-		return -2;
+	if (bt->state == BT_STATE_LONG_BUSY)
+		return IPMI_NODE_BUSY_ERR;
+
+	if (bt->state != BT_STATE_IDLE)
+		return IPMI_NOT_IN_MY_STATE_ERR;
 
 	if (bt_debug & BT_DEBUG_MSG) {
-    		printk(KERN_WARNING "+++++++++++++++++++++++++++++++++++++\n");
-		printk(KERN_WARNING "BT: write seq=0x%02X:", bt->seq);
+		printk(KERN_WARNING "BT: +++++++++++++++++ New command\n");
+		printk(KERN_WARNING "BT: NetFn/LUN CMD [%d data]:", size - 2);
 		for (i = 0; i < size; i ++)
-		       printk (" %02x", data[i]);
+			printk (" %02x", data[i]);
 		printk("\n");
 	}
 	bt->write_data[0] = size + 1;	/* all data plus seq byte */
 	bt->write_data[1] = *data;	/* NetFn/LUN */
-	bt->write_data[2] = bt->seq;
+	bt->write_data[2] = bt->seq++;
 	memcpy(bt->write_data + 3, data + 1, size - 1);
 	bt->write_count = size + 2;
-
 	bt->error_retries = 0;
 	bt->nonzero_status = 0;
-	bt->read_count = 0;
 	bt->truncated = 0;
 	bt->state = BT_STATE_XACTION_START;
-	bt->last_state = BT_STATE_IDLE;
-	bt->timeout = BT_NORMAL_TIMEOUT;
+	bt->timeout = bt->BT_CAP_req2rsp;
+	force_result(bt, IPMI_ERR_UNSPECIFIED);
 	return 0;
 }
 
@@ -198,38 +248,30 @@
    it calls this.  Strip out the length and seq bytes. */
 
 static int bt_get_result(struct si_sm_data *bt,
-			   unsigned char *data,
-			   unsigned int length)
+			 unsigned char *data,
+			 unsigned int length)
 {
 	int i, msg_len;
 
 	msg_len = bt->read_count - 2;		/* account for length & seq */
-	/* Always NetFn, Cmd, cCode */
 	if (msg_len < 3 || msg_len > IPMI_MAX_MSG_LENGTH) {
-		printk(KERN_DEBUG "BT results: bad msg_len = %d\n", msg_len);
-		data[0] = bt->write_data[1] | 0x4;	/* Kludge a response */
-		data[1] = bt->write_data[3];
-		data[2] = IPMI_ERR_UNSPECIFIED;
+		force_result(bt, IPMI_ERR_UNSPECIFIED);
 		msg_len = 3;
-	} else {
-		data[0] = bt->read_data[1];
-		data[1] = bt->read_data[3];
-		if (length < msg_len)
-		       bt->truncated = 1;
-		if (bt->truncated) {	/* can be set in read_all_bytes() */
-			data[2] = IPMI_ERR_MSG_TRUNCATED;
-			msg_len = 3;
-		} else
-		       memcpy(data + 2, bt->read_data + 4, msg_len - 2);
-
-		if (bt_debug & BT_DEBUG_MSG) {
-			printk (KERN_WARNING "BT: res (raw)");
-			for (i = 0; i < msg_len; i++)
-			       printk(" %02x", data[i]);
-			printk ("\n");
-		}
 	}
-	bt->read_count = 0;	/* paranoia */
+	data[0] = bt->read_data[1];
+	data[1] = bt->read_data[3];
+	if (length < msg_len || bt->truncated) {
+		data[2] = IPMI_ERR_MSG_TRUNCATED;
+		msg_len = 3;
+	} else
+		memcpy(data + 2, bt->read_data + 4, msg_len - 2);
+
+	if (bt_debug & BT_DEBUG_MSG) {
+		printk (KERN_WARNING "BT: result %d bytes:", msg_len);
+		for (i = 0; i < msg_len; i++)
+			printk(" %02x", data[i]);
+		printk ("\n");
+	}
 	return msg_len;
 }
 
@@ -238,22 +280,40 @@
 
 static void reset_flags(struct si_sm_data *bt)
 {
+	if (bt_debug)
+		printk(KERN_WARNING "IPMI BT: flag reset %s\n",
+					status2txt(BT_STATUS));
 	if (BT_STATUS & BT_H_BUSY)
-	       BT_CONTROL(BT_H_BUSY);
-	if (BT_STATUS & BT_B_BUSY)
-	       BT_CONTROL(BT_B_BUSY);
-	BT_CONTROL(BT_CLR_WR_PTR);
-	BT_CONTROL(BT_SMS_ATN);
+		BT_CONTROL(BT_H_BUSY);	/* force clear */
+	BT_CONTROL(BT_CLR_WR_PTR);	/* always reset */
+	BT_CONTROL(BT_SMS_ATN);		/* always clear */
+	BT_INTMASK_W(BT_BMC_HWRST);
+}
 
-	if (BT_STATUS & BT_B2H_ATN) {
-		int i;
-		BT_CONTROL(BT_H_BUSY);
-		BT_CONTROL(BT_B2H_ATN);
-		BT_CONTROL(BT_CLR_RD_PTR);
-		for (i = 0; i < IPMI_MAX_MSG_LENGTH + 2; i++)
-		       BMC2HOST;
-		BT_CONTROL(BT_H_BUSY);
-	}
+/* Get rid of an unwanted/stale response.  This should only be needed for
+   BMCs that support multiple outstanding requests. */
+
+static void drain_BMC2HOST(struct si_sm_data *bt)
+{
+	int i, size;
+
+	if (!(BT_STATUS & BT_B2H_ATN)) 	/* Not signalling a response */
+		return;
+
+	BT_CONTROL(BT_H_BUSY);		/* now set */
+	BT_CONTROL(BT_B2H_ATN);		/* always clear */
+	BT_STATUS;			/* pause */
+	BT_CONTROL(BT_B2H_ATN);		/* some BMCs are stubborn */
+	BT_CONTROL(BT_CLR_RD_PTR);	/* always reset */
+	if (bt_debug)
+		printk(KERN_WARNING "IPMI BT: stale response %s; ",
+			status2txt(BT_STATUS));
+	size = BMC2HOST;
+	for (i = 0; i < size ; i++)
+		BMC2HOST;
+	BT_CONTROL(BT_H_BUSY);		/* now clear */
+	if (bt_debug)
+		printk("drained %d bytes\n", size + 1);
 }
 
 static inline void write_all_bytes(struct si_sm_data *bt)
@@ -261,201 +321,256 @@
 	int i;
 
 	if (bt_debug & BT_DEBUG_MSG) {
-    		printk(KERN_WARNING "BT: write %d bytes seq=0x%02X",
+		printk(KERN_WARNING "BT: write %d bytes seq=0x%02X",
 			bt->write_count, bt->seq);
 		for (i = 0; i < bt->write_count; i++)
 			printk (" %02x", bt->write_data[i]);
 		printk ("\n");
 	}
 	for (i = 0; i < bt->write_count; i++)
-	       HOST2BMC(bt->write_data[i]);
+		HOST2BMC(bt->write_data[i]);
 }
 
 static inline int read_all_bytes(struct si_sm_data *bt)
 {
 	unsigned char i;
 
+	/* length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode.
+	   Keep layout of first four bytes aligned with write_data[] */
+
 	bt->read_data[0] = BMC2HOST;
 	bt->read_count = bt->read_data[0];
-	if (bt_debug & BT_DEBUG_MSG)
-    		printk(KERN_WARNING "BT: read %d bytes:", bt->read_count);
 
-	/* minimum: length, NetFn, Seq, Cmd, cCode == 5 total, or 4 more
-	   following the length byte. */
 	if (bt->read_count < 4 || bt->read_count >= IPMI_MAX_MSG_LENGTH) {
 		if (bt_debug & BT_DEBUG_MSG)
-			printk("bad length %d\n", bt->read_count);
+			printk(KERN_WARNING "BT: bad raw rsp len=%d\n",
+				bt->read_count);
 		bt->truncated = 1;
 		return 1;	/* let next XACTION START clean it up */
 	}
 	for (i = 1; i <= bt->read_count; i++)
-	       bt->read_data[i] = BMC2HOST;
-	bt->read_count++;	/* account for the length byte */
+		bt->read_data[i] = BMC2HOST;
+	bt->read_count++;	/* Account internally for length byte */
 
 	if (bt_debug & BT_DEBUG_MSG) {
-	    	for (i = 0; i < bt->read_count; i++)
-			printk (" %02x", bt->read_data[i]);
-	    	printk ("\n");
-	}
-	if (bt->seq != bt->write_data[2])	/* idiot check */
-		printk(KERN_DEBUG "BT: internal error: sequence mismatch\n");
+		int max = bt->read_count;
 
-	/* per the spec, the (NetFn, Seq, Cmd) tuples should match */
-	if ((bt->read_data[3] == bt->write_data[3]) &&		/* Cmd */
-        	(bt->read_data[2] == bt->write_data[2]) &&	/* Sequence */
-        	((bt->read_data[1] & 0xF8) == (bt->write_data[1] & 0xF8)))
+		printk(KERN_WARNING "BT: got %d bytes seq=0x%02X",
+			max, bt->read_data[2]);
+		if (max > 16)
+			max = 16;
+		for (i = 0; i < max; i++)
+			printk (" %02x", bt->read_data[i]);
+		printk ("%s\n", bt->read_count == max ? "" : " ...");
+	}
+
+	/* per the spec, the (NetFn[1], Seq[2], Cmd[3]) tuples must match */
+	if ((bt->read_data[3] == bt->write_data[3]) &&
+	    (bt->read_data[2] == bt->write_data[2]) &&
+	    ((bt->read_data[1] & 0xF8) == (bt->write_data[1] & 0xF8)))
 			return 1;
 
 	if (bt_debug & BT_DEBUG_MSG)
-	       printk(KERN_WARNING "BT: bad packet: "
+		printk(KERN_WARNING "IPMI BT: bad packet: "
 		"want 0x(%02X, %02X, %02X) got (%02X, %02X, %02X)\n",
-		bt->write_data[1], bt->write_data[2], bt->write_data[3],
+		bt->write_data[1] | 0x04, bt->write_data[2], bt->write_data[3],
 		bt->read_data[1],  bt->read_data[2],  bt->read_data[3]);
 	return 0;
 }
 
-/* Modifies bt->state appropriately, need to get into the bt_event() switch */
+/* Restart if retries are left, or return an error completion code */
 
-static void error_recovery(struct si_sm_data *bt, char *reason)
+static enum si_sm_result error_recovery(struct si_sm_data *bt,
+					unsigned char status,
+					unsigned char cCode)
 {
-	unsigned char status;
-	char buf[40]; /* For getting status */
+	char *reason;
 
-	bt->timeout = BT_NORMAL_TIMEOUT; /* various places want to retry */
+	bt->timeout = bt->BT_CAP_req2rsp;
 
-	status = BT_STATUS;
-	printk(KERN_DEBUG "BT: %s in %s %s\n", reason, STATE2TXT,
-	       STATUS2TXT(buf));
+	switch (cCode) {
+	case IPMI_TIMEOUT_ERR:
+		reason = "timeout";
+		break;
+	default:
+		reason = "internal error";
+		break;
+	}
 
+	printk(KERN_WARNING "IPMI BT: %s in %s %s ", 	/* open-ended line */
+		reason, STATE2TXT, STATUS2TXT);
+
+	/* Per the IPMI spec, retries are based on the sequence number
+	   known only to this module, so manage a restart here. */
 	(bt->error_retries)++;
-	if (bt->error_retries > BT_RETRY_LIMIT) {
-		printk(KERN_DEBUG "retry limit (%d) exceeded\n", BT_RETRY_LIMIT);
-		bt->state = BT_STATE_HOSED;
-		if (!bt->nonzero_status)
-			printk(KERN_ERR "IPMI: BT stuck, try power cycle\n");
-		else if (bt->error_retries <= BT_RETRY_LIMIT + 1) {
-			printk(KERN_DEBUG "IPMI: BT reset (takes 5 secs)\n");
-        		bt->state = BT_STATE_RESET1;
+	if (bt->error_retries < bt->BT_CAP_retries) {
+		printk("%d retries left\n",
+			bt->BT_CAP_retries - bt->error_retries);
+		bt->state = BT_STATE_RESTART;
+		return SI_SM_CALL_WITHOUT_DELAY;
+	}
+
+	printk("failed %d retries, sending error response\n",
+		bt->BT_CAP_retries);
+	if (!bt->nonzero_status)
+		printk(KERN_ERR "IPMI BT: stuck, try power cycle\n");
+
+	/* this is most likely during insmod */
+	else if (bt->seq <= (unsigned char)(bt->BT_CAP_retries & 0xFF)) {
+		printk(KERN_WARNING "IPMI: BT reset (takes 5 secs)\n");
+		bt->state = BT_STATE_RESET1;
+		return SI_SM_CALL_WITHOUT_DELAY;
+	}
+
+	/* Concoct a useful error message, set up the next state, and
+	   be done with this sequence. */
+
+	bt->state = BT_STATE_IDLE;
+	switch (cCode) {
+	case IPMI_TIMEOUT_ERR:
+		if (status & BT_B_BUSY) {
+			cCode = IPMI_NODE_BUSY_ERR;
+			bt->state = BT_STATE_LONG_BUSY;
 		}
-	return;
+		break;
+	default:
+		break;
 	}
-
-	/* Sometimes the BMC queues get in an "off-by-one" state...*/
-	if ((bt->state == BT_STATE_B2H_WAIT) && (status & BT_B2H_ATN)) {
-    		printk(KERN_DEBUG "retry B2H_WAIT\n");
-		return;
-	}
-
-	printk(KERN_DEBUG "restart command\n");
-	bt->state = BT_STATE_RESTART;
+	force_result(bt, cCode);
+	return SI_SM_TRANSACTION_COMPLETE;
 }
 
-/* Check the status and (possibly) advance the BT state machine.  The
-   default return is SI_SM_CALL_WITH_DELAY. */
+/* Check status and (usually) take action and change this state machine. */
 
 static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
 {
-	unsigned char status;
-	char buf[40]; /* For getting status */
+	unsigned char status, BT_CAP[8];
+	static enum bt_states last_printed = BT_STATE_PRINTME;
 	int i;
 
 	status = BT_STATUS;
 	bt->nonzero_status |= status;
-
-	if ((bt_debug & BT_DEBUG_STATES) && (bt->state != bt->last_state))
+	if ((bt_debug & BT_DEBUG_STATES) && (bt->state != last_printed)) {
 		printk(KERN_WARNING "BT: %s %s TO=%ld - %ld \n",
 			STATE2TXT,
-			STATUS2TXT(buf),
+			STATUS2TXT,
 			bt->timeout,
 			time);
-	bt->last_state = bt->state;
+		last_printed = bt->state;
+	}
 
-	if (bt->state == BT_STATE_HOSED)
-	       return SI_SM_HOSED;
+	/* Commands that time out may still (eventually) provide a response.
+	   This stale response will get in the way of a new response so remove
+	   it if possible (hopefully during IDLE).  Even if it comes up later
+	   it will be rejected by its (now-forgotten) seq number. */
 
-	if (bt->state != BT_STATE_IDLE) {	/* do timeout test */
+	if ((bt->state < BT_STATE_WRITE_BYTES) && (status & BT_B2H_ATN)) {
+		drain_BMC2HOST(bt);
+		BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
+	}
+
+	if ((bt->state != BT_STATE_IDLE) &&
+	    (bt->state <  BT_STATE_PRINTME)) {		/* check timeout */
 		bt->timeout -= time;
-		if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1)) {
-			error_recovery(bt, "timed out");
-			return SI_SM_CALL_WITHOUT_DELAY;
-		}
+		if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1))
+			return error_recovery(bt,
+					      status,
+					      IPMI_TIMEOUT_ERR);
 	}
 
 	switch (bt->state) {
 
-    	case BT_STATE_IDLE:	/* check for asynchronous messages */
+	/* Idle state first checks for asynchronous messages from another
+	   channel, then does some opportunistic housekeeping. */
+
+	case BT_STATE_IDLE:
 		if (status & BT_SMS_ATN) {
 			BT_CONTROL(BT_SMS_ATN);	/* clear it */
 			return SI_SM_ATTN;
 		}
-		return SI_SM_IDLE;
+
+		if (status & BT_H_BUSY)		/* clear a leftover H_BUSY */
+			BT_CONTROL(BT_H_BUSY);
+
+		/* Read BT capabilities if it hasn't been done yet */
+		if (!bt->BT_CAP_outreqs)
+			BT_STATE_CHANGE(BT_STATE_CAPABILITIES_BEGIN,
+					SI_SM_CALL_WITHOUT_DELAY);
+		bt->timeout = bt->BT_CAP_req2rsp;
+		BT_SI_SM_RETURN(SI_SM_IDLE);
 
 	case BT_STATE_XACTION_START:
-		if (status & BT_H_BUSY) {
-			BT_CONTROL(BT_H_BUSY);
-			break;
-		}
-    		if (status & BT_B2H_ATN)
-		       break;
-		bt->state = BT_STATE_WRITE_BYTES;
-		return SI_SM_CALL_WITHOUT_DELAY;	/* for logging */
+		if (status & (BT_B_BUSY | BT_H2B_ATN))
+			BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
+		if (BT_STATUS & BT_H_BUSY)
+			BT_CONTROL(BT_H_BUSY);	/* force clear */
+		BT_STATE_CHANGE(BT_STATE_WRITE_BYTES,
+				SI_SM_CALL_WITHOUT_DELAY);
 
 	case BT_STATE_WRITE_BYTES:
-		if (status & (BT_B_BUSY | BT_H2B_ATN))
-		       break;
+		if (status & BT_H_BUSY)
+			BT_CONTROL(BT_H_BUSY);	/* clear */
 		BT_CONTROL(BT_CLR_WR_PTR);
 		write_all_bytes(bt);
-		BT_CONTROL(BT_H2B_ATN);	/* clears too fast to catch? */
-		bt->state = BT_STATE_WRITE_CONSUME;
-		return SI_SM_CALL_WITHOUT_DELAY; /* it MIGHT sail through */
+		BT_CONTROL(BT_H2B_ATN);	/* can clear too fast to catch */
+		BT_STATE_CHANGE(BT_STATE_WRITE_CONSUME,
+				SI_SM_CALL_WITHOUT_DELAY);
 
-	case BT_STATE_WRITE_CONSUME: /* BMCs usually blow right thru here */
-        	if (status & (BT_H2B_ATN | BT_B_BUSY))
-		       break;
-		bt->state = BT_STATE_B2H_WAIT;
-		/* fall through with status */
+	case BT_STATE_WRITE_CONSUME:
+		if (status & (BT_B_BUSY | BT_H2B_ATN))
+			BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
+		BT_STATE_CHANGE(BT_STATE_READ_WAIT,
+				SI_SM_CALL_WITHOUT_DELAY);
 
-	/* Stay in BT_STATE_B2H_WAIT until a packet matches.  However, spinning
-	   hard here, constantly reading status, seems to hold off the
-	   generation of B2H_ATN so ALWAYS return CALL_WITH_DELAY. */
+	/* Spinning hard can suppress B2H_ATN and force a timeout */
 
-	case BT_STATE_B2H_WAIT:
-    		if (!(status & BT_B2H_ATN))
-		       break;
+	case BT_STATE_READ_WAIT:
+		if (!(status & BT_B2H_ATN))
+			BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
+		BT_CONTROL(BT_H_BUSY);		/* set */
 
-		/* Assume ordered, uncached writes: no need to wait */
-		if (!(status & BT_H_BUSY))
-		       BT_CONTROL(BT_H_BUSY); /* set */
-		BT_CONTROL(BT_B2H_ATN);		/* clear it, ACK to the BMC */
-		BT_CONTROL(BT_CLR_RD_PTR);	/* reset the queue */
-		i = read_all_bytes(bt);
-		BT_CONTROL(BT_H_BUSY);		/* clear */
-		if (!i)				/* Try this state again */
-		       break;
-		bt->state = BT_STATE_READ_END;
-		return SI_SM_CALL_WITHOUT_DELAY;	/* for logging */
+		/* Uncached, ordered writes should just proceed serially but
+		   some BMCs don't clear B2H_ATN with one hit.  Fast-path a
+		   workaround without too much penalty to the general case. */
 
-    	case BT_STATE_READ_END:
+		BT_CONTROL(BT_B2H_ATN);		/* clear it to ACK the BMC */
+		BT_STATE_CHANGE(BT_STATE_CLEAR_B2H,
+				SI_SM_CALL_WITHOUT_DELAY);
 
-		/* I could wait on BT_H_BUSY to go clear for a truly clean
-		   exit.  However, this is already done in XACTION_START
-		   and the (possible) extra loop/status/possible wait affects
-		   performance.  So, as long as it works, just ignore H_BUSY */
+	case BT_STATE_CLEAR_B2H:
+		if (status & BT_B2H_ATN) {	/* keep hitting it */
+			BT_CONTROL(BT_B2H_ATN);
+			BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
+		}
+		BT_STATE_CHANGE(BT_STATE_READ_BYTES,
+				SI_SM_CALL_WITHOUT_DELAY);
 
-#ifdef MAKE_THIS_TRUE_IF_NECESSARY
+	case BT_STATE_READ_BYTES:
+		if (!(status & BT_H_BUSY))	/* check in case of retry */
+			BT_CONTROL(BT_H_BUSY);
+		BT_CONTROL(BT_CLR_RD_PTR);	/* start of BMC2HOST buffer */
+		i = read_all_bytes(bt);		/* true == packet seq match */
+		BT_CONTROL(BT_H_BUSY);		/* NOW clear */
+		if (!i) 			/* Not my message */
+			BT_STATE_CHANGE(BT_STATE_READ_WAIT,
+					SI_SM_CALL_WITHOUT_DELAY);
+		bt->state = bt->complete;
+		return bt->state == BT_STATE_IDLE ?	/* where to next? */
+			SI_SM_TRANSACTION_COMPLETE :	/* normal */
+			SI_SM_CALL_WITHOUT_DELAY;	/* Startup magic */
 
-		if (status & BT_H_BUSY)
-		       break;
-#endif
-		bt->seq++;
-		bt->state = BT_STATE_IDLE;
-		return SI_SM_TRANSACTION_COMPLETE;
+	case BT_STATE_LONG_BUSY:	/* For example: after FW update */
+		if (!(status & BT_B_BUSY)) {
+			reset_flags(bt);	/* next state is now IDLE */
+			bt_init_data(bt, bt->io);
+		}
+		return SI_SM_CALL_WITH_DELAY;	/* No repeat printing */
 
 	case BT_STATE_RESET1:
-    		reset_flags(bt);
-    		bt->timeout = BT_RESET_DELAY;
-		bt->state = BT_STATE_RESET2;
-		break;
+		reset_flags(bt);
+		drain_BMC2HOST(bt);
+		BT_STATE_CHANGE(BT_STATE_RESET2,
+				SI_SM_CALL_WITH_DELAY);
 
 	case BT_STATE_RESET2:		/* Send a soft reset */
 		BT_CONTROL(BT_CLR_WR_PTR);
@@ -464,29 +579,59 @@
 		HOST2BMC(42);		/* Sequence number */
 		HOST2BMC(3);		/* Cmd == Soft reset */
 		BT_CONTROL(BT_H2B_ATN);
-		bt->state = BT_STATE_RESET3;
-		break;
+		bt->timeout = BT_RESET_DELAY * 1000000;
+		BT_STATE_CHANGE(BT_STATE_RESET3,
+				SI_SM_CALL_WITH_DELAY);
 
-	case BT_STATE_RESET3:
+	case BT_STATE_RESET3:		/* Hold off everything for a bit */
 		if (bt->timeout > 0)
-		       return SI_SM_CALL_WITH_DELAY;
-		bt->state = BT_STATE_RESTART;	/* printk in debug modes */
-		break;
+			return SI_SM_CALL_WITH_DELAY;
+		drain_BMC2HOST(bt);
+		BT_STATE_CHANGE(BT_STATE_RESTART,
+				SI_SM_CALL_WITH_DELAY);
 
-	case BT_STATE_RESTART:		/* don't reset retries! */
-		reset_flags(bt);
-		bt->write_data[2] = ++bt->seq;
+	case BT_STATE_RESTART:		/* don't reset retries or seq! */
 		bt->read_count = 0;
 		bt->nonzero_status = 0;
-		bt->timeout = BT_NORMAL_TIMEOUT;
-		bt->state = BT_STATE_XACTION_START;
-		break;
+		bt->timeout = bt->BT_CAP_req2rsp;
+		BT_STATE_CHANGE(BT_STATE_XACTION_START,
+				SI_SM_CALL_WITH_DELAY);
 
-	default:	/* HOSED is supposed to be caught much earlier */
-		error_recovery(bt, "internal logic error");
-		break;
-  	}
-  	return SI_SM_CALL_WITH_DELAY;
+	/* Get BT Capabilities, using timing of upper level state machine.
+	   Set outreqs to prevent infinite loop on timeout. */
+	case BT_STATE_CAPABILITIES_BEGIN:
+		bt->BT_CAP_outreqs = 1;
+		{
+			unsigned char GetBT_CAP[] = { 0x18, 0x36 };
+			bt->state = BT_STATE_IDLE;
+			bt_start_transaction(bt, GetBT_CAP, sizeof(GetBT_CAP));
+		}
+		bt->complete = BT_STATE_CAPABILITIES_END;
+		BT_STATE_CHANGE(BT_STATE_XACTION_START,
+				SI_SM_CALL_WITH_DELAY);
+
+	case BT_STATE_CAPABILITIES_END:
+		i = bt_get_result(bt, BT_CAP, sizeof(BT_CAP));
+		bt_init_data(bt, bt->io);
+		if ((i == 8) && !BT_CAP[2]) {
+			bt->BT_CAP_outreqs = BT_CAP[3];
+			bt->BT_CAP_req2rsp = BT_CAP[6] * 1000000;
+			bt->BT_CAP_retries = BT_CAP[7];
+		} else
+			printk(KERN_WARNING "IPMI BT: using default values\n");
+		if (!bt->BT_CAP_outreqs)
+			bt->BT_CAP_outreqs = 1;
+		printk(KERN_WARNING "IPMI BT: req2rsp=%ld secs retries=%d\n",
+			bt->BT_CAP_req2rsp / 1000000L, bt->BT_CAP_retries);
+		bt->timeout = bt->BT_CAP_req2rsp;
+		return SI_SM_CALL_WITHOUT_DELAY;
+
+	default:	/* should never occur */
+		return error_recovery(bt,
+				      status,
+				      IPMI_ERR_UNSPECIFIED);
+	}
+	return SI_SM_CALL_WITH_DELAY;
 }
 
 static int bt_detect(struct si_sm_data *bt)
@@ -497,7 +642,7 @@
 	   test that first.  The calling routine uses negative logic. */
 
 	if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF))
-	       return 1;
+		return 1;
 	reset_flags(bt);
 	return 0;
 }
@@ -513,11 +658,11 @@
 
 struct si_sm_handlers bt_smi_handlers =
 {
-	.init_data         = bt_init_data,
-	.start_transaction = bt_start_transaction,
-	.get_result        = bt_get_result,
-	.event             = bt_event,
-	.detect            = bt_detect,
-	.cleanup           = bt_cleanup,
-	.size              = bt_size,
+	.init_data		= bt_init_data,
+	.start_transaction	= bt_start_transaction,
+	.get_result		= bt_get_result,
+	.event			= bt_event,
+	.detect			= bt_detect,
+	.cleanup		= bt_cleanup,
+	.size			= bt_size,
 };
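
The rewritten BT state machine ends most case blocks with BT_STATE_CHANGE() or BT_SI_SM_RETURN(), both of which return one of the enum si_sm_result values that the lower layer acts on. As a rough, illustrative sketch only (the real polling loop lives in ipmi_si_intf.c, and step_bt here is a hypothetical helper):

#include "ipmi_si_sm.h"	/* enum si_sm_result, struct si_sm_data, bt_smi_handlers */

/* Hypothetical helper: advance the BT state machine once and decide
 * what to do next based on its result code. */
static void step_bt(struct si_sm_data *bt, long elapsed_usecs)
{
	enum si_sm_result res;

	for (;;) {
		res = bt_smi_handlers.event(bt, elapsed_usecs);
		if (res != SI_SM_CALL_WITHOUT_DELAY)
			break;
		elapsed_usecs = 0;	/* next state can run immediately */
	}

	switch (res) {
	case SI_SM_CALL_WITH_DELAY:
		/* Poll again after a short pause (the BT_SI_SM_RETURN paths). */
		break;
	case SI_SM_TRANSACTION_COMPLETE:
		/* bt_get_result() may now be called to fetch the response. */
		break;
	case SI_SM_ATTN:
		/* An asynchronous (SMS) message is waiting. */
		break;
	case SI_SM_IDLE:
	default:
		break;
	}
}

Note that LONG_BUSY replaces the old HOSED state: a timed-out command now hands back an IPMI completion code via force_result() instead of wedging the interface.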
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 81fcf0c..375d337 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -596,6 +596,31 @@
 		rv = 0;
 		break;
 	}
+
+	case IPMICTL_GET_MAINTENANCE_MODE_CMD:
+	{
+		int mode;
+
+		mode = ipmi_get_maintenance_mode(priv->user);
+		if (copy_to_user(arg, &mode, sizeof(mode))) {
+			rv = -EFAULT;
+			break;
+		}
+		rv = 0;
+		break;
+	}
+
+	case IPMICTL_SET_MAINTENANCE_MODE_CMD:
+	{
+		int mode;
+
+		if (copy_from_user(&mode, arg, sizeof(mode))) {
+			rv = -EFAULT;
+			break;
+		}
+		rv = ipmi_set_maintenance_mode(priv->user, mode);
+		break;
+	}
 	}
   
 	return rv;
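
The two new ioctls expose the maintenance-mode control that ipmi_msghandler.c gains further down. A hedged user-space sketch, assuming the IPMICTL_*_MAINTENANCE_MODE_CMD and IPMI_MAINTENANCE_MODE_* constants are exported through <linux/ipmi.h> and that /dev/ipmi0 is the device node for interface 0:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ipmi.h>

int main(void)
{
	int fd, mode;

	fd = open("/dev/ipmi0", O_RDWR);
	if (fd < 0) {
		perror("open /dev/ipmi0");
		return 1;
	}

	/* Query the current setting (OFF, ON or AUTO). */
	if (ioctl(fd, IPMICTL_GET_MAINTENANCE_MODE_CMD, &mode) == 0)
		printf("maintenance mode is %d\n", mode);

	/* Force maintenance mode on, e.g. around a firmware update. */
	mode = IPMI_MAINTENANCE_MODE_ON;
	if (ioctl(fd, IPMICTL_SET_MAINTENANCE_MODE_CMD, &mode) < 0)
		perror("set maintenance mode");

	close(fd);
	return 0;
}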
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
index 2062675..c1b8228 100644
--- a/drivers/char/ipmi/ipmi_kcs_sm.c
+++ b/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -93,8 +93,8 @@
 				   state machine. */
 };
 
-#define MAX_KCS_READ_SIZE 80
-#define MAX_KCS_WRITE_SIZE 80
+#define MAX_KCS_READ_SIZE IPMI_MAX_MSG_LENGTH
+#define MAX_KCS_WRITE_SIZE IPMI_MAX_MSG_LENGTH
 
 /* Timeouts in microseconds. */
 #define IBF_RETRY_TIMEOUT 1000000
@@ -261,12 +261,14 @@
 {
 	unsigned int i;
 
-	if ((size < 2) || (size > MAX_KCS_WRITE_SIZE)) {
-		return -1;
-	}
-	if ((kcs->state != KCS_IDLE) && (kcs->state != KCS_HOSED)) {
-		return -2;
-	}
+	if (size < 2)
+		return IPMI_REQ_LEN_INVALID_ERR;
+	if (size > MAX_KCS_WRITE_SIZE)
+		return IPMI_REQ_LEN_EXCEEDED_ERR;
+
+	if ((kcs->state != KCS_IDLE) && (kcs->state != KCS_HOSED))
+		return IPMI_NOT_IN_MY_STATE_ERR;
+
 	if (kcs_debug & KCS_DEBUG_MSG) {
 		printk(KERN_DEBUG "start_kcs_transaction -");
 		for (i = 0; i < size; i ++) {
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index c47add8..5703ee2 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -48,7 +48,7 @@
 
 #define PFX "IPMI message handler: "
 
-#define IPMI_DRIVER_VERSION "39.0"
+#define IPMI_DRIVER_VERSION "39.1"
 
 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
 static int ipmi_init_msghandler(void);
@@ -59,6 +59,9 @@
 static struct proc_dir_entry *proc_ipmi_root = NULL;
 #endif /* CONFIG_PROC_FS */
 
+/* Remain in auto-maintenance mode for this amount of time (in ms). */
+#define IPMI_MAINTENANCE_MODE_TIMEOUT 30000
+
 #define MAX_EVENTS_IN_QUEUE	25
 
 /* Don't let a message sit in a queue forever, always time it with at lest
@@ -193,17 +196,28 @@
 
 	struct kref refcount;
 
+	/* Used for a list of interfaces. */
+	struct list_head link;
+
 	/* The list of upper layers that are using me.  seq_lock
 	 * protects this. */
 	struct list_head users;
 
+	/* Information to supply to users. */
+	unsigned char ipmi_version_major;
+	unsigned char ipmi_version_minor;
+
 	/* Used for wake ups at startup. */
 	wait_queue_head_t waitq;
 
 	struct bmc_device *bmc;
 	char *my_dev_name;
+	char *sysfs_name;
 
-	/* This is the lower-layer's sender routine. */
+	/* This is the lower-layer's sender routine.  Note that you
+	 * must either be holding the ipmi_interfaces_mutex or be in
+	 * an unpreemptible region to use this.  You must fetch the
+	 * value into a local variable and make sure it is not NULL. */
 	struct ipmi_smi_handlers *handlers;
 	void                     *send_info;
 
@@ -242,6 +256,7 @@
 	spinlock_t       events_lock; /* For dealing with event stuff. */
 	struct list_head waiting_events;
 	unsigned int     waiting_events_count; /* How many events in queue? */
+	int              delivering_events;
 
 	/* The event receiver for my BMC, only really used at panic
 	   shutdown as a place to store this. */
@@ -250,6 +265,12 @@
 	unsigned char local_sel_device;
 	unsigned char local_event_generator;
 
+	/* For handling of maintenance mode. */
+	int maintenance_mode;
+	int maintenance_mode_enable;
+	int auto_maintenance_timeout;
+	spinlock_t maintenance_mode_lock; /* Used in a timer... */
+
 	/* A cheap hack, if this is non-null and a message to an
 	   interface comes in with a NULL user, call this routine with
 	   it.  Note that the message will still be freed by the
@@ -338,13 +359,6 @@
 };
 #define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
 
-/* Used to mark an interface entry that cannot be used but is not a
- * free entry, either, primarily used at creation and deletion time so
- * a slot doesn't get reused too quickly. */
-#define IPMI_INVALID_INTERFACE_ENTRY ((ipmi_smi_t) ((long) 1))
-#define IPMI_INVALID_INTERFACE(i) (((i) == NULL) \
-				   || (i == IPMI_INVALID_INTERFACE_ENTRY))
-
 /**
  * The driver model view of the IPMI messaging driver.
  */
@@ -354,16 +368,13 @@
 };
 static DEFINE_MUTEX(ipmidriver_mutex);
 
-#define MAX_IPMI_INTERFACES 4
-static ipmi_smi_t ipmi_interfaces[MAX_IPMI_INTERFACES];
-
-/* Directly protects the ipmi_interfaces data structure. */
-static DEFINE_SPINLOCK(interfaces_lock);
+static struct list_head ipmi_interfaces = LIST_HEAD_INIT(ipmi_interfaces);
+static DEFINE_MUTEX(ipmi_interfaces_mutex);
 
 /* List of watchers that want to know when smi's are added and
    deleted. */
 static struct list_head smi_watchers = LIST_HEAD_INIT(smi_watchers);
-static DECLARE_RWSEM(smi_watchers_sem);
+static DEFINE_MUTEX(smi_watchers_mutex);
 
 
 static void free_recv_msg_list(struct list_head *q)
@@ -423,48 +434,84 @@
 	kfree(intf);
 }
 
+struct watcher_entry {
+	int              intf_num;
+	ipmi_smi_t       intf;
+	struct list_head link;
+};
+
 int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
 {
-	int           i;
-	unsigned long flags;
+	ipmi_smi_t intf;
+	struct list_head to_deliver = LIST_HEAD_INIT(to_deliver);
+	struct watcher_entry *e, *e2;
 
-	down_write(&smi_watchers_sem);
-	list_add(&(watcher->link), &smi_watchers);
-	up_write(&smi_watchers_sem);
-	spin_lock_irqsave(&interfaces_lock, flags);
-	for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
-		ipmi_smi_t intf = ipmi_interfaces[i];
-		if (IPMI_INVALID_INTERFACE(intf))
+	mutex_lock(&smi_watchers_mutex);
+
+	mutex_lock(&ipmi_interfaces_mutex);
+
+	/* Build a list of things to deliver. */
+	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+		if (intf->intf_num == -1)
 			continue;
-		spin_unlock_irqrestore(&interfaces_lock, flags);
-		watcher->new_smi(i, intf->si_dev);
-		spin_lock_irqsave(&interfaces_lock, flags);
+		e = kmalloc(sizeof(*e), GFP_KERNEL);
+		if (!e)
+			goto out_err;
+		kref_get(&intf->refcount);
+		e->intf = intf;
+		e->intf_num = intf->intf_num;
+		list_add_tail(&e->link, &to_deliver);
 	}
-	spin_unlock_irqrestore(&interfaces_lock, flags);
+
+	/* We will succeed, so add it to the list. */
+	list_add(&watcher->link, &smi_watchers);
+
+	mutex_unlock(&ipmi_interfaces_mutex);
+
+	list_for_each_entry_safe(e, e2, &to_deliver, link) {
+		list_del(&e->link);
+		watcher->new_smi(e->intf_num, e->intf->si_dev);
+		kref_put(&e->intf->refcount, intf_free);
+		kfree(e);
+	}
+
+	mutex_unlock(&smi_watchers_mutex);
+
 	return 0;
+
+ out_err:
+	mutex_unlock(&ipmi_interfaces_mutex);
+	mutex_unlock(&smi_watchers_mutex);
+	list_for_each_entry_safe(e, e2, &to_deliver, link) {
+		list_del(&e->link);
+		kref_put(&e->intf->refcount, intf_free);
+		kfree(e);
+	}
+	return -ENOMEM;
 }
 
 int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
 {
-	down_write(&smi_watchers_sem);
+	mutex_lock(&smi_watchers_mutex);
 	list_del(&(watcher->link));
-	up_write(&smi_watchers_sem);
+	mutex_unlock(&smi_watchers_mutex);
 	return 0;
 }
 
+/*
+ * Must be called with smi_watchers_mutex held.
+ */
 static void
 call_smi_watchers(int i, struct device *dev)
 {
 	struct ipmi_smi_watcher *w;
 
-	down_read(&smi_watchers_sem);
 	list_for_each_entry(w, &smi_watchers, link) {
 		if (try_module_get(w->owner)) {
 			w->new_smi(i, dev);
 			module_put(w->owner);
 		}
 	}
-	up_read(&smi_watchers_sem);
 }
 
 static int
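
ipmi_smi_watcher_register() now snapshots the interface list into private watcher_entry items while both mutexes are held, and only calls ->new_smi() after dropping ipmi_interfaces_mutex, so the callback can sleep or register users without deadlocking. A generic sketch of that "collect under the lock, call back outside it" idiom, with hypothetical names (item, items, snapshot_and_notify):

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct item {
	struct list_head link;
	int              id;
};

static LIST_HEAD(items);
static DEFINE_MUTEX(items_mutex);

static void snapshot_and_notify(void (*notify)(int id))
{
	struct item *it, *copy, *tmp;
	LIST_HEAD(snapshot);

	mutex_lock(&items_mutex);
	list_for_each_entry(it, &items, link) {
		copy = kmalloc(sizeof(*copy), GFP_KERNEL);
		if (!copy)
			break;		/* the real code unwinds and returns -ENOMEM */
		copy->id = it->id;
		list_add_tail(&copy->link, &snapshot);
	}
	mutex_unlock(&items_mutex);

	/* Callbacks run with no lock held, so they may sleep. */
	list_for_each_entry_safe(copy, tmp, &snapshot, link) {
		list_del(&copy->link);
		notify(copy->id);
		kfree(copy);
	}
}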
@@ -590,6 +637,17 @@
 	}
 }
 
+static void
+deliver_err_response(struct ipmi_recv_msg *msg, int err)
+{
+	msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
+	msg->msg_data[0] = err;
+	msg->msg.netfn |= 1; /* Convert to a response. */
+	msg->msg.data_len = 1;
+	msg->msg.data = msg->msg_data;
+	deliver_response(msg);
+}
+
 /* Find the next sequence number not being used and add the given
    message with the given timeout to the sequence table.  This must be
    called with the interface's seq_lock held. */
@@ -727,14 +785,8 @@
 	}
 	spin_unlock_irqrestore(&(intf->seq_lock), flags);
 
-	if (msg) {
-		msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
-		msg->msg_data[0] = err;
-		msg->msg.netfn |= 1; /* Convert to a response. */
-		msg->msg.data_len = 1;
-		msg->msg.data = msg->msg_data;
-		deliver_response(msg);
-	}
+	if (msg)
+		deliver_err_response(msg, err);
 
 	return rv;
 }
@@ -776,17 +828,18 @@
 	if (!new_user)
 		return -ENOMEM;
 
-	spin_lock_irqsave(&interfaces_lock, flags);
-	intf = ipmi_interfaces[if_num];
-	if ((if_num >= MAX_IPMI_INTERFACES) || IPMI_INVALID_INTERFACE(intf)) {
-		spin_unlock_irqrestore(&interfaces_lock, flags);
-		rv = -EINVAL;
-		goto out_kfree;
+	mutex_lock(&ipmi_interfaces_mutex);
+	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+		if (intf->intf_num == if_num)
+			goto found;
 	}
+	/* Not found, return an error */
+	rv = -EINVAL;
+	goto out_kfree;
 
+ found:
 	/* Note that each existing user holds a refcount to the interface. */
 	kref_get(&intf->refcount);
-	spin_unlock_irqrestore(&interfaces_lock, flags);
 
 	kref_init(&new_user->refcount);
 	new_user->handler = handler;
@@ -807,6 +860,10 @@
 		}
 	}
 
+	/* Hold the lock so intf->handlers is guaranteed to be good
+	 * until now */
+	mutex_unlock(&ipmi_interfaces_mutex);
+
 	new_user->valid = 1;
 	spin_lock_irqsave(&intf->seq_lock, flags);
 	list_add_rcu(&new_user->link, &intf->users);
@@ -817,6 +874,7 @@
 out_kref:
 	kref_put(&intf->refcount, intf_free);
 out_kfree:
+	mutex_unlock(&ipmi_interfaces_mutex);
 	kfree(new_user);
 	return rv;
 }
@@ -846,6 +904,7 @@
 		    && (intf->seq_table[i].recv_msg->user == user))
 		{
 			intf->seq_table[i].inuse = 0;
+			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
 		}
 	}
 	spin_unlock_irqrestore(&intf->seq_lock, flags);
@@ -872,9 +931,13 @@
 		kfree(rcvr);
 	}
 
-	module_put(intf->handlers->owner);
-	if (intf->handlers->dec_usecount)
-		intf->handlers->dec_usecount(intf->send_info);
+	mutex_lock(&ipmi_interfaces_mutex);
+	if (intf->handlers) {
+		module_put(intf->handlers->owner);
+		if (intf->handlers->dec_usecount)
+			intf->handlers->dec_usecount(intf->send_info);
+	}
+	mutex_unlock(&ipmi_interfaces_mutex);
 
 	kref_put(&intf->refcount, intf_free);
 
@@ -887,8 +950,8 @@
 		      unsigned char *major,
 		      unsigned char *minor)
 {
-	*major = ipmi_version_major(&user->intf->bmc->id);
-	*minor = ipmi_version_minor(&user->intf->bmc->id);
+	*major = user->intf->ipmi_version_major;
+	*minor = user->intf->ipmi_version_minor;
 }
 
 int ipmi_set_my_address(ipmi_user_t   user,
@@ -931,6 +994,65 @@
 	return 0;
 }
 
+int ipmi_get_maintenance_mode(ipmi_user_t user)
+{
+	int           mode;
+	unsigned long flags;
+
+	spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
+	mode = user->intf->maintenance_mode;
+	spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
+
+	return mode;
+}
+EXPORT_SYMBOL(ipmi_get_maintenance_mode);
+
+static void maintenance_mode_update(ipmi_smi_t intf)
+{
+	if (intf->handlers->set_maintenance_mode)
+		intf->handlers->set_maintenance_mode(
+			intf->send_info, intf->maintenance_mode_enable);
+}
+
+int ipmi_set_maintenance_mode(ipmi_user_t user, int mode)
+{
+	int           rv = 0;
+	unsigned long flags;
+	ipmi_smi_t    intf = user->intf;
+
+	spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
+	if (intf->maintenance_mode != mode) {
+		switch (mode) {
+		case IPMI_MAINTENANCE_MODE_AUTO:
+			intf->maintenance_mode = mode;
+			intf->maintenance_mode_enable
+				= (intf->auto_maintenance_timeout > 0);
+			break;
+
+		case IPMI_MAINTENANCE_MODE_OFF:
+			intf->maintenance_mode = mode;
+			intf->maintenance_mode_enable = 0;
+			break;
+
+		case IPMI_MAINTENANCE_MODE_ON:
+			intf->maintenance_mode = mode;
+			intf->maintenance_mode_enable = 1;
+			break;
+
+		default:
+			rv = -EINVAL;
+			goto out_unlock;
+		}
+
+		maintenance_mode_update(intf);
+	}
+ out_unlock:
+	spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
+
+	return rv;
+}
+EXPORT_SYMBOL(ipmi_set_maintenance_mode);
+
 int ipmi_set_gets_events(ipmi_user_t user, int val)
 {
 	unsigned long        flags;
@@ -943,20 +1065,33 @@
 	spin_lock_irqsave(&intf->events_lock, flags);
 	user->gets_events = val;
 
-	if (val) {
-		/* Deliver any queued events. */
+	if (intf->delivering_events)
+		/*
+		 * Another thread is delivering events for this, so
+		 * let it handle any new events.
+		 */
+		goto out;
+
+	/* Deliver any queued events. */
+	while (user->gets_events && !list_empty(&intf->waiting_events)) {
 		list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
 			list_move_tail(&msg->link, &msgs);
 		intf->waiting_events_count = 0;
+
+		intf->delivering_events = 1;
+		spin_unlock_irqrestore(&intf->events_lock, flags);
+
+		list_for_each_entry_safe(msg, msg2, &msgs, link) {
+			msg->user = user;
+			kref_get(&user->refcount);
+			deliver_response(msg);
+		}
+
+		spin_lock_irqsave(&intf->events_lock, flags);
+		intf->delivering_events = 0;
 	}
 
-	/* Hold the events lock while doing this to preserve order. */
-	list_for_each_entry_safe(msg, msg2, &msgs, link) {
-		msg->user = user;
-		kref_get(&user->refcount);
-		deliver_response(msg);
-	}
-
+ out:
 	spin_unlock_irqrestore(&intf->events_lock, flags);
 
 	return 0;
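
The reworked ipmi_set_gets_events() drops events_lock around deliver_response() (which can call back into the user and sleep), then re-takes it and loops in case more events were queued in the meantime; the new delivering_events flag keeps a second caller from draining the same queue concurrently. In outline, with hypothetical names (my_queue, drain_queue, deliver):

#include <linux/list.h>
#include <linux/spinlock.h>

struct msg {
	struct list_head link;
	/* payload ... */
};

struct my_queue {
	spinlock_t       lock;
	struct list_head pending;
	int              draining;
};

void deliver(struct msg *m);	/* may sleep; must not be called under the lock */

static void drain_queue(struct my_queue *q)
{
	struct msg *m, *m2;
	unsigned long flags;
	LIST_HEAD(batch);

	spin_lock_irqsave(&q->lock, flags);
	if (q->draining)
		goto out;		/* another thread is already delivering */

	q->draining = 1;
	while (!list_empty(&q->pending)) {
		list_splice_init(&q->pending, &batch);
		spin_unlock_irqrestore(&q->lock, flags);

		list_for_each_entry_safe(m, m2, &batch, link) {
			list_del(&m->link);
			deliver(m);	/* lock not held here */
		}

		spin_lock_irqsave(&q->lock, flags);
	}
	q->draining = 0;
 out:
	spin_unlock_irqrestore(&q->lock, flags);
}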
@@ -1067,7 +1202,8 @@
 void ipmi_user_set_run_to_completion(ipmi_user_t user, int val)
 {
 	ipmi_smi_t intf = user->intf;
-	intf->handlers->set_run_to_completion(intf->send_info, val);
+	if (intf->handlers)
+		intf->handlers->set_run_to_completion(intf->send_info, val);
 }
 
 static unsigned char
@@ -1178,10 +1314,11 @@
 			  int                  retries,
 			  unsigned int         retry_time_ms)
 {
-	int                  rv = 0;
-	struct ipmi_smi_msg  *smi_msg;
-	struct ipmi_recv_msg *recv_msg;
-	unsigned long        flags;
+	int                      rv = 0;
+	struct ipmi_smi_msg      *smi_msg;
+	struct ipmi_recv_msg     *recv_msg;
+	unsigned long            flags;
+	struct ipmi_smi_handlers *handlers;
 
 
 	if (supplied_recv) {
@@ -1204,6 +1341,13 @@
 		}
 	}
 
+	rcu_read_lock();
+	handlers = intf->handlers;
+	if (!handlers) {
+		rv = -ENODEV;
+		goto out_err;
+	}
+
 	recv_msg->user = user;
 	if (user)
 		kref_get(&user->refcount);
@@ -1246,6 +1390,24 @@
 			goto out_err;
 		}
 
+		if (((msg->netfn == IPMI_NETFN_APP_REQUEST)
+		      && ((msg->cmd == IPMI_COLD_RESET_CMD)
+			  || (msg->cmd == IPMI_WARM_RESET_CMD)))
+		     || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST))
+		{
+			spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
+			intf->auto_maintenance_timeout
+				= IPMI_MAINTENANCE_MODE_TIMEOUT;
+			if (!intf->maintenance_mode
+			    && !intf->maintenance_mode_enable)
+			{
+				intf->maintenance_mode_enable = 1;
+				maintenance_mode_update(intf);
+			}
+			spin_unlock_irqrestore(&intf->maintenance_mode_lock,
+					       flags);
+		}
+
 		if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) {
 			spin_lock_irqsave(&intf->counter_lock, flags);
 			intf->sent_invalid_commands++;
@@ -1520,11 +1682,14 @@
 		printk("\n");
 	}
 #endif
-	intf->handlers->sender(intf->send_info, smi_msg, priority);
+
+	handlers->sender(intf->send_info, smi_msg, priority);
+	rcu_read_unlock();
 
 	return 0;
 
  out_err:
+	rcu_read_unlock();
 	ipmi_free_smi_msg(smi_msg);
 	ipmi_free_recv_msg(recv_msg);
 	return rv;
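
i_ipmi_request() now reads intf->handlers once inside rcu_read_lock() and fails with -ENODEV if it is NULL; ipmi_unregister_smi() (further down) clears the pointer and calls synchronize_rcu() before tearing anything down. A condensed sketch of that read-side/write-side pairing, with hypothetical names (struct iface, struct ops):

#include <linux/rcupdate.h>
#include <linux/errno.h>

struct ops {
	void (*send)(void *ctx);
};

struct iface {
	struct ops *ops;	/* cleared on unregister */
	void       *ctx;
};

static int iface_send(struct iface *ifc)
{
	struct ops *ops;
	int rv = 0;

	rcu_read_lock();
	ops = ifc->ops;			/* fetch once into a local variable */
	if (ops)
		ops->send(ifc->ctx);	/* safe: the writer waits in synchronize_rcu() */
	else
		rv = -ENODEV;
	rcu_read_unlock();
	return rv;
}

static void iface_shutdown(struct iface *ifc)
{
	ifc->ops = NULL;
	synchronize_rcu();	/* every reader that saw the old ops has finished */
	/* now safe to release whatever ops pointed at */
}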
@@ -1604,6 +1769,7 @@
 			      -1, 0);
 }
 
+#ifdef CONFIG_PROC_FS
 static int ipmb_file_read_proc(char *page, char **start, off_t off,
 			       int count, int *eof, void *data)
 {
@@ -1692,6 +1858,7 @@
 
 	return (out - ((char *) page));
 }
+#endif /* CONFIG_PROC_FS */
 
 int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
 			    read_proc_t *read_proc, write_proc_t *write_proc,
@@ -1817,13 +1984,12 @@
 	struct bmc_device *bmc = dev_get_drvdata(dev);
 
 	return (bmc->id.product_id == id->product_id
-		&& bmc->id.product_id == id->product_id
 		&& bmc->id.device_id == id->device_id);
 }
 
 static struct bmc_device *ipmi_find_bmc_prod_dev_id(
 	struct device_driver *drv,
-	unsigned char product_id, unsigned char device_id)
+	unsigned int product_id, unsigned char device_id)
 {
 	struct prod_dev_id id = {
 		.product_id = product_id,
@@ -1940,6 +2106,9 @@
 
 static void remove_files(struct bmc_device *bmc)
 {
+	if (!bmc->dev)
+		return;
+
 	device_remove_file(&bmc->dev->dev,
 			   &bmc->device_id_attr);
 	device_remove_file(&bmc->dev->dev,
@@ -1973,7 +2142,8 @@
 	bmc = container_of(ref, struct bmc_device, refcount);
 
 	remove_files(bmc);
-	platform_device_unregister(bmc->dev);
+	if (bmc->dev)
+		platform_device_unregister(bmc->dev);
 	kfree(bmc);
 }
 
@@ -1981,7 +2151,11 @@
 {
 	struct bmc_device *bmc = intf->bmc;
 
-	sysfs_remove_link(&intf->si_dev->kobj, "bmc");
+	if (intf->sysfs_name) {
+		sysfs_remove_link(&intf->si_dev->kobj, intf->sysfs_name);
+		kfree(intf->sysfs_name);
+		intf->sysfs_name = NULL;
+	}
 	if (intf->my_dev_name) {
 		sysfs_remove_link(&bmc->dev->dev.kobj, intf->my_dev_name);
 		kfree(intf->my_dev_name);
@@ -1990,6 +2164,7 @@
 
 	mutex_lock(&ipmidriver_mutex);
 	kref_put(&bmc->refcount, cleanup_bmc_device);
+	intf->bmc = NULL;
 	mutex_unlock(&ipmidriver_mutex);
 }
 
@@ -1997,6 +2172,56 @@
 {
 	int err;
 
+	bmc->device_id_attr.attr.name = "device_id";
+	bmc->device_id_attr.attr.owner = THIS_MODULE;
+	bmc->device_id_attr.attr.mode = S_IRUGO;
+	bmc->device_id_attr.show = device_id_show;
+
+	bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs";
+	bmc->provides_dev_sdrs_attr.attr.owner = THIS_MODULE;
+	bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO;
+	bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show;
+
+	bmc->revision_attr.attr.name = "revision";
+	bmc->revision_attr.attr.owner = THIS_MODULE;
+	bmc->revision_attr.attr.mode = S_IRUGO;
+	bmc->revision_attr.show = revision_show;
+
+	bmc->firmware_rev_attr.attr.name = "firmware_revision";
+	bmc->firmware_rev_attr.attr.owner = THIS_MODULE;
+	bmc->firmware_rev_attr.attr.mode = S_IRUGO;
+	bmc->firmware_rev_attr.show = firmware_rev_show;
+
+	bmc->version_attr.attr.name = "ipmi_version";
+	bmc->version_attr.attr.owner = THIS_MODULE;
+	bmc->version_attr.attr.mode = S_IRUGO;
+	bmc->version_attr.show = ipmi_version_show;
+
+	bmc->add_dev_support_attr.attr.name = "additional_device_support";
+	bmc->add_dev_support_attr.attr.owner = THIS_MODULE;
+	bmc->add_dev_support_attr.attr.mode = S_IRUGO;
+	bmc->add_dev_support_attr.show = add_dev_support_show;
+
+	bmc->manufacturer_id_attr.attr.name = "manufacturer_id";
+	bmc->manufacturer_id_attr.attr.owner = THIS_MODULE;
+	bmc->manufacturer_id_attr.attr.mode = S_IRUGO;
+	bmc->manufacturer_id_attr.show = manufacturer_id_show;
+
+	bmc->product_id_attr.attr.name = "product_id";
+	bmc->product_id_attr.attr.owner = THIS_MODULE;
+	bmc->product_id_attr.attr.mode = S_IRUGO;
+	bmc->product_id_attr.show = product_id_show;
+
+	bmc->guid_attr.attr.name = "guid";
+	bmc->guid_attr.attr.owner = THIS_MODULE;
+	bmc->guid_attr.attr.mode = S_IRUGO;
+	bmc->guid_attr.show = guid_show;
+
+	bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
+	bmc->aux_firmware_rev_attr.attr.owner = THIS_MODULE;
+	bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO;
+	bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show;
+
 	err = device_create_file(&bmc->dev->dev,
 			   &bmc->device_id_attr);
 	if (err) goto out;
@@ -2066,7 +2291,8 @@
 	return err;
 }
 
-static int ipmi_bmc_register(ipmi_smi_t intf)
+static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
+			     const char *sysfs_name)
 {
 	int               rv;
 	struct bmc_device *bmc = intf->bmc;
@@ -2106,9 +2332,39 @@
 		       bmc->id.product_id,
 		       bmc->id.device_id);
 	} else {
-		bmc->dev = platform_device_alloc("ipmi_bmc",
-						 bmc->id.device_id);
+		char name[14];
+		unsigned char orig_dev_id = bmc->id.device_id;
+		int warn_printed = 0;
+
+		snprintf(name, sizeof(name),
+			 "ipmi_bmc.%4.4x", bmc->id.product_id);
+
+		while (ipmi_find_bmc_prod_dev_id(&ipmidriver,
+						 bmc->id.product_id,
+						 bmc->id.device_id))
+		{
+			if (!warn_printed) {
+				printk(KERN_WARNING PFX
+				       "This machine has two different BMCs"
+				       " with the same product id and device"
+				       " id.  This is an error in the"
+				       " firmware, but incrementing the"
+				       " device id to work around the problem."
+				       " Prod ID = 0x%x, Dev ID = 0x%x\n",
+				       bmc->id.product_id, bmc->id.device_id);
+				warn_printed = 1;
+			}
+			bmc->id.device_id++; /* Wraps at 255 */
+			if (bmc->id.device_id == orig_dev_id) {
+				printk(KERN_ERR PFX
+				       "Out of device ids!\n");
+				break;
+			}
+		}
+
+		bmc->dev = platform_device_alloc(name, bmc->id.device_id);
 		if (!bmc->dev) {
+			mutex_unlock(&ipmidriver_mutex);
 			printk(KERN_ERR
 			       "ipmi_msghandler:"
 			       " Unable to allocate platform device\n");
@@ -2121,6 +2377,8 @@
 		rv = platform_device_add(bmc->dev);
 		mutex_unlock(&ipmidriver_mutex);
 		if (rv) {
+			platform_device_put(bmc->dev);
+			bmc->dev = NULL;
 			printk(KERN_ERR
 			       "ipmi_msghandler:"
 			       " Unable to register bmc device: %d\n",
@@ -2130,57 +2388,6 @@
 			return rv;
 		}
 
-		bmc->device_id_attr.attr.name = "device_id";
-		bmc->device_id_attr.attr.owner = THIS_MODULE;
-		bmc->device_id_attr.attr.mode = S_IRUGO;
-		bmc->device_id_attr.show = device_id_show;
-
-		bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs";
-		bmc->provides_dev_sdrs_attr.attr.owner = THIS_MODULE;
-		bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO;
-		bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show;
-
-		bmc->revision_attr.attr.name = "revision";
-		bmc->revision_attr.attr.owner = THIS_MODULE;
-		bmc->revision_attr.attr.mode = S_IRUGO;
-		bmc->revision_attr.show = revision_show;
-
-		bmc->firmware_rev_attr.attr.name = "firmware_revision";
-		bmc->firmware_rev_attr.attr.owner = THIS_MODULE;
-		bmc->firmware_rev_attr.attr.mode = S_IRUGO;
-		bmc->firmware_rev_attr.show = firmware_rev_show;
-
-		bmc->version_attr.attr.name = "ipmi_version";
-		bmc->version_attr.attr.owner = THIS_MODULE;
-		bmc->version_attr.attr.mode = S_IRUGO;
-		bmc->version_attr.show = ipmi_version_show;
-
-		bmc->add_dev_support_attr.attr.name
-			= "additional_device_support";
-		bmc->add_dev_support_attr.attr.owner = THIS_MODULE;
-		bmc->add_dev_support_attr.attr.mode = S_IRUGO;
-		bmc->add_dev_support_attr.show = add_dev_support_show;
-
-		bmc->manufacturer_id_attr.attr.name = "manufacturer_id";
-		bmc->manufacturer_id_attr.attr.owner = THIS_MODULE;
-		bmc->manufacturer_id_attr.attr.mode = S_IRUGO;
-		bmc->manufacturer_id_attr.show = manufacturer_id_show;
-
-		bmc->product_id_attr.attr.name = "product_id";
-		bmc->product_id_attr.attr.owner = THIS_MODULE;
-		bmc->product_id_attr.attr.mode = S_IRUGO;
-		bmc->product_id_attr.show = product_id_show;
-
-		bmc->guid_attr.attr.name = "guid";
-		bmc->guid_attr.attr.owner = THIS_MODULE;
-		bmc->guid_attr.attr.mode = S_IRUGO;
-		bmc->guid_attr.show = guid_show;
-
-		bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
-		bmc->aux_firmware_rev_attr.attr.owner = THIS_MODULE;
-		bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO;
-		bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show;
-
 		rv = create_files(bmc);
 		if (rv) {
 			mutex_lock(&ipmidriver_mutex);
@@ -2202,29 +2409,44 @@
 	 * create symlink from system interface device to bmc device
 	 * and back.
 	 */
+	intf->sysfs_name = kstrdup(sysfs_name, GFP_KERNEL);
+	if (!intf->sysfs_name) {
+		rv = -ENOMEM;
+		printk(KERN_ERR
+		       "ipmi_msghandler: allocate link to BMC: %d\n",
+		       rv);
+		goto out_err;
+	}
+
 	rv = sysfs_create_link(&intf->si_dev->kobj,
-			       &bmc->dev->dev.kobj, "bmc");
+			       &bmc->dev->dev.kobj, intf->sysfs_name);
 	if (rv) {
+		kfree(intf->sysfs_name);
+		intf->sysfs_name = NULL;
 		printk(KERN_ERR
 		       "ipmi_msghandler: Unable to create bmc symlink: %d\n",
 		       rv);
 		goto out_err;
 	}
 
-	size = snprintf(dummy, 0, "ipmi%d", intf->intf_num);
+	size = snprintf(dummy, 0, "ipmi%d", ifnum);
 	intf->my_dev_name = kmalloc(size+1, GFP_KERNEL);
 	if (!intf->my_dev_name) {
+		kfree(intf->sysfs_name);
+		intf->sysfs_name = NULL;
 		rv = -ENOMEM;
 		printk(KERN_ERR
 		       "ipmi_msghandler: allocate link from BMC: %d\n",
 		       rv);
 		goto out_err;
 	}
-	snprintf(intf->my_dev_name, size+1, "ipmi%d", intf->intf_num);
+	snprintf(intf->my_dev_name, size+1, "ipmi%d", ifnum);
 
 	rv = sysfs_create_link(&bmc->dev->dev.kobj, &intf->si_dev->kobj,
 			       intf->my_dev_name);
 	if (rv) {
+		kfree(intf->sysfs_name);
+		intf->sysfs_name = NULL;
 		kfree(intf->my_dev_name);
 		intf->my_dev_name = NULL;
 		printk(KERN_ERR
@@ -2409,17 +2631,14 @@
 		      void		       *send_info,
 		      struct ipmi_device_id    *device_id,
 		      struct device            *si_dev,
+		      const char               *sysfs_name,
 		      unsigned char            slave_addr)
 {
 	int              i, j;
 	int              rv;
 	ipmi_smi_t       intf;
-	unsigned long    flags;
-	int              version_major;
-	int              version_minor;
-
-	version_major = ipmi_version_major(device_id);
-	version_minor = ipmi_version_minor(device_id);
+	ipmi_smi_t       tintf;
+	struct list_head *link;
 
 	/* Make sure the driver is actually initialized, this handles
 	   problems with initialization order. */
@@ -2437,12 +2656,16 @@
 	if (!intf)
 		return -ENOMEM;
 	memset(intf, 0, sizeof(*intf));
+
+	intf->ipmi_version_major = ipmi_version_major(device_id);
+	intf->ipmi_version_minor = ipmi_version_minor(device_id);
+
 	intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL);
 	if (!intf->bmc) {
 		kfree(intf);
 		return -ENOMEM;
 	}
-	intf->intf_num = -1;
+	intf->intf_num = -1; /* Mark it invalid for now. */
 	kref_init(&intf->refcount);
 	intf->bmc->id = *device_id;
 	intf->si_dev = si_dev;
@@ -2470,26 +2693,30 @@
 	INIT_LIST_HEAD(&intf->waiting_events);
 	intf->waiting_events_count = 0;
 	mutex_init(&intf->cmd_rcvrs_mutex);
+	spin_lock_init(&intf->maintenance_mode_lock);
 	INIT_LIST_HEAD(&intf->cmd_rcvrs);
 	init_waitqueue_head(&intf->waitq);
 
 	spin_lock_init(&intf->counter_lock);
 	intf->proc_dir = NULL;
 
-	rv = -ENOMEM;
-	spin_lock_irqsave(&interfaces_lock, flags);
-	for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
-		if (ipmi_interfaces[i] == NULL) {
-			intf->intf_num = i;
-			/* Reserve the entry till we are done. */
-			ipmi_interfaces[i] = IPMI_INVALID_INTERFACE_ENTRY;
-			rv = 0;
+	mutex_lock(&smi_watchers_mutex);
+	mutex_lock(&ipmi_interfaces_mutex);
+	/* Look for a hole in the numbers. */
+	i = 0;
+	link = &ipmi_interfaces;
+	list_for_each_entry_rcu(tintf, &ipmi_interfaces, link) {
+		if (tintf->intf_num != i) {
+			link = &tintf->link;
 			break;
 		}
+		i++;
 	}
-	spin_unlock_irqrestore(&interfaces_lock, flags);
-	if (rv)
-		goto out;
+	/* Add the new interface in numeric order. */
+	if (i == 0)
+		list_add_rcu(&intf->link, &ipmi_interfaces);
+	else
+		list_add_tail_rcu(&intf->link, link);
 
 	rv = handlers->start_processing(send_info, intf);
 	if (rv)
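
Interface numbers are no longer indices into a fixed MAX_IPMI_INTERFACES array: registration walks the RCU-protected list for the first unused number and links the new interface in ascending order. A small sketch of that first-free-number insertion, with hypothetical names (struct node, insert_numbered); the caller is assumed to hold the list mutex, just as ipmi_register_smi() holds ipmi_interfaces_mutex here:

#include <linux/list.h>
#include <linux/rculist.h>

struct node {
	struct list_head link;
	int              num;	/* kept in ascending order */
};

static void insert_numbered(struct list_head *nodes, struct node *new)
{
	struct node      *n;
	struct list_head *pos = nodes;
	int               i = 0;

	list_for_each_entry_rcu(n, nodes, link) {
		if (n->num != i) {
			pos = &n->link;	/* found a hole: insert before n */
			break;
		}
		i++;
	}
	new->num = i;
	if (i == 0)
		list_add_rcu(&new->link, nodes);	/* empty list or hole at 0 */
	else
		list_add_tail_rcu(&new->link, pos);	/* before n, or at the tail */
}

Holes appear because ipmi_unregister_smi() removes entries, so numbers can be reused without keeping a fixed-size table.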
@@ -2497,8 +2724,9 @@
 
 	get_guid(intf);
 
-	if ((version_major > 1)
-	    || ((version_major == 1) && (version_minor >= 5)))
+	if ((intf->ipmi_version_major > 1)
+	    || ((intf->ipmi_version_major == 1)
+		&& (intf->ipmi_version_minor >= 5)))
 	{
 		/* Start scanning the channels to see what is
 		   available. */
@@ -2521,64 +2749,67 @@
 	if (rv == 0)
 		rv = add_proc_entries(intf, i);
 
-	rv = ipmi_bmc_register(intf);
+	rv = ipmi_bmc_register(intf, i, sysfs_name);
 
  out:
 	if (rv) {
 		if (intf->proc_dir)
 			remove_proc_entries(intf);
+		intf->handlers = NULL;
+		list_del_rcu(&intf->link);
+		mutex_unlock(&ipmi_interfaces_mutex);
+		mutex_unlock(&smi_watchers_mutex);
+		synchronize_rcu();
 		kref_put(&intf->refcount, intf_free);
-		if (i < MAX_IPMI_INTERFACES) {
-			spin_lock_irqsave(&interfaces_lock, flags);
-			ipmi_interfaces[i] = NULL;
-			spin_unlock_irqrestore(&interfaces_lock, flags);
-		}
 	} else {
-		spin_lock_irqsave(&interfaces_lock, flags);
-		ipmi_interfaces[i] = intf;
-		spin_unlock_irqrestore(&interfaces_lock, flags);
+		/* After this point the interface is legal to use. */
+		intf->intf_num = i;
+		mutex_unlock(&ipmi_interfaces_mutex);
 		call_smi_watchers(i, intf->si_dev);
+		mutex_unlock(&smi_watchers_mutex);
 	}
 
 	return rv;
 }
 
+static void cleanup_smi_msgs(ipmi_smi_t intf)
+{
+	int              i;
+	struct seq_table *ent;
+
+	/* No need for locks, the interface is down. */
+	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
+		ent = &(intf->seq_table[i]);
+		if (!ent->inuse)
+			continue;
+		deliver_err_response(ent->recv_msg, IPMI_ERR_UNSPECIFIED);
+	}
+}
+
 int ipmi_unregister_smi(ipmi_smi_t intf)
 {
-	int                     i;
 	struct ipmi_smi_watcher *w;
-	unsigned long           flags;
+	int    intf_num = intf->intf_num;
 
 	ipmi_bmc_unregister(intf);
 
-	spin_lock_irqsave(&interfaces_lock, flags);
-	for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
-		if (ipmi_interfaces[i] == intf) {
-			/* Set the interface number reserved until we
-			 * are done. */
-			ipmi_interfaces[i] = IPMI_INVALID_INTERFACE_ENTRY;
-			intf->intf_num = -1;
-			break;
-		}
-	}
-	spin_unlock_irqrestore(&interfaces_lock,flags);
+	mutex_lock(&smi_watchers_mutex);
+	mutex_lock(&ipmi_interfaces_mutex);
+	intf->intf_num = -1;
+	intf->handlers = NULL;
+	list_del_rcu(&intf->link);
+	mutex_unlock(&ipmi_interfaces_mutex);
+	synchronize_rcu();
 
-	if (i == MAX_IPMI_INTERFACES)
-		return -ENODEV;
+	cleanup_smi_msgs(intf);
 
 	remove_proc_entries(intf);
 
 	/* Call all the watcher interfaces to tell them that
 	   an interface is gone. */
-	down_read(&smi_watchers_sem);
 	list_for_each_entry(w, &smi_watchers, link)
-		w->smi_gone(i);
-	up_read(&smi_watchers_sem);
-
-	/* Allow the entry to be reused now. */
-	spin_lock_irqsave(&interfaces_lock, flags);
-	ipmi_interfaces[i] = NULL;
-	spin_unlock_irqrestore(&interfaces_lock,flags);
+		w->smi_gone(intf_num);
+	mutex_unlock(&smi_watchers_mutex);
 
 	kref_put(&intf->refcount, intf_free);
 	return 0;
@@ -2660,6 +2891,7 @@
 	struct ipmi_ipmb_addr    *ipmb_addr;
 	struct ipmi_recv_msg     *recv_msg;
 	unsigned long            flags;
+	struct ipmi_smi_handlers *handlers;
 
 	if (msg->rsp_size < 10) {
 		/* Message not big enough, just ignore it. */
@@ -2716,10 +2948,16 @@
 		printk("\n");
 	}
 #endif
-		intf->handlers->sender(intf->send_info, msg, 0);
-
-		rv = -1; /* We used the message, so return the value that
-			    causes it to not be freed or queued. */
+		rcu_read_lock();
+		handlers = intf->handlers;
+		if (handlers) {
+			handlers->sender(intf->send_info, msg, 0);
+			/* We used the message, so return the value
+			   that causes it to not be freed or
+			   queued. */
+			rv = -1;
+		}
+		rcu_read_unlock();
 	} else {
 		/* Deliver the message to the user. */
 		spin_lock_irqsave(&intf->counter_lock, flags);
@@ -3309,16 +3547,6 @@
 	rcu_read_unlock();
 }
 
-static void
-handle_msg_timeout(struct ipmi_recv_msg *msg)
-{
-	msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
-	msg->msg_data[0] = IPMI_TIMEOUT_COMPLETION_CODE;
-	msg->msg.netfn |= 1; /* Convert to a response. */
-	msg->msg.data_len = 1;
-	msg->msg.data = msg->msg_data;
-	deliver_response(msg);
-}
 
 static struct ipmi_smi_msg *
 smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
@@ -3350,7 +3578,11 @@
 			      struct list_head *timeouts, long timeout_period,
 			      int slot, unsigned long *flags)
 {
-	struct ipmi_recv_msg *msg;
+	struct ipmi_recv_msg     *msg;
+	struct ipmi_smi_handlers *handlers;
+
+	if (intf->intf_num == -1)
+		return;
 
 	if (!ent->inuse)
 		return;
@@ -3393,13 +3625,19 @@
 			return;
 
 		spin_unlock_irqrestore(&intf->seq_lock, *flags);
+
 		/* Send the new message.  We send with a zero
 		 * priority.  It timed out, I doubt time is
 		 * that critical now, and high priority
 		 * messages are really only for messages to the
 		 * local MC, which don't get resent. */
-		intf->handlers->sender(intf->send_info,
-				       smi_msg, 0);
+		handlers = intf->handlers;
+		if (handlers)
+			intf->handlers->sender(intf->send_info,
+					       smi_msg, 0);
+		else
+			ipmi_free_smi_msg(smi_msg);
+
 		spin_lock_irqsave(&intf->seq_lock, *flags);
 	}
 }
@@ -3411,18 +3649,12 @@
 	struct ipmi_recv_msg *msg, *msg2;
 	struct ipmi_smi_msg  *smi_msg, *smi_msg2;
 	unsigned long        flags;
-	int                  i, j;
+	int                  i;
 
 	INIT_LIST_HEAD(&timeouts);
 
-	spin_lock(&interfaces_lock);
-	for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
-		intf = ipmi_interfaces[i];
-		if (IPMI_INVALID_INTERFACE(intf))
-			continue;
-		kref_get(&intf->refcount);
-		spin_unlock(&interfaces_lock);
-
+	rcu_read_lock();
+	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
 		/* See if any waiting messages need to be processed. */
 		spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
 		list_for_each_entry_safe(smi_msg, smi_msg2,
@@ -3442,35 +3674,60 @@
 		   have timed out, putting them in the timeouts
 		   list. */
 		spin_lock_irqsave(&intf->seq_lock, flags);
-		for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++)
-			check_msg_timeout(intf, &(intf->seq_table[j]),
-					  &timeouts, timeout_period, j,
+		for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
+			check_msg_timeout(intf, &(intf->seq_table[i]),
+					  &timeouts, timeout_period, i,
 					  &flags);
 		spin_unlock_irqrestore(&intf->seq_lock, flags);
 
 		list_for_each_entry_safe(msg, msg2, &timeouts, link)
-			handle_msg_timeout(msg);
+			deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE);
 
-		kref_put(&intf->refcount, intf_free);
-		spin_lock(&interfaces_lock);
+		/*
+		 * Maintenance mode handling.  Check the timeout
+		 * optimistically before we claim the lock.  It may
+		 * mean a timeout gets missed occasionally, but that
+		 * only means the timeout gets extended by one period
+		 * in that case.  No big deal, and it avoids the lock
+		 * most of the time.
+		 */
+		if (intf->auto_maintenance_timeout > 0) {
+			spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
+			if (intf->auto_maintenance_timeout > 0) {
+				intf->auto_maintenance_timeout
+					-= timeout_period;
+				if (!intf->maintenance_mode
+				    && (intf->auto_maintenance_timeout <= 0))
+				{
+					intf->maintenance_mode_enable = 0;
+					maintenance_mode_update(intf);
+				}
+			}
+			spin_unlock_irqrestore(&intf->maintenance_mode_lock,
+					       flags);
+		}
 	}
-	spin_unlock(&interfaces_lock);
+	rcu_read_unlock();
 }
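
The maintenance-mode bookkeeping above checks auto_maintenance_timeout once without the lock and again after taking it, trading an occasionally missed period for skipping the lock on the common path. A small pthreads sketch of that optimistic check-then-recheck pattern (illustrative, made-up names, not the driver code):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int auto_timeout = 5000;     /* ms left, protected by lock */

    /* Called periodically; period is how many ms elapsed since last call. */
    static void tick(int period)
    {
            if (auto_timeout > 0) {             /* optimistic, unlocked peek */
                    pthread_mutex_lock(&lock);
                    if (auto_timeout > 0) {     /* recheck under the lock */
                            auto_timeout -= period;
                            if (auto_timeout <= 0)
                                    printf("leaving maintenance mode\n");
                    }
                    pthread_mutex_unlock(&lock);
            }
    }

    int main(void)
    {
            int i;
            for (i = 0; i < 6; i++)
                    tick(1000);
            return 0;
    }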
 
 static void ipmi_request_event(void)
 {
-	ipmi_smi_t intf;
-	int        i;
+	ipmi_smi_t               intf;
+	struct ipmi_smi_handlers *handlers;
 
-	spin_lock(&interfaces_lock);
-	for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
-		intf = ipmi_interfaces[i];
-		if (IPMI_INVALID_INTERFACE(intf))
+	rcu_read_lock();
+	/* Called from the timer, no need to check if handlers is
+	 * valid. */
+	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+		/* No event requests when in maintenance mode. */
+		if (intf->maintenance_mode_enable)
 			continue;
 
-		intf->handlers->request_events(intf->send_info);
+		handlers = intf->handlers;
+		if (handlers)
+			handlers->request_events(intf->send_info);
 	}
-	spin_unlock(&interfaces_lock);
+	rcu_read_unlock();
 }
 
 static struct timer_list ipmi_timer;
@@ -3599,7 +3856,6 @@
 	struct kernel_ipmi_msg            msg;
 	ipmi_smi_t                        intf;
 	unsigned char                     data[16];
-	int                               i;
 	struct ipmi_system_interface_addr *si;
 	struct ipmi_addr                  addr;
 	struct ipmi_smi_msg               smi_msg;
@@ -3633,9 +3889,9 @@
 	recv_msg.done = dummy_recv_done_handler;
 
 	/* For every registered interface, send the event. */
-	for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
-		intf = ipmi_interfaces[i];
-		if (IPMI_INVALID_INTERFACE(intf))
+	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+		if (!intf->handlers)
+			/* Interface is not ready. */
 			continue;
 
 		/* Send the event announcing the panic. */
@@ -3660,13 +3916,14 @@
 	if (!str) 
 		return;
 
-	for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
+	/* For every registered interface, send the event. */
+	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
 		char                  *p = str;
 		struct ipmi_ipmb_addr *ipmb;
 		int                   j;
 
-		intf = ipmi_interfaces[i];
-		if (IPMI_INVALID_INTERFACE(intf))
+		if (intf->intf_num == -1)
+			/* Interface was not ready yet. */
 			continue;
 
 		/* First job here is to figure out where to send the
@@ -3792,7 +4049,6 @@
 		       unsigned long         event,
                        void                  *ptr)
 {
-	int        i;
 	ipmi_smi_t intf;
 
 	if (has_panicked)
@@ -3800,9 +4056,9 @@
 	has_panicked = 1;
 
 	/* For every registered interface, set it to run to completion. */
-	for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
-		intf = ipmi_interfaces[i];
-		if (IPMI_INVALID_INTERFACE(intf))
+	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+		if (!intf->handlers)
+			/* Interface is not ready. */
 			continue;
 
 		intf->handlers->set_run_to_completion(intf->send_info, 1);
@@ -3823,7 +4079,6 @@
 
 static int ipmi_init_msghandler(void)
 {
-	int i;
 	int rv;
 
 	if (initialized)
@@ -3838,9 +4093,6 @@
 	printk(KERN_INFO "ipmi message handler version "
 	       IPMI_DRIVER_VERSION "\n");
 
-	for (i = 0; i < MAX_IPMI_INTERFACES; i++)
-		ipmi_interfaces[i] = NULL;
-
 #ifdef CONFIG_PROC_FS
 	proc_ipmi_root = proc_mkdir("ipmi", NULL);
 	if (!proc_ipmi_root) {
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c
index 8d941db..597eb4f 100644
--- a/drivers/char/ipmi/ipmi_poweroff.c
+++ b/drivers/char/ipmi/ipmi_poweroff.c
@@ -43,6 +43,9 @@
 
 #define PFX "IPMI poweroff: "
 
+static void ipmi_po_smi_gone(int if_num);
+static void ipmi_po_new_smi(int if_num, struct device *device);
+
 /* Definitions for controlling power off (if the system supports it).  It
  * conveniently matches the IPMI chassis control values. */
 #define IPMI_CHASSIS_POWER_DOWN		0	/* power down, the default. */
@@ -51,6 +54,37 @@
 /* the IPMI data command */
 static int poweroff_powercycle;
 
+/* Which interface to use, -1 means the first we see. */
+static int ifnum_to_use = -1;
+
+/* Our local state. */
+static int ready = 0;
+static ipmi_user_t ipmi_user;
+static int ipmi_ifnum;
+static void (*specific_poweroff_func)(ipmi_user_t user) = NULL;
+
+/* Holds the old poweroff function so we can restore it on removal. */
+static void (*old_poweroff_func)(void);
+
+static int set_param_ifnum(const char *val, struct kernel_param *kp)
+{
+	int rv = param_set_int(val, kp);
+	if (rv)
+		return rv;
+	if ((ifnum_to_use < 0) || (ifnum_to_use == ipmi_ifnum))
+		return 0;
+
+	ipmi_po_smi_gone(ipmi_ifnum);
+	ipmi_po_new_smi(ifnum_to_use, NULL);
+	return 0;
+}
+
+module_param_call(ifnum_to_use, set_param_ifnum, param_get_int,
+		  &ifnum_to_use, 0644);
+MODULE_PARM_DESC(ifnum_to_use, "The interface number to use for the IPMI "
+		 "poweroff.  Setting to -1 defaults to the first registered "
+		 "interface");
+
 /* parameter definition to allow user to flag power cycle */
 module_param(poweroff_powercycle, int, 0644);
 MODULE_PARM_DESC(poweroff_powercycle, " Set to non-zero to enable power cycle instead of power down. Power cycle is contingent on hardware support, otherwise it defaults back to power down.");
@@ -142,6 +176,42 @@
 #define IPMI_ATCA_GET_ADDR_INFO_CMD	0x01
 #define IPMI_PICMG_ID			0
 
+#define IPMI_NETFN_OEM				0x2e
+#define IPMI_ATCA_PPS_GRACEFUL_RESTART		0x11
+#define IPMI_ATCA_PPS_IANA			"\x00\x40\x0A"
+#define IPMI_MOTOROLA_MANUFACTURER_ID		0x0000A1
+#define IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID	0x0051
+
+static void (*atca_oem_poweroff_hook)(ipmi_user_t user) = NULL;
+
+static void pps_poweroff_atca(ipmi_user_t user)
+{
+	struct ipmi_system_interface_addr smi_addr;
+	struct kernel_ipmi_msg            send_msg;
+	int                               rv;
+
+	/*
+	 * Configure IPMI address for local access
+	 */
+	smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+	smi_addr.channel = IPMI_BMC_CHANNEL;
+	smi_addr.lun = 0;
+
+	printk(KERN_INFO PFX "PPS powerdown hook used\n");
+
+	send_msg.netfn = IPMI_NETFN_OEM;
+	send_msg.cmd = IPMI_ATCA_PPS_GRACEFUL_RESTART;
+	send_msg.data = IPMI_ATCA_PPS_IANA;
+	send_msg.data_len = 3;
+	rv = ipmi_request_in_rc_mode(user,
+				     (struct ipmi_addr *) &smi_addr,
+				     &send_msg);
+	if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) {
+		printk(KERN_ERR PFX "Unable to send ATCA PPS restart,"
+		       " IPMI error 0x%x\n", rv);
+	}
+	return;
+}
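
Detection installs a vendor-specific handler and the generic power-off path calls it if present. A standalone sketch of that OEM hook pattern (plain C, hypothetical names; the manufacturer and product IDs are the ones the patch defines for the Motorola/Pigeon Point PPS IPMC):

    #include <stdio.h>

    static void (*oem_poweroff_hook)(void);

    static void pps_hook(void)
    {
            printf("vendor-specific graceful restart\n");
    }

    /* Detection step: install the hook only for the matching hardware. */
    static void detect(unsigned int mfg_id, unsigned int prod_id)
    {
            if (mfg_id == 0x0000A1 && prod_id == 0x0051)
                    oem_poweroff_hook = pps_hook;
    }

    /* Generic path: do the standard action, then the OEM hook if any. */
    static void generic_poweroff(void)
    {
            printf("standard chassis power down\n");
            if (oem_poweroff_hook)
                    oem_poweroff_hook();
    }

    int main(void)
    {
            detect(0x0000A1, 0x0051);
            generic_poweroff();
            return 0;
    }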
+
 static int ipmi_atca_detect (ipmi_user_t user)
 {
 	struct ipmi_system_interface_addr smi_addr;
@@ -167,6 +237,13 @@
 	rv = ipmi_request_wait_for_response(user,
 					    (struct ipmi_addr *) &smi_addr,
 					    &send_msg);
+
+	printk(KERN_INFO PFX "ATCA Detect mfg 0x%X prod 0x%X\n",
+	       mfg_id, prod_id);
+	if ((mfg_id == IPMI_MOTOROLA_MANUFACTURER_ID)
+	    && (prod_id == IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID)) {
+		printk(KERN_INFO PFX
+		       "Installing Pigeon Point Systems Poweroff Hook\n");
+		atca_oem_poweroff_hook = pps_poweroff_atca;
+	}
 	return !rv;
 }
 
@@ -200,12 +277,19 @@
 	rv = ipmi_request_in_rc_mode(user,
 				     (struct ipmi_addr *) &smi_addr,
 				     &send_msg);
-	if (rv) {
+	/*
+	 * At this point the system may be shutting down, and most
+	 * serial drivers (if used) will have interrupts turned off;
+	 * it may be better to ignore an IPMI_UNKNOWN_ERR_COMPLETION_CODE
+	 * return code.
+	 */
+	if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) {
 		printk(KERN_ERR PFX "Unable to send ATCA powerdown message,"
 		       " IPMI error 0x%x\n", rv);
 		goto out;
 	}
 
+	if (atca_oem_poweroff_hook)
+		atca_oem_poweroff_hook(user);
  out:
 	return;
 }
@@ -440,15 +524,6 @@
 		      / sizeof(struct poweroff_function))
 
 
-/* Our local state. */
-static int ready = 0;
-static ipmi_user_t ipmi_user;
-static void (*specific_poweroff_func)(ipmi_user_t user) = NULL;
-
-/* Holds the old poweroff function so we can restore it on removal. */
-static void (*old_poweroff_func)(void);
-
-
 /* Called on a powerdown request. */
 static void ipmi_poweroff_function (void)
 {
@@ -473,6 +548,9 @@
 	if (ready)
 		return;
 
+	if ((ifnum_to_use >= 0) && (ifnum_to_use != if_num))
+		return;
+
 	rv = ipmi_create_user(if_num, &ipmi_poweroff_handler, NULL,
 			      &ipmi_user);
 	if (rv) {
@@ -481,6 +559,8 @@
 		return;
 	}
 
+	ipmi_ifnum = if_num;
+
         /*
 	 * Do a get device id and store some results, since this is
 	 * used by several functions.
@@ -541,9 +621,15 @@
 
 static void ipmi_po_smi_gone(int if_num)
 {
-	/* This can never be called, because once poweroff driver is
-	   registered, the interface can't go away until the power
-	   driver is unregistered. */
+	if (!ready)
+		return;
+
+	if (ipmi_ifnum != if_num)
+		return;
+
+	ready = 0;
+	ipmi_destroy_user(ipmi_user);
+	pm_power_off = old_poweroff_func;
 }
 
 static struct ipmi_smi_watcher smi_watcher =
@@ -616,9 +702,9 @@
 		printk(KERN_ERR PFX "Unable to register SMI watcher: %d\n", rv);
 		goto out_err;
 	}
-#endif
 
  out_err:
+#endif
 	return rv;
 }
 
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index bb1fac1..81a0c89 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -61,6 +61,10 @@
 #include "ipmi_si_sm.h"
 #include <linux/init.h>
 #include <linux/dmi.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+
+#define PFX "ipmi_si: "
 
 /* Measure times between events in the driver. */
 #undef DEBUG_TIMING
@@ -92,7 +96,7 @@
 enum si_type {
     SI_KCS, SI_SMIC, SI_BT
 };
-static char *si_to_str[] = { "KCS", "SMIC", "BT" };
+static char *si_to_str[] = { "kcs", "smic", "bt" };
 
 #define DEVICE_NAME "ipmi_si"
 
@@ -222,7 +226,10 @@
 static int force_kipmid[SI_MAX_PARMS];
 static int num_force_kipmid;
 
+static int unload_when_empty = 1;
+
 static int try_smi_init(struct smi_info *smi);
+static void cleanup_one_si(struct smi_info *to_clean);
 
 static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
 static int register_xaction_notifier(struct notifier_block * nb)
@@ -240,14 +247,18 @@
 	spin_lock(&(smi_info->si_lock));
 }
 
-static void return_hosed_msg(struct smi_info *smi_info)
+static void return_hosed_msg(struct smi_info *smi_info, int cCode)
 {
 	struct ipmi_smi_msg *msg = smi_info->curr_msg;
 
+	if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED)
+		cCode = IPMI_ERR_UNSPECIFIED;
+	/* else use it as is */
+
 	/* Make it a response */
 	msg->rsp[0] = msg->data[0] | 4;
 	msg->rsp[1] = msg->data[1];
-	msg->rsp[2] = 0xFF; /* Unknown error. */
+	msg->rsp[2] = cCode;
 	msg->rsp_size = 3;
 
 	smi_info->curr_msg = NULL;
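
return_hosed_msg() above now takes a completion code and clamps anything out of range to IPMI_ERR_UNSPECIFIED before stuffing it into the three-byte error response. A standalone sketch of that response construction (plain C, hypothetical helper, not the driver code):

    #include <stdint.h>
    #include <stdio.h>

    #define IPMI_ERR_UNSPECIFIED 0xff

    /* Build the minimal 3-byte IPMI error response: the request's
     * netfn/lun byte with the response bit set, the echoed command,
     * and a completion code clamped into the valid 0..0xff range. */
    static void build_err_response(const uint8_t req[2], uint8_t rsp[3],
                                   int ccode)
    {
            if (ccode < 0 || ccode > IPMI_ERR_UNSPECIFIED)
                    ccode = IPMI_ERR_UNSPECIFIED;
            rsp[0] = req[0] | 4;
            rsp[1] = req[1];
            rsp[2] = (uint8_t)ccode;
    }

    int main(void)
    {
            uint8_t req[2] = { 0x18, 0x01 }, rsp[3];
            build_err_response(req, rsp, -5);
            printf("%02x %02x %02x\n", rsp[0], rsp[1], rsp[2]); /* 1c 01 ff */
            return 0;
    }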
@@ -298,7 +309,7 @@
 			smi_info->curr_msg->data,
 			smi_info->curr_msg->data_size);
 		if (err) {
-			return_hosed_msg(smi_info);
+			return_hosed_msg(smi_info, err);
 		}
 
 		rv = SI_SM_CALL_WITHOUT_DELAY;
@@ -640,7 +651,7 @@
 			/* If we were handling a user message, format
                            a response to send to the upper layer to
                            tell it about the error. */
-			return_hosed_msg(smi_info);
+			return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
 		}
 		si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
 	}
@@ -684,22 +695,24 @@
 	{
 		/* We are idle and the upper layer requested that I fetch
 		   events, so do so. */
-		unsigned char msg[2];
-
-		spin_lock(&smi_info->count_lock);
-		smi_info->flag_fetches++;
-		spin_unlock(&smi_info->count_lock);
-
 		atomic_set(&smi_info->req_events, 0);
-		msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
-		msg[1] = IPMI_GET_MSG_FLAGS_CMD;
+
+		smi_info->curr_msg = ipmi_alloc_smi_msg();
+		if (!smi_info->curr_msg)
+			goto out;
+
+		smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+		smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
+		smi_info->curr_msg->data_size = 2;
 
 		smi_info->handlers->start_transaction(
-			smi_info->si_sm, msg, 2);
-		smi_info->si_state = SI_GETTING_FLAGS;
+			smi_info->si_sm,
+			smi_info->curr_msg->data,
+			smi_info->curr_msg->data_size);
+		smi_info->si_state = SI_GETTING_EVENTS;
 		goto restart;
 	}
-
+ out:
 	return si_sm_result;
 }
 
@@ -714,6 +727,15 @@
 	struct timeval    t;
 #endif
 
+	if (atomic_read(&smi_info->stop_operation)) {
+		msg->rsp[0] = msg->data[0] | 4;
+		msg->rsp[1] = msg->data[1];
+		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
+		msg->rsp_size = 3;
+		deliver_recv_msg(smi_info, msg);
+		return;
+	}
+
 	spin_lock_irqsave(&(smi_info->msg_lock), flags);
 #ifdef DEBUG_TIMING
 	do_gettimeofday(&t);
@@ -805,13 +827,21 @@
 {
 	struct smi_info *smi_info = send_info;
 
-	smi_event_handler(smi_info, 0);
+	/*
+	 * Make sure there is some delay in the poll loop so we can
+	 * drive time forward and timeout things.
+	 */
+	udelay(10);
+	smi_event_handler(smi_info, 10);
 }
 
 static void request_events(void *send_info)
 {
 	struct smi_info *smi_info = send_info;
 
+	if (atomic_read(&smi_info->stop_operation))
+		return;
+
 	atomic_set(&smi_info->req_events, 1);
 }
 
@@ -949,12 +979,21 @@
 	return 0;
 }
 
+static void set_maintenance_mode(void *send_info, int enable)
+{
+	struct smi_info   *smi_info = send_info;
+
+	if (!enable)
+		atomic_set(&smi_info->req_events, 0);
+}
+
 static struct ipmi_smi_handlers handlers =
 {
 	.owner                  = THIS_MODULE,
 	.start_processing       = smi_start_processing,
 	.sender			= sender,
 	.request_events		= request_events,
+	.set_maintenance_mode   = set_maintenance_mode,
 	.set_run_to_completion  = set_run_to_completion,
 	.poll			= poll,
 };
@@ -987,6 +1026,16 @@
 static int slave_addrs[SI_MAX_PARMS];
 static int num_slave_addrs = 0;
 
+#define IPMI_IO_ADDR_SPACE  0
+#define IPMI_MEM_ADDR_SPACE 1
+static char *addr_space_to_str[] = { "I/O", "mem" };
+
+static int hotmod_handler(const char *val, struct kernel_param *kp);
+
+module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200);
+MODULE_PARM_DESC(hotmod, "Add and remove interfaces.  See"
+		 " Documentation/IPMI.txt in the kernel sources for the"
+		 " gory details.");
 
 module_param_named(trydefaults, si_trydefaults, bool, 0);
 MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
@@ -1038,12 +1087,12 @@
 MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
 		 " disabled(0).  Normally the IPMI driver auto-detects"
 		 " this, but the value may be overridden by this parm.");
+module_param(unload_when_empty, int, 0);
+MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
+		 " specified or found, default is 1.  Setting to 0"
+		 " is useful for hot add of devices using hotmod.");
 
 
-#define IPMI_IO_ADDR_SPACE  0
-#define IPMI_MEM_ADDR_SPACE 1
-static char *addr_space_to_str[] = { "I/O", "memory" };
-
 static void std_irq_cleanup(struct smi_info *info)
 {
 	if (info->si_type == SI_BT)
@@ -1317,6 +1366,234 @@
 	return 0;
 }
 
+/*
+ * Parms come in as <op1>[:op2[:op3...]].  ops are:
+ *   add|remove,kcs|bt|smic,mem|i/o,<address>[,<opt1>[,<opt2>[,...]]]
+ * Options are:
+ *   rsp=<regspacing>
+ *   rsi=<regsize>
+ *   rsh=<regshift>
+ *   irq=<irq>
+ *   ipmb=<ipmb addr>
+ */
+enum hotmod_op { HM_ADD, HM_REMOVE };
+struct hotmod_vals {
+	char *name;
+	int  val;
+};
+static struct hotmod_vals hotmod_ops[] = {
+	{ "add",	HM_ADD },
+	{ "remove",	HM_REMOVE },
+	{ NULL }
+};
+static struct hotmod_vals hotmod_si[] = {
+	{ "kcs",	SI_KCS },
+	{ "smic",	SI_SMIC },
+	{ "bt",		SI_BT },
+	{ NULL }
+};
+static struct hotmod_vals hotmod_as[] = {
+	{ "mem",	IPMI_MEM_ADDR_SPACE },
+	{ "i/o",	IPMI_IO_ADDR_SPACE },
+	{ NULL }
+};
+static int ipmi_strcasecmp(const char *s1, const char *s2)
+{
+	while (*s1 || *s2) {
+		if (!*s1)
+			return -1;
+		if (!*s2)
+			return 1;
+		if (tolower(*s1) != tolower(*s2))
+			return tolower(*s1) - tolower(*s2);
+		s1++;
+		s2++;
+	}
+	return 0;
+}
+static int parse_str(struct hotmod_vals *v, int *val, char *name, char **curr)
+{
+	char *s;
+	int  i;
+
+	s = strchr(*curr, ',');
+	if (!s) {
+		printk(KERN_WARNING PFX "No hotmod %s given.\n", name);
+		return -EINVAL;
+	}
+	*s = '\0';
+	s++;
+	for (i = 0; v[i].name; i++) {
+		if (ipmi_strcasecmp(*curr, v[i].name) == 0) {
+			*val = v[i].val;
+			*curr = s;
+			return 0;
+		}
+	}
+
+	printk(KERN_WARNING PFX "Invalid hotmod %s '%s'\n", name, *curr);
+	return -EINVAL;
+}
+
+static int hotmod_handler(const char *val, struct kernel_param *kp)
+{
+	char *str = kstrdup(val, GFP_KERNEL);
+	int  rv = -EINVAL;
+	char *next, *curr, *s, *n, *o;
+	enum hotmod_op op;
+	enum si_type si_type;
+	int  addr_space;
+	unsigned long addr;
+	int regspacing;
+	int regsize;
+	int regshift;
+	int irq;
+	int ipmb;
+	int ival;
+	struct smi_info *info;
+
+	if (!str)
+		return -ENOMEM;
+
+	/* Kill any trailing spaces, as we can get a "\n" from echo. */
+	ival = strlen(str) - 1;
+	while ((ival >= 0) && isspace(str[ival])) {
+		str[ival] = '\0';
+		ival--;
+	}
+
+	for (curr = str; curr; curr = next) {
+		regspacing = 1;
+		regsize = 1;
+		regshift = 0;
+		irq = 0;
+		ipmb = 0x20;
+
+		next = strchr(curr, ':');
+		if (next) {
+			*next = '\0';
+			next++;
+		}
+
+		rv = parse_str(hotmod_ops, &ival, "operation", &curr);
+		if (rv)
+			break;
+		op = ival;
+
+		rv = parse_str(hotmod_si, &ival, "interface type", &curr);
+		if (rv)
+			break;
+		si_type = ival;
+
+		rv = parse_str(hotmod_as, &addr_space, "address space", &curr);
+		if (rv)
+			break;
+
+		s = strchr(curr, ',');
+		if (s) {
+			*s = '\0';
+			s++;
+		}
+		addr = simple_strtoul(curr, &n, 0);
+		if ((*n != '\0') || (*curr == '\0')) {
+			printk(KERN_WARNING PFX "Invalid hotmod address"
+			       " '%s'\n", curr);
+			break;
+		}
+
+		while (s) {
+			curr = s;
+			s = strchr(curr, ',');
+			if (s) {
+				*s = '\0';
+				s++;
+			}
+			o = strchr(curr, '=');
+			if (o) {
+				*o = '\0';
+				o++;
+			}
+#define HOTMOD_INT_OPT(name, val) \
+			if (ipmi_strcasecmp(curr, name) == 0) {		\
+				if (!o) {				\
+					printk(KERN_WARNING PFX		\
+					       "No option given for '%s'\n", \
+						curr);			\
+					goto out;			\
+				}					\
+				val = simple_strtoul(o, &n, 0);		\
+				if ((*n != '\0') || (*o == '\0')) {	\
+					printk(KERN_WARNING PFX		\
+					       "Bad option given for '%s'\n", \
+					       curr);			\
+					goto out;			\
+				}					\
+			}
+
+			HOTMOD_INT_OPT("rsp", regspacing)
+			else HOTMOD_INT_OPT("rsi", regsize)
+			else HOTMOD_INT_OPT("rsh", regshift)
+			else HOTMOD_INT_OPT("irq", irq)
+			else HOTMOD_INT_OPT("ipmb", ipmb)
+			else {
+				printk(KERN_WARNING PFX
+				       "Invalid hotmod option '%s'\n",
+				       curr);
+				goto out;
+			}
+#undef HOTMOD_INT_OPT
+		}
+
+		if (op == HM_ADD) {
+			info = kzalloc(sizeof(*info), GFP_KERNEL);
+			if (!info) {
+				rv = -ENOMEM;
+				goto out;
+			}
+
+			info->addr_source = "hotmod";
+			info->si_type = si_type;
+			info->io.addr_data = addr;
+			info->io.addr_type = addr_space;
+			if (addr_space == IPMI_MEM_ADDR_SPACE)
+				info->io_setup = mem_setup;
+			else
+				info->io_setup = port_setup;
+
+			info->io.addr = NULL;
+			info->io.regspacing = regspacing;
+			if (!info->io.regspacing)
+				info->io.regspacing = DEFAULT_REGSPACING;
+			info->io.regsize = regsize;
+			if (!info->io.regsize)
+				info->io.regsize = DEFAULT_REGSPACING;
+			info->io.regshift = regshift;
+			info->irq = irq;
+			if (info->irq)
+				info->irq_setup = std_irq_setup;
+			info->slave_addr = ipmb;
+
+			try_smi_init(info);
+		} else {
+			/* remove */
+			struct smi_info *e, *tmp_e;
+
+			mutex_lock(&smi_infos_lock);
+			list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
+				if (e->io.addr_type != addr_space)
+					continue;
+				if (e->si_type != si_type)
+					continue;
+				if (e->io.addr_data == addr)
+					cleanup_one_si(e);
+			}
+			mutex_unlock(&smi_infos_lock);
+		}
+	}
+ out:
+	kfree(str);
+	return rv;
+}
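
hotmod_handler() and parse_str() above consume the comma- and colon-separated hotmod string by finding the next separator, NUL-terminating the current token in place, and advancing. A runnable userspace sketch of that in-place tokenising (illustrative only; the example string follows the syntax documented in the comment before hotmod_handler()):

    #include <stdio.h>
    #include <string.h>

    /* Split an "add,kcs,i/o,0xca2,irq=5" style string in place,
     * one token at a time, the way the hotmod parser walks it. */
    int main(void)
    {
            char parm[] = "add,kcs,i/o,0xca2,irq=5";
            char *curr = parm, *s;

            while (curr) {
                    s = strchr(curr, ',');
                    if (s)
                            *s++ = '\0';  /* terminate token, move past it */
                    printf("token: %s\n", curr);
                    curr = s;
            }
            return 0;
    }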
 
 static __devinit void hardcode_find_bmc(void)
 {
@@ -1333,11 +1610,11 @@
 
 		info->addr_source = "hardcoded";
 
-		if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
+		if (!si_type[i] || ipmi_strcasecmp(si_type[i], "kcs") == 0) {
 			info->si_type = SI_KCS;
-		} else if (strcmp(si_type[i], "smic") == 0) {
+		} else if (ipmi_strcasecmp(si_type[i], "smic") == 0) {
 			info->si_type = SI_SMIC;
-		} else if (strcmp(si_type[i], "bt") == 0) {
+		} else if (ipmi_strcasecmp(si_type[i], "bt") == 0) {
 			info->si_type = SI_BT;
 		} else {
 			printk(KERN_WARNING
@@ -1952,19 +2229,9 @@
 static int type_file_read_proc(char *page, char **start, off_t off,
 			       int count, int *eof, void *data)
 {
-	char            *out = (char *) page;
 	struct smi_info *smi = data;
 
-	switch (smi->si_type) {
-	    case SI_KCS:
-		return sprintf(out, "kcs\n");
-	    case SI_SMIC:
-		return sprintf(out, "smic\n");
-	    case SI_BT:
-		return sprintf(out, "bt\n");
-	    default:
-		return 0;
-	}
+	return sprintf(page, "%s\n", si_to_str[smi->si_type]);
 }
 
 static int stat_file_read_proc(char *page, char **start, off_t off,
@@ -2000,7 +2267,24 @@
 	out += sprintf(out, "incoming_messages:     %ld\n",
 		       smi->incoming_messages);
 
-	return (out - ((char *) page));
+	return out - page;
+}
+
+static int param_read_proc(char *page, char **start, off_t off,
+			   int count, int *eof, void *data)
+{
+	struct smi_info *smi = data;
+
+	return sprintf(page,
+		       "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
+		       si_to_str[smi->si_type],
+		       addr_space_to_str[smi->io.addr_type],
+		       smi->io.addr_data,
+		       smi->io.regspacing,
+		       smi->io.regsize,
+		       smi->io.regshift,
+		       smi->irq,
+		       smi->slave_addr);
 }
 
 /*
@@ -2362,6 +2646,7 @@
 			       new_smi,
 			       &new_smi->device_id,
 			       new_smi->dev,
+			       "bmc",
 			       new_smi->slave_addr);
 	if (rv) {
 		printk(KERN_ERR
@@ -2390,6 +2675,16 @@
 		goto out_err_stop_timer;
 	}
 
+	rv = ipmi_smi_add_proc_entry(new_smi->intf, "params",
+				     param_read_proc, NULL,
+				     new_smi, THIS_MODULE);
+	if (rv) {
+		printk(KERN_ERR
+		       "ipmi_si: Unable to create proc entry: %d\n",
+		       rv);
+		goto out_err_stop_timer;
+	}
+
 	list_add_tail(&new_smi->link, &smi_infos);
 
 	mutex_unlock(&smi_infos_lock);
@@ -2483,7 +2778,12 @@
 #endif
 
 #ifdef CONFIG_PCI
-	pci_module_init(&ipmi_pci_driver);
+	rv = pci_register_driver(&ipmi_pci_driver);
+	if (rv) {
+		printk(KERN_ERR
+		       "init_ipmi_si: Unable to register PCI driver: %d\n",
+		       rv);
+	}
 #endif
 
 	if (si_trydefaults) {
@@ -2498,7 +2798,7 @@
 	}
 
 	mutex_lock(&smi_infos_lock);
-	if (list_empty(&smi_infos)) {
+	if (unload_when_empty && list_empty(&smi_infos)) {
 		mutex_unlock(&smi_infos_lock);
 #ifdef CONFIG_PCI
 		pci_unregister_driver(&ipmi_pci_driver);
@@ -2513,7 +2813,7 @@
 }
 module_init(init_ipmi_si);
 
-static void __devexit cleanup_one_si(struct smi_info *to_clean)
+static void cleanup_one_si(struct smi_info *to_clean)
 {
 	int           rv;
 	unsigned long flags;
diff --git a/drivers/char/ipmi/ipmi_smic_sm.c b/drivers/char/ipmi/ipmi_smic_sm.c
index 39d7e5e..e64ea7d 100644
--- a/drivers/char/ipmi/ipmi_smic_sm.c
+++ b/drivers/char/ipmi/ipmi_smic_sm.c
@@ -141,12 +141,14 @@
 {
 	unsigned int i;
 
-	if ((size < 2) || (size > MAX_SMIC_WRITE_SIZE)) {
-		return -1;
-	}
-	if ((smic->state != SMIC_IDLE) && (smic->state != SMIC_HOSED)) {
-		return -2;
-	}
+	if (size < 2)
+		return IPMI_REQ_LEN_INVALID_ERR;
+	if (size > MAX_SMIC_WRITE_SIZE)
+		return IPMI_REQ_LEN_EXCEEDED_ERR;
+
+	if ((smic->state != SMIC_IDLE) && (smic->state != SMIC_HOSED))
+		return IPMI_NOT_IN_MY_STATE_ERR;
+
 	if (smic_debug & SMIC_DEBUG_MSG) {
 		printk(KERN_INFO "start_smic_transaction -");
 		for (i = 0; i < size; i ++) {
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 73f759e..90fb2a5 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -135,6 +135,7 @@
 static int nowayout = WATCHDOG_NOWAYOUT;
 
 static ipmi_user_t watchdog_user = NULL;
+static int watchdog_ifnum;
 
 /* Default the timeout to 10 seconds. */
 static int timeout = 10;
@@ -161,6 +162,8 @@
 static char pretimeout_since_last_heartbeat = 0;
 static char expect_close;
 
+static int ifnum_to_use = -1;
+
 static DECLARE_RWSEM(register_sem);
 
 /* Parameters to ipmi_set_timeout */
@@ -169,6 +172,8 @@
 #define IPMI_SET_TIMEOUT_FORCE_HB		2
 
 static int ipmi_set_timeout(int do_heartbeat);
+static void ipmi_register_watchdog(int ipmi_intf);
+static void ipmi_unregister_watchdog(int ipmi_intf);
 
 /* If true, the driver will start running as soon as it is configured
    and ready. */
@@ -245,6 +250,26 @@
 	return strlen(buffer);
 }
 
+
+static int set_param_wdog_ifnum(const char *val, struct kernel_param *kp)
+{
+	int rv = param_set_int(val, kp);
+	if (rv)
+		return rv;
+	if ((ifnum_to_use < 0) || (ifnum_to_use == watchdog_ifnum))
+		return 0;
+
+	ipmi_unregister_watchdog(watchdog_ifnum);
+	ipmi_register_watchdog(ifnum_to_use);
+	return 0;
+}
+
+module_param_call(ifnum_to_use, set_param_wdog_ifnum, get_param_int,
+		  &ifnum_to_use, 0644);
+MODULE_PARM_DESC(ifnum_to_use, "The interface number to use for the watchdog "
+		 "timer.  Setting to -1 defaults to the first registered "
+		 "interface");
+
 module_param_call(timeout, set_param_int, get_param_int, &timeout, 0644);
 MODULE_PARM_DESC(timeout, "Timeout value in seconds.");
 
@@ -263,12 +288,13 @@
 MODULE_PARM_DESC(preop, "Pretimeout driver operation.  One of: "
 		 "preop_none, preop_panic, preop_give_data.");
 
-module_param(start_now, int, 0);
+module_param(start_now, int, 0444);
 MODULE_PARM_DESC(start_now, "Set to 1 to start the watchdog as "
 		 "soon as the driver is loaded.");
 
 module_param(nowayout, int, 0644);
-MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
+		 "(default=CONFIG_WATCHDOG_NOWAYOUT)");
 
 /* Default state of the timer. */
 static unsigned char ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
@@ -872,6 +898,11 @@
 	if (watchdog_user)
 		goto out;
 
+	if ((ifnum_to_use >= 0) && (ifnum_to_use != ipmi_intf))
+		goto out;
+
+	watchdog_ifnum = ipmi_intf;
+
 	rv = ipmi_create_user(ipmi_intf, &ipmi_hndlrs, NULL, &watchdog_user);
 	if (rv < 0) {
 		printk(KERN_CRIT PFX "Unable to register with ipmi\n");
@@ -901,6 +932,39 @@
 	}
 }
 
+static void ipmi_unregister_watchdog(int ipmi_intf)
+{
+	int rv;
+
+	down_write(&register_sem);
+
+	if (!watchdog_user)
+		goto out;
+
+	if (watchdog_ifnum != ipmi_intf)
+		goto out;
+
+	/* Make sure no one can call us any more. */
+	misc_deregister(&ipmi_wdog_miscdev);
+
+	/* Wait to make sure the message makes it out.  The lower layer has
+	   pointers to our buffers, we want to make sure they are done before
+	   we release our memory. */
+	while (atomic_read(&set_timeout_tofree))
+		schedule_timeout_uninterruptible(1);
+
+	/* Disconnect from IPMI. */
+	rv = ipmi_destroy_user(watchdog_user);
+	if (rv) {
+		printk(KERN_WARNING PFX "error unlinking from IPMI: %d\n",
+		       rv);
+	}
+	watchdog_user = NULL;
+
+ out:
+	up_write(&register_sem);
+}
+
 #ifdef HAVE_NMI_HANDLER
 static int
 ipmi_nmi(void *dev_id, int cpu, int handled)
@@ -1004,9 +1068,7 @@
 
 static void ipmi_smi_gone(int if_num)
 {
-	/* This can never be called, because once the watchdog is
-	   registered, the interface can't go away until the watchdog
-	   is unregistered. */
+	ipmi_unregister_watchdog(if_num);
 }
 
 static struct ipmi_smi_watcher smi_watcher =
@@ -1148,30 +1210,32 @@
 
 	check_parms();
 
+	register_reboot_notifier(&wdog_reboot_notifier);
+	atomic_notifier_chain_register(&panic_notifier_list,
+			&wdog_panic_notifier);
+
 	rv = ipmi_smi_watcher_register(&smi_watcher);
 	if (rv) {
 #ifdef HAVE_NMI_HANDLER
 		if (preaction_val == WDOG_PRETIMEOUT_NMI)
 			release_nmi(&ipmi_nmi_handler);
 #endif
+		atomic_notifier_chain_unregister(&panic_notifier_list,
+						 &wdog_panic_notifier);
+		unregister_reboot_notifier(&wdog_reboot_notifier);
 		printk(KERN_WARNING PFX "can't register smi watcher\n");
 		return rv;
 	}
 
-	register_reboot_notifier(&wdog_reboot_notifier);
-	atomic_notifier_chain_register(&panic_notifier_list,
-			&wdog_panic_notifier);
-
 	printk(KERN_INFO PFX "driver initialized\n");
 
 	return 0;
 }
 
-static __exit void ipmi_unregister_watchdog(void)
+static void __exit ipmi_wdog_exit(void)
 {
-	int rv;
-
-	down_write(&register_sem);
+	ipmi_smi_watcher_unregister(&smi_watcher);
+	ipmi_unregister_watchdog(watchdog_ifnum);
 
 #ifdef HAVE_NMI_HANDLER
 	if (nmi_handler_registered)
@@ -1179,37 +1243,8 @@
 #endif
 
 	atomic_notifier_chain_unregister(&panic_notifier_list,
-			&wdog_panic_notifier);
+					 &wdog_panic_notifier);
 	unregister_reboot_notifier(&wdog_reboot_notifier);
-
-	if (! watchdog_user)
-		goto out;
-
-	/* Make sure no one can call us any more. */
-	misc_deregister(&ipmi_wdog_miscdev);
-
-	/* Wait to make sure the message makes it out.  The lower layer has
-	   pointers to our buffers, we want to make sure they are done before
-	   we release our memory. */
-	while (atomic_read(&set_timeout_tofree))
-		schedule_timeout_uninterruptible(1);
-
-	/* Disconnect from IPMI. */
-	rv = ipmi_destroy_user(watchdog_user);
-	if (rv) {
-		printk(KERN_WARNING PFX "error unlinking from IPMI: %d\n",
-		       rv);
-	}
-	watchdog_user = NULL;
-
- out:
-	up_write(&register_sem);
-}
-
-static void __exit ipmi_wdog_exit(void)
-{
-	ipmi_smi_watcher_unregister(&smi_watcher);
-	ipmi_unregister_watchdog();
 }
 module_exit(ipmi_wdog_exit);
 module_init(ipmi_wdog_init);
diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c
index 58c955e..1637c1d 100644
--- a/drivers/char/isicom.c
+++ b/drivers/char/isicom.c
@@ -530,9 +530,9 @@
 /* 	Interrupt handlers 	*/
 
 
-static void isicom_bottomhalf(void *data)
+static void isicom_bottomhalf(struct work_struct *work)
 {
-	struct isi_port *port = (struct isi_port *) data;
+	struct isi_port *port = container_of(work, struct isi_port, bh_tqueue);
 	struct tty_struct *tty = port->tty;
 
 	if (!tty)
@@ -1474,9 +1474,9 @@
 }
 
 /* hangup et all */
-static void do_isicom_hangup(void *data)
+static void do_isicom_hangup(struct work_struct *work)
 {
-	struct isi_port *port = data;
+	struct isi_port *port = container_of(work, struct isi_port, hangup_tq);
 	struct tty_struct *tty;
 
 	tty = port->tty;
@@ -1966,8 +1966,8 @@
 			port->channel = channel;
 			port->close_delay = 50 * HZ/100;
 			port->closing_wait = 3000 * HZ/100;
-			INIT_WORK(&port->hangup_tq, do_isicom_hangup, port);
-			INIT_WORK(&port->bh_tqueue, isicom_bottomhalf, port);
+			INIT_WORK(&port->hangup_tq, do_isicom_hangup);
+			INIT_WORK(&port->bh_tqueue, isicom_bottomhalf);
 			port->status = 0;
 			init_waitqueue_head(&port->open_wait);
 			init_waitqueue_head(&port->close_wait);
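
The isicom conversion above (and the matching ones in istallion, moxa, mxser, riscom8, specialix, stallion, serial167 and the synclink drivers below) follows the new workqueue API: INIT_WORK() no longer carries a data pointer, so the handler recovers its owning structure from the embedded work item with container_of(). A small userspace illustration of that recovery (hand-rolled container_of, hypothetical types):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work { int pending; };

    struct port {
            int         index;
            struct work bh_work;    /* embedded, like the drivers' tqueue */
    };

    /* New-style handler: gets only the work item, recovers the port. */
    static void bh_handler(struct work *w)
    {
            struct port *p = container_of(w, struct port, bh_work);
            printf("bottom half for port %d\n", p->index);
    }

    int main(void)
    {
            struct port p = { 3, { 0 } };
            bh_handler(&p.bh_work);
            return 0;
    }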
diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
index ffdf9df..8f59194 100644
--- a/drivers/char/istallion.c
+++ b/drivers/char/istallion.c
@@ -663,7 +663,7 @@
 static int	stli_rawopen(stlibrd_t *brdp, stliport_t *portp, unsigned long arg, int wait);
 static int	stli_rawclose(stlibrd_t *brdp, stliport_t *portp, unsigned long arg, int wait);
 static int	stli_waitcarrier(stlibrd_t *brdp, stliport_t *portp, struct file *filp);
-static void	stli_dohangup(void *arg);
+static void	stli_dohangup(struct work_struct *);
 static int	stli_setport(stliport_t *portp);
 static int	stli_cmdwait(stlibrd_t *brdp, stliport_t *portp, unsigned long cmd, void *arg, int size, int copyback);
 static void	stli_sendcmd(stlibrd_t *brdp, stliport_t *portp, unsigned long cmd, void *arg, int size, int copyback);
@@ -1990,9 +1990,9 @@
  *	aren't that time critical).
  */
 
-static void stli_dohangup(void *arg)
+static void stli_dohangup(struct work_struct *ugly_api)
 {
-	stliport_t *portp = (stliport_t *) arg;
+	stliport_t *portp = container_of(ugly_api, stliport_t, tqhangup);
 	if (portp->tty != NULL) {
 		tty_hangup(portp->tty);
 	}
@@ -2898,7 +2898,7 @@
 		portp->baud_base = STL_BAUDBASE;
 		portp->close_delay = STL_CLOSEDELAY;
 		portp->closing_wait = 30 * HZ;
-		INIT_WORK(&portp->tqhangup, stli_dohangup, portp);
+		INIT_WORK(&portp->tqhangup, stli_dohangup);
 		init_waitqueue_head(&portp->open_wait);
 		init_waitqueue_head(&portp->close_wait);
 		init_waitqueue_head(&portp->raw_wait);
@@ -3476,6 +3476,8 @@
 	if (sig.magic != cpu_to_le32(ECP_MAGIC))
 	{
 		release_region(brdp->iobase, brdp->iosize);
+		iounmap(brdp->membase);
+		brdp->membase = NULL;
 		return -ENODEV;
 	}
 
@@ -3632,6 +3634,8 @@
 	    sig.magic3 != cpu_to_le16(ONB_MAGIC3))
 	{
 		release_region(brdp->iobase, brdp->iosize);
+		iounmap(brdp->membase);
+		brdp->membase = NULL;
 		return -ENODEV;
 	}
 
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 7a484fc..7e975f6 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -199,6 +199,8 @@
 	dev_t dev;
 	int err = 0;
 
+	INIT_LIST_HEAD(&misc->list);
+
 	down(&misc_sem);
 	list_for_each_entry(c, &misc_list, list) {
 		if (c->minor == misc->minor) {
diff --git a/drivers/char/mmtimer.c b/drivers/char/mmtimer.c
index 22b9905..c091603 100644
--- a/drivers/char/mmtimer.c
+++ b/drivers/char/mmtimer.c
@@ -680,7 +680,7 @@
 	if (sn_rtc_cycles_per_second < 100000) {
 		printk(KERN_ERR "%s: unable to determine clock frequency\n",
 		       MMTIMER_NAME);
-		return -1;
+		goto out1;
 	}
 
 	mmtimer_femtoperiod = ((unsigned long)1E15 + sn_rtc_cycles_per_second /
@@ -689,13 +689,13 @@
 	if (request_irq(SGI_MMTIMER_VECTOR, mmtimer_interrupt, IRQF_PERCPU, MMTIMER_NAME, NULL)) {
 		printk(KERN_WARNING "%s: unable to allocate interrupt.",
 			MMTIMER_NAME);
-		return -1;
+		goto out1;
 	}
 
 	if (misc_register(&mmtimer_miscdev)) {
 		printk(KERN_ERR "%s: failed to register device\n",
 		       MMTIMER_NAME);
-		return -1;
+		goto out2;
 	}
 
 	/* Get max numbered node, calculate slots needed */
@@ -709,16 +709,18 @@
 	if (timers == NULL) {
 		printk(KERN_ERR "%s: failed to allocate memory for device\n",
 				MMTIMER_NAME);
-		return -1;
+		goto out3;
 	}
 
+	memset(timers, 0, sizeof(mmtimer_t *) * maxn);
+
 	/* Allocate mmtimer_t's for each online node */
 	for_each_online_node(node) {
 		timers[node] = kmalloc_node(sizeof(mmtimer_t)*NUM_COMPARATORS, GFP_KERNEL, node);
 		if (timers[node] == NULL) {
 			printk(KERN_ERR "%s: failed to allocate memory for device\n",
 				MMTIMER_NAME);
-			return -1;
+			goto out4;
 		}
 		for (i=0; i< NUM_COMPARATORS; i++) {
 			mmtimer_t * base = timers[node] + i;
@@ -739,6 +741,17 @@
 	       sn_rtc_cycles_per_second/(unsigned long)1E6);
 
 	return 0;
+
+out4:
+	for_each_online_node(node) {
+		kfree(timers[node]);
+	}
+out3:
+	misc_deregister(&mmtimer_miscdev);
+out2:
+	free_irq(SGI_MMTIMER_VECTOR, NULL);
+out1:
+	return -1;
 }
 
 module_init(mmtimer_init);
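
mmtimer_init() above gains a classic error-unwind ladder: each failure jumps to a label that releases only what was already set up, in reverse order of acquisition. A minimal sketch of the pattern (plain C, made-up resources, not the driver code):

    #include <stdlib.h>

    /* goto-based unwinding: labels run in reverse order of setup. */
    static int init_example(void)
    {
            char *a, *b;

            a = malloc(16);
            if (!a)
                    goto out1;
            b = malloc(16);
            if (!b)
                    goto out2;

            return 0;       /* success: both resources stay allocated */

    out2:
            free(a);
    out1:
            return -1;
    }

    int main(void)
    {
            return init_example() ? 1 : 0;
    }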
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c
index 96cb1f0..8b31695 100644
--- a/drivers/char/moxa.c
+++ b/drivers/char/moxa.c
@@ -222,7 +222,7 @@
 /*
  * static functions:
  */
-static void do_moxa_softint(void *);
+static void do_moxa_softint(struct work_struct *);
 static int moxa_open(struct tty_struct *, struct file *);
 static void moxa_close(struct tty_struct *, struct file *);
 static int moxa_write(struct tty_struct *, const unsigned char *, int);
@@ -363,7 +363,7 @@
 	for (i = 0, ch = moxaChannels; i < MAX_PORTS; i++, ch++) {
 		ch->type = PORT_16550A;
 		ch->port = i;
-		INIT_WORK(&ch->tqueue, do_moxa_softint, ch);
+		INIT_WORK(&ch->tqueue, do_moxa_softint);
 		ch->tty = NULL;
 		ch->close_delay = 5 * HZ / 10;
 		ch->closing_wait = 30 * HZ;
@@ -498,9 +498,12 @@
 		printk("Couldn't unregister MOXA Intellio family serial driver\n");
 	put_tty_driver(moxaDriver);
 
-	for (i = 0; i < MAX_BOARDS; i++)
+	for (i = 0; i < MAX_BOARDS; i++) {
+		if (moxaBaseAddr[i])
+			iounmap(moxaBaseAddr[i]);
 		if (moxa_boards[i].busType == MOXA_BUS_TYPE_PCI)
 			pci_dev_put(moxa_boards[i].pciInfo.pdev);
+	}
 
 	if (verbose)
 		printk("Done\n");
@@ -509,9 +512,9 @@
 module_init(moxa_init);
 module_exit(moxa_exit);
 
-static void do_moxa_softint(void *private_)
+static void do_moxa_softint(struct work_struct *work)
 {
-	struct moxa_str *ch = (struct moxa_str *) private_;
+	struct moxa_str *ch = container_of(work, struct moxa_str, tqueue);
 	struct tty_struct *tty;
 
 	if (ch && (tty = ch->tty)) {
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c
index 048d911..5ed2486 100644
--- a/drivers/char/mxser.c
+++ b/drivers/char/mxser.c
@@ -389,7 +389,7 @@
 /* static void   mxser_poll(unsigned long); */
 static int mxser_get_ISA_conf(int, struct mxser_hwconf *);
 static int mxser_get_PCI_conf(int, int, int, struct mxser_hwconf *);
-static void mxser_do_softint(void *);
+static void mxser_do_softint(struct work_struct *);
 static int mxser_open(struct tty_struct *, struct file *);
 static void mxser_close(struct tty_struct *, struct file *);
 static int mxser_write(struct tty_struct *, const unsigned char *, int);
@@ -590,7 +590,7 @@
 		info->custom_divisor = hwconf->baud_base[i] * 16;
 		info->close_delay = 5 * HZ / 10;
 		info->closing_wait = 30 * HZ;
-		INIT_WORK(&info->tqueue, mxser_do_softint, info);
+		INIT_WORK(&info->tqueue, mxser_do_softint);
 		info->normal_termios = mxvar_sdriver->init_termios;
 		init_waitqueue_head(&info->open_wait);
 		init_waitqueue_head(&info->close_wait);
@@ -917,9 +917,10 @@
 	return 0;
 }
 
-static void mxser_do_softint(void *private_)
+static void mxser_do_softint(struct work_struct *work)
 {
-	struct mxser_struct *info = private_;
+	struct mxser_struct *info =
+		container_of(work, struct mxser_struct, tqueue);
 	struct tty_struct *tty;
 
 	tty = info->tty;
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index f9f7250..74d21c1 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -75,8 +75,10 @@
 #include <pcmcia/cisreg.h>
 #include <pcmcia/ds.h>
 
-#ifdef CONFIG_HDLC_MODULE
-#define CONFIG_HDLC 1
+#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_CS_MODULE))
+#define SYNCLINK_GENERIC_HDLC 1
+#else
+#define SYNCLINK_GENERIC_HDLC 0
 #endif
 
 #define GET_USER(error,value,addr) error = get_user(value,addr)
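
The SYNCLINK_GENERIC_HDLC define above replaces the old trick of redefining CONFIG_HDLC: the option is folded into a plain 0/1 macro, so a single "#if SYNCLINK_GENERIC_HDLC" covers both the built-in case and the case where both HDLC and the driver are modules. A tiny standalone illustration of the same 0/1 feature-flag idiom (hypothetical HAVE_* names):

    #include <stdio.h>

    /* Fold related config symbols into one 0/1 flag usable with "#if". */
    #if defined(HAVE_FEATURE) || \
        (defined(HAVE_FEATURE_MODULE) && defined(BUILDING_AS_MODULE))
    #define FEATURE_ENABLED 1
    #else
    #define FEATURE_ENABLED 0
    #endif

    int main(void)
    {
    #if FEATURE_ENABLED
            printf("feature compiled in\n");
    #else
            printf("feature compiled out\n");
    #endif
            return 0;
    }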
@@ -235,7 +237,7 @@
 	int dosyncppp;
 	spinlock_t netlock;
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 	struct net_device *netdev;
 #endif
 
@@ -392,7 +394,7 @@
 
 static int ioctl_common(MGSLPC_INFO *info, unsigned int cmd, unsigned long arg);
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 #define dev_to_port(D) (dev_to_hdlc(D)->priv)
 static void hdlcdev_tx_done(MGSLPC_INFO *info);
 static void hdlcdev_rx(MGSLPC_INFO *info, char *buf, int size);
@@ -421,7 +423,7 @@
 /*
  * Bottom half interrupt handlers
  */
-static void bh_handler(void* Context);
+static void bh_handler(struct work_struct *work);
 static void bh_transmit(MGSLPC_INFO *info);
 static void bh_status(MGSLPC_INFO *info);
 
@@ -547,7 +549,7 @@
 
     memset(info, 0, sizeof(MGSLPC_INFO));
     info->magic = MGSLPC_MAGIC;
-    INIT_WORK(&info->task, bh_handler, info);
+    INIT_WORK(&info->task, bh_handler);
     info->max_frame_size = 4096;
     info->close_delay = 5*HZ/10;
     info->closing_wait = 30*HZ;
@@ -835,9 +837,9 @@
 	return rc;
 }
 
-static void bh_handler(void* Context)
+static void bh_handler(struct work_struct *work)
 {
-	MGSLPC_INFO *info = (MGSLPC_INFO*)Context;
+	MGSLPC_INFO *info = container_of(work, MGSLPC_INFO, task);
 	int action;
 
 	if (!info)
@@ -1053,7 +1055,7 @@
 		info->drop_rts_on_tx_done = 0;
 	}
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 	if (info->netcount)
 		hdlcdev_tx_done(info);
 	else 
@@ -1164,7 +1166,7 @@
 	}
 	else
 		info->input_signal_events.dcd_down++;
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 	if (info->netcount) {
 		if (info->serial_signals & SerialSignal_DCD)
 			netif_carrier_on(info->netdev);
@@ -2953,7 +2955,7 @@
 	printk( "SyncLink PC Card %s:IO=%04X IRQ=%d\n",
 		info->device_name, info->io_base, info->irq_level);
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 	hdlcdev_init(info);
 #endif
 }
@@ -2969,7 +2971,7 @@
 				last->next_device = info->next_device;
 			else
 				mgslpc_device_list = info->next_device;
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 			hdlcdev_exit(info);
 #endif
 			release_resources(info);
@@ -3901,7 +3903,7 @@
 				return_frame = 1;
 		}
 		framesize = 0;
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 		{
 			struct net_device_stats *stats = hdlc_stats(info->netdev);
 			stats->rx_errors++;
@@ -3935,7 +3937,7 @@
 				++framesize;
 			}
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 			if (info->netcount)
 				hdlcdev_rx(info, buf->data, framesize);
 			else
@@ -4091,7 +4093,7 @@
 
 	spin_unlock_irqrestore(&info->lock,flags);
 	
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 	if (info->netcount)
 		hdlcdev_tx_done(info);
 	else
@@ -4099,7 +4101,7 @@
 		bh_transmit(info);
 }
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 
 /**
  * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index d40df30..4c6782a 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1422,9 +1422,9 @@
 
 static unsigned int ip_cnt;
 
-static void rekey_seq_generator(void *private_);
+static void rekey_seq_generator(struct work_struct *work);
 
-static DECLARE_WORK(rekey_work, rekey_seq_generator, NULL);
+static DECLARE_DELAYED_WORK(rekey_work, rekey_seq_generator);
 
 /*
  * Lock avoidance:
@@ -1438,7 +1438,7 @@
  * happen, and even if that happens only a not perfectly compliant
  * ISN is generated, nothing fatal.
  */
-static void rekey_seq_generator(void *private_)
+static void rekey_seq_generator(struct work_struct *work)
 {
 	struct keydata *keyptr = &ip_keydata[1 ^ (ip_cnt & 1)];
 
diff --git a/drivers/char/rio/rio_linux.c b/drivers/char/rio/rio_linux.c
index 7ac68cb..e79b2ed 100644
--- a/drivers/char/rio/rio_linux.c
+++ b/drivers/char/rio/rio_linux.c
@@ -1026,6 +1026,7 @@
 			found++;
 		} else {
 			iounmap(p->RIOHosts[p->RIONumHosts].Caddr);
+			p->RIOHosts[p->RIONumHosts].Caddr = NULL;
 		}
 	}
 
@@ -1078,6 +1079,7 @@
 			found++;
 		} else {
 			iounmap(p->RIOHosts[p->RIONumHosts].Caddr);
+			p->RIOHosts[p->RIONumHosts].Caddr = NULL;
 		}
 #else
 		printk(KERN_ERR "Found an older RIO PCI card, but the driver is not " "compiled to support it.\n");
@@ -1117,8 +1119,10 @@
 				}
 			}
 
-			if (!okboard)
+			if (!okboard) {
 				iounmap(hp->Caddr);
+				hp->Caddr = NULL;
+			}
 		}
 	}
 
@@ -1188,6 +1192,8 @@
 		}
 		/* It is safe/allowed to del_timer a non-active timer */
 		del_timer(&hp->timer);
+		if (hp->Caddr)
+			iounmap(hp->Caddr);
 		if (hp->Type == RIO_PCI)
 			pci_dev_put(hp->pdev);
 	}
diff --git a/drivers/char/riscom8.c b/drivers/char/riscom8.c
index 5ab32b3..0a77bfc 100644
--- a/drivers/char/riscom8.c
+++ b/drivers/char/riscom8.c
@@ -82,11 +82,6 @@
 static struct riscom_board * IRQ_to_board[16];
 static struct tty_driver *riscom_driver;
 
-static unsigned long baud_table[] =  {
-	0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
-	9600, 19200, 38400, 57600, 76800, 0, 
-};
-
 static struct riscom_board rc_board[RC_NBOARD] =  {
 	{
 		.base	= RC_IOBASE1,
@@ -1516,9 +1511,9 @@
  * 	do_rc_hangup() -> tty->hangup() -> rc_hangup()
  * 
  */
-static void do_rc_hangup(void *private_)
+static void do_rc_hangup(struct work_struct *ugly_api)
 {
-	struct riscom_port	*port = (struct riscom_port *) private_;
+	struct riscom_port	*port = container_of(ugly_api, struct riscom_port, tqueue_hangup);
 	struct tty_struct	*tty;
 	
 	tty = port->tty;
@@ -1567,9 +1562,9 @@
 	}
 }
 
-static void do_softint(void *private_)
+static void do_softint(struct work_struct *ugly_api)
 {
-	struct riscom_port	*port = (struct riscom_port *) private_;
+	struct riscom_port	*port = container_of(ugly_api, struct riscom_port, tqueue);
 	struct tty_struct	*tty;
 	
 	if(!(tty = port->tty)) 
@@ -1632,8 +1627,8 @@
 	memset(rc_port, 0, sizeof(rc_port));
 	for (i = 0; i < RC_NPORT * RC_NBOARD; i++)  {
 		rc_port[i].magic = RISCOM8_MAGIC;
-		INIT_WORK(&rc_port[i].tqueue, do_softint, &rc_port[i]);
-		INIT_WORK(&rc_port[i].tqueue_hangup, do_rc_hangup, &rc_port[i]);
+		INIT_WORK(&rc_port[i].tqueue, do_softint);
+		INIT_WORK(&rc_port[i].tqueue_hangup, do_rc_hangup);
 		rc_port[i].close_delay = 50 * HZ/100;
 		rc_port[i].closing_wait = 3000 * HZ/100;
 		init_waitqueue_head(&rc_port[i].open_wait);
diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c
index 3af7f09..9ba13af 100644
--- a/drivers/char/serial167.c
+++ b/drivers/char/serial167.c
@@ -706,9 +706,9 @@
  * had to poll every port to see if that port needed servicing.
  */
 static void
-do_softint(void *private_)
+do_softint(struct work_struct *ugly_api)
 {
-  struct cyclades_port *info = (struct cyclades_port *) private_;
+  struct cyclades_port *info = container_of(ugly_api, struct cyclades_port, tqueue);
   struct tty_struct    *tty;
 
     tty = info->tty;
@@ -2273,7 +2273,7 @@
 		info->blocked_open = 0;
 		info->default_threshold = 0;
 		info->default_timeout = 0;
-		INIT_WORK(&info->tqueue, do_softint, info);
+		INIT_WORK(&info->tqueue, do_softint);
 		init_waitqueue_head(&info->open_wait);
 		init_waitqueue_head(&info->close_wait);
 		/* info->session */
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index c084149..fc87070 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -765,7 +765,7 @@
 	sonypi_device.bluetooth_power = state;
 }
 
-static void input_keyrelease(void *data)
+static void input_keyrelease(struct work_struct *work)
 {
 	struct sonypi_keypress kp;
 
@@ -1412,7 +1412,7 @@
 			goto err_inpdev_unregister;
 		}
 
-		INIT_WORK(&sonypi_device.input_work, input_keyrelease, NULL);
+		INIT_WORK(&sonypi_device.input_work, input_keyrelease);
 	}
 
 	sonypi_enable(0);
diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c
index 7e1bd95..99137ab 100644
--- a/drivers/char/specialix.c
+++ b/drivers/char/specialix.c
@@ -2261,9 +2261,10 @@
  * 	do_sx_hangup() -> tty->hangup() -> sx_hangup()
  *
  */
-static void do_sx_hangup(void *private_)
+static void do_sx_hangup(struct work_struct *work)
 {
-	struct specialix_port	*port = (struct specialix_port *) private_;
+	struct specialix_port	*port =
+		container_of(work, struct specialix_port, tqueue_hangup);
 	struct tty_struct	*tty;
 
 	func_enter();
@@ -2336,9 +2337,10 @@
 }
 
 
-static void do_softint(void *private_)
+static void do_softint(struct work_struct *work)
 {
-	struct specialix_port	*port = (struct specialix_port *) private_;
+	struct specialix_port	*port =
+		container_of(work, struct specialix_port, tqueue);
 	struct tty_struct	*tty;
 
 	func_enter();
@@ -2411,8 +2413,8 @@
 	memset(sx_port, 0, sizeof(sx_port));
 	for (i = 0; i < SX_NPORT * SX_NBOARD; i++) {
 		sx_port[i].magic = SPECIALIX_MAGIC;
-		INIT_WORK(&sx_port[i].tqueue, do_softint, &sx_port[i]);
-		INIT_WORK(&sx_port[i].tqueue_hangup, do_sx_hangup, &sx_port[i]);
+		INIT_WORK(&sx_port[i].tqueue, do_softint);
+		INIT_WORK(&sx_port[i].tqueue_hangup, do_sx_hangup);
 		sx_port[i].close_delay = 50 * HZ/100;
 		sx_port[i].closing_wait = 3000 * HZ/100;
 		init_waitqueue_head(&sx_port[i].open_wait);
diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
index 522e88e..5e2de62 100644
--- a/drivers/char/stallion.c
+++ b/drivers/char/stallion.c
@@ -500,7 +500,7 @@
 static int	stl_echmcaintr(stlbrd_t *brdp);
 static int	stl_echpciintr(stlbrd_t *brdp);
 static int	stl_echpci64intr(stlbrd_t *brdp);
-static void	stl_offintr(void *private);
+static void	stl_offintr(struct work_struct *);
 static stlbrd_t *stl_allocbrd(void);
 static stlport_t *stl_getport(int brdnr, int panelnr, int portnr);
 
@@ -2081,14 +2081,12 @@
 /*
  *	Service an off-level request for some channel.
  */
-static void stl_offintr(void *private)
+static void stl_offintr(struct work_struct *work)
 {
-	stlport_t		*portp;
+	stlport_t		*portp = container_of(work, stlport_t, tqueue);
 	struct tty_struct	*tty;
 	unsigned int		oldsigs;
 
-	portp = private;
-
 #ifdef DEBUG
 	printk("stl_offintr(portp=%x)\n", (int) portp);
 #endif
@@ -2156,7 +2154,7 @@
 		portp->baud_base = STL_BAUDBASE;
 		portp->close_delay = STL_CLOSEDELAY;
 		portp->closing_wait = 30 * HZ;
-		INIT_WORK(&portp->tqueue, stl_offintr, portp);
+		INIT_WORK(&portp->tqueue, stl_offintr);
 		init_waitqueue_head(&portp->open_wait);
 		init_waitqueue_head(&portp->close_wait);
 		portp->stats.brd = portp->brdnr;
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c
index 06784ad..645187b 100644
--- a/drivers/char/synclink.c
+++ b/drivers/char/synclink.c
@@ -101,8 +101,10 @@
 #include <linux/hdlc.h>
 #include <linux/dma-mapping.h>
 
-#ifdef CONFIG_HDLC_MODULE
-#define CONFIG_HDLC 1
+#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_MODULE))
+#define SYNCLINK_GENERIC_HDLC 1
+#else
+#define SYNCLINK_GENERIC_HDLC 0
 #endif
 
 #define GET_USER(error,value,addr) error = get_user(value,addr)
@@ -320,7 +322,7 @@
 	int dosyncppp;
 	spinlock_t netlock;
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 	struct net_device *netdev;
 #endif
 };
@@ -728,7 +730,7 @@
 
 static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg);
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 #define dev_to_port(D) (dev_to_hdlc(D)->priv)
 static void hdlcdev_tx_done(struct mgsl_struct *info);
 static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size);
@@ -802,7 +804,7 @@
 /*
  * Bottom half interrupt handlers
  */
-static void mgsl_bh_handler(void* Context);
+static void mgsl_bh_handler(struct work_struct *work);
 static void mgsl_bh_receive(struct mgsl_struct *info);
 static void mgsl_bh_transmit(struct mgsl_struct *info);
 static void mgsl_bh_status(struct mgsl_struct *info);
@@ -1071,9 +1073,10 @@
 /*
  * 	Perform bottom half processing of work items queued by ISR.
  */
-static void mgsl_bh_handler(void* Context)
+static void mgsl_bh_handler(struct work_struct *work)
 {
-	struct mgsl_struct *info = (struct mgsl_struct*)Context;
+	struct mgsl_struct *info =
+		container_of(work, struct mgsl_struct, task);
 	int action;
 
 	if (!info)
@@ -1276,7 +1279,7 @@
 		info->drop_rts_on_tx_done = 0;
 	}
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 	if (info->netcount)
 		hdlcdev_tx_done(info);
 	else 
@@ -1341,7 +1344,7 @@
 				info->input_signal_events.dcd_up++;
 			} else
 				info->input_signal_events.dcd_down++;
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 			if (info->netcount) {
 				if (status & MISCSTATUS_DCD)
 					netif_carrier_on(info->netdev);
@@ -4312,7 +4315,7 @@
 		     	info->max_frame_size );
 	}
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 	hdlcdev_init(info);
 #endif
 
@@ -4337,7 +4340,7 @@
 	} else {
 		memset(info, 0, sizeof(struct mgsl_struct));
 		info->magic = MGSL_MAGIC;
-		INIT_WORK(&info->task, mgsl_bh_handler, info);
+		INIT_WORK(&info->task, mgsl_bh_handler);
 		info->max_frame_size = 4096;
 		info->close_delay = 5*HZ/10;
 		info->closing_wait = 30*HZ;
@@ -4470,7 +4473,7 @@
 
 	info = mgsl_device_list;
 	while(info) {
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 		hdlcdev_exit(info);
 #endif
 		mgsl_release_resources(info);
@@ -6644,7 +6647,7 @@
 				return_frame = 1;
 		}
 		framesize = 0;
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 		{
 			struct net_device_stats *stats = hdlc_stats(info->netdev);
 			stats->rx_errors++;
@@ -6720,7 +6723,7 @@
 						*ptmp);
 			}
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 			if (info->netcount)
 				hdlcdev_rx(info,info->intermediate_rxbuffer,framesize);
 			else
@@ -7624,7 +7627,7 @@
 
 	spin_unlock_irqrestore(&info->irq_spinlock,flags);
 	
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 	if (info->netcount)
 		hdlcdev_tx_done(info);
 	else
@@ -7700,7 +7703,7 @@
  	return usc_InReg( info, CCSR ) & BIT7 ? 1 : 0 ;
 }
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 
 /**
  * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index d4334c7..e4730a7 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -83,8 +83,10 @@
 
 #include "linux/synclink.h"
 
-#ifdef CONFIG_HDLC_MODULE
-#define CONFIG_HDLC 1
+#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_GT_MODULE))
+#define SYNCLINK_GENERIC_HDLC 1
+#else
+#define SYNCLINK_GENERIC_HDLC 0
 #endif
 
 /*
@@ -171,7 +173,7 @@
 /*
  * generic HDLC support and callbacks
  */
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 #define dev_to_port(D) (dev_to_hdlc(D)->priv)
 static void hdlcdev_tx_done(struct slgt_info *info);
 static void hdlcdev_rx(struct slgt_info *info, char *buf, int size);
@@ -359,7 +361,7 @@
 	int netcount;
 	int dosyncppp;
 	spinlock_t netlock;
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 	struct net_device *netdev;
 #endif
 
@@ -485,7 +487,7 @@
 static void set_rate(struct slgt_info *info, u32 data_rate);
 
 static int  bh_action(struct slgt_info *info);
-static void bh_handler(void* context);
+static void bh_handler(struct work_struct *work);
 static void bh_transmit(struct slgt_info *info);
 static void isr_serial(struct slgt_info *info);
 static void isr_rdma(struct slgt_info *info);
@@ -1354,7 +1356,7 @@
 	spin_unlock_irqrestore(&info->lock,flags);
 }
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 
 /**
  * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
@@ -1878,9 +1880,9 @@
 /*
  * perform bottom half processing
  */
-static void bh_handler(void* context)
+static void bh_handler(struct work_struct *work)
 {
-	struct slgt_info *info = context;
+	struct slgt_info *info = container_of(work, struct slgt_info, task);
 	int action;
 
 	if (!info)
@@ -2002,7 +2004,7 @@
 	} else {
 		info->input_signal_events.dcd_down++;
 	}
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 	if (info->netcount) {
 		if (info->signals & SerialSignal_DCD)
 			netif_carrier_on(info->netdev);
@@ -2180,7 +2182,7 @@
 			set_signals(info);
 		}
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 		if (info->netcount)
 			hdlcdev_tx_done(info);
 		else
@@ -3306,7 +3308,7 @@
 		devstr, info->device_name, info->phys_reg_addr,
 		info->irq_level, info->max_frame_size);
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 	hdlcdev_init(info);
 #endif
 }
@@ -3326,7 +3328,7 @@
 	} else {
 		memset(info, 0, sizeof(struct slgt_info));
 		info->magic = MGSL_MAGIC;
-		INIT_WORK(&info->task, bh_handler, info);
+		INIT_WORK(&info->task, bh_handler);
 		info->max_frame_size = 4096;
 		info->raw_rx_size = DMABUFSIZE;
 		info->close_delay = 5*HZ/10;
@@ -3488,7 +3490,7 @@
 	/* release devices */
 	info = slgt_device_list;
 	while(info) {
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 		hdlcdev_exit(info);
 #endif
 		free_dma_bufs(info);
@@ -3522,6 +3524,7 @@
 
 	if (!slgt_device_list) {
 		printk("%s no devices found\n",driver_name);
+		pci_unregister_driver(&pci_driver);
 		return -ENODEV;
 	}
 
@@ -4433,7 +4436,7 @@
 			framesize = 0;
 	}
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 	if (framesize == 0) {
 		struct net_device_stats *stats = hdlc_stats(info->netdev);
 		stats->rx_errors++;
@@ -4476,7 +4479,7 @@
 				framesize++;
 			}
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 			if (info->netcount)
 				hdlcdev_rx(info,info->tmp_rbuf, framesize);
 			else
@@ -4779,7 +4782,7 @@
 	info->tx_count = 0;
 	spin_unlock_irqrestore(&info->lock,flags);
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 	if (info->netcount)
 		hdlcdev_tx_done(info);
 	else
@@ -4799,6 +4802,6 @@
 	spin_lock_irqsave(&info->lock, flags);
 	info->pending_bh |= BH_RECEIVE;
 	spin_unlock_irqrestore(&info->lock, flags);
-	bh_handler(info);
+	bh_handler(&info->task);
 }
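
The change that recurs through the rest of this merge: work functions now receive the struct work_struct pointer itself instead of an opaque void *, recover their context with container_of(), and INIT_WORK() loses its third argument. That is also why the direct call just above becomes bh_handler(&info->task): the handler only needs the embedded work item to find info. A minimal sketch of the new-style usage, all names invented, assuming linux/workqueue.h:

    struct mydev {
            unsigned int pending_bh;
            struct work_struct task;        /* embedded work item */
    };

    static void mydev_bh(struct work_struct *work)
    {
            /* map the work item back to its enclosing structure */
            struct mydev *dev = container_of(work, struct mydev, task);

            /* ... process dev->pending_bh ... */
    }

    /* at init time: no context pointer any more */
    INIT_WORK(&dev->task, mydev_bh);
    schedule_work(&dev->task);
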
 
diff --git a/drivers/char/synclinkmp.c b/drivers/char/synclinkmp.c
index 3e932b6..20a96ef 100644
--- a/drivers/char/synclinkmp.c
+++ b/drivers/char/synclinkmp.c
@@ -67,8 +67,10 @@
 #include <linux/workqueue.h>
 #include <linux/hdlc.h>
 
-#ifdef CONFIG_HDLC_MODULE
-#define CONFIG_HDLC 1
+#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINKMP_MODULE))
+#define SYNCLINK_GENERIC_HDLC 1
+#else
+#define SYNCLINK_GENERIC_HDLC 0
 #endif
 
 #define GET_USER(error,value,addr) error = get_user(value,addr)
@@ -280,7 +282,7 @@
 	int dosyncppp;
 	spinlock_t netlock;
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 	struct net_device *netdev;
 #endif
 
@@ -536,7 +538,7 @@
 static void unthrottle(struct tty_struct * tty);
 static void set_break(struct tty_struct *tty, int break_state);
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 #define dev_to_port(D) (dev_to_hdlc(D)->priv)
 static void hdlcdev_tx_done(SLMP_INFO *info);
 static void hdlcdev_rx(SLMP_INFO *info, char *buf, int size);
@@ -602,7 +604,7 @@
 static void set_rate(SLMP_INFO *info, u32 data_rate);
 
 static int  bh_action(SLMP_INFO *info);
-static void bh_handler(void* Context);
+static void bh_handler(struct work_struct *work);
 static void bh_receive(SLMP_INFO *info);
 static void bh_transmit(SLMP_INFO *info);
 static void bh_status(SLMP_INFO *info);
@@ -1607,7 +1609,7 @@
 	spin_unlock_irqrestore(&info->lock,flags);
 }
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 
 /**
  * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
@@ -2063,9 +2065,9 @@
 
 /* Perform bottom half processing of work items queued by ISR.
  */
-void bh_handler(void* Context)
+void bh_handler(struct work_struct *work)
 {
-	SLMP_INFO *info = (SLMP_INFO*)Context;
+	SLMP_INFO *info = container_of(work, SLMP_INFO, task);
 	int action;
 
 	if (!info)
@@ -2339,7 +2341,7 @@
 			set_signals(info);
 		}
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 		if (info->netcount)
 			hdlcdev_tx_done(info);
 		else
@@ -2523,7 +2525,7 @@
 				info->input_signal_events.dcd_up++;
 			} else
 				info->input_signal_events.dcd_down++;
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 			if (info->netcount) {
 				if (status & SerialSignal_DCD)
 					netif_carrier_on(info->netdev);
@@ -3783,7 +3785,7 @@
 		info->irq_level,
 		info->max_frame_size );
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 	hdlcdev_init(info);
 #endif
 }
@@ -3805,7 +3807,7 @@
 	} else {
 		memset(info, 0, sizeof(SLMP_INFO));
 		info->magic = MGSL_MAGIC;
-		INIT_WORK(&info->task, bh_handler, info);
+		INIT_WORK(&info->task, bh_handler);
 		info->max_frame_size = 4096;
 		info->close_delay = 5*HZ/10;
 		info->closing_wait = 30*HZ;
@@ -3977,7 +3979,7 @@
 	/* release devices */
 	info = synclinkmp_device_list;
 	while(info) {
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 		hdlcdev_exit(info);
 #endif
 		free_dma_bufs(info);
@@ -4979,7 +4981,7 @@
 			info->icount.rxcrc++;
 
 		framesize = 0;
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 		{
 			struct net_device_stats *stats = hdlc_stats(info->netdev);
 			stats->rx_errors++;
@@ -5020,7 +5022,7 @@
 					index = 0;
 			}
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 			if (info->netcount)
 				hdlcdev_rx(info,info->tmp_rx_buf,framesize);
 			else
@@ -5531,7 +5533,7 @@
 
 	spin_unlock_irqrestore(&info->lock,flags);
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 	if (info->netcount)
 		hdlcdev_tx_done(info);
 	else
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 5f49280..05810c8 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -182,6 +182,18 @@
 	.enable_mask	= SYSRQ_ENABLE_DUMP,
 };
 
+static void sysrq_handle_showstate_blocked(int key, struct tty_struct *tty)
+{
+	show_state_filter(TASK_UNINTERRUPTIBLE);
+}
+static struct sysrq_key_op sysrq_showstate_blocked_op = {
+	.handler	= sysrq_handle_showstate_blocked,
+	.help_msg	= "showBlockedTasks",
+	.action_msg	= "Show Blocked State",
+	.enable_mask	= SYSRQ_ENABLE_DUMP,
+};
+
+
 static void sysrq_handle_showmem(int key, struct tty_struct *tty)
 {
 	show_mem();
@@ -219,13 +231,13 @@
 	.enable_mask	= SYSRQ_ENABLE_SIGNAL,
 };
 
-static void moom_callback(void *ignored)
+static void moom_callback(struct work_struct *ignored)
 {
 	out_of_memory(&NODE_DATA(0)->node_zonelists[ZONE_NORMAL],
 			GFP_KERNEL, 0);
 }
 
-static DECLARE_WORK(moom_work, moom_callback, NULL);
+static DECLARE_WORK(moom_work, moom_callback);
 
 static void sysrq_handle_moom(int key, struct tty_struct *tty)
 {
@@ -304,7 +316,7 @@
 	/* May be assigned at init time by SMP VOYAGER */
 	NULL,				/* v */
 	NULL,				/* w */
-	NULL,				/* x */
+	&sysrq_showstate_blocked_op,	/* x */
 	NULL,				/* y */
 	NULL				/* z */
 };
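
Two things happen in sysrq.c: a new 'x' key dumps only uninterruptible (blocked) tasks via show_state_filter(TASK_UNINTERRUPTIBLE), and the OOM callback shows the statically-declared side of the workqueue change, since DECLARE_WORK() also drops its data argument. A sketch of the static case, names invented:

    static void oom_example(struct work_struct *ignored)
    {
            /* no per-item pointer any more: anything the handler needs
             * must be global or reachable via container_of()          */
    }
    static DECLARE_WORK(example_work, oom_example);

    /* from an atomic path: */
    schedule_work(&example_work);
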
diff --git a/drivers/char/toshiba.c b/drivers/char/toshiba.c
index dd36fd0..07067c3 100644
--- a/drivers/char/toshiba.c
+++ b/drivers/char/toshiba.c
@@ -249,6 +249,7 @@
 
 	return eax;
 }
+EXPORT_SYMBOL(tosh_smm);
 
 
 static int tosh_ioctl(struct inode *ip, struct file *fp, unsigned int cmd,
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 6e1329d..33e1f66 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -325,9 +325,9 @@
 	schedule_work(&chip->work);
 }
 
-static void timeout_work(void *ptr)
+static void timeout_work(struct work_struct *work)
 {
-	struct tpm_chip *chip = ptr;
+	struct tpm_chip *chip = container_of(work, struct tpm_chip, work);
 
 	down(&chip->buffer_mutex);
 	atomic_set(&chip->data_pending, 0);
@@ -1105,7 +1105,7 @@
 	init_MUTEX(&chip->tpm_mutex);
 	INIT_LIST_HEAD(&chip->list);
 
-	INIT_WORK(&chip->work, timeout_work, chip);
+	INIT_WORK(&chip->work, timeout_work);
 
 	init_timer(&chip->user_read_timer);
 	chip->user_read_timer.function = user_reader_timeout;
@@ -1155,6 +1155,7 @@
 
 	if (sysfs_create_group(&dev->kobj, chip->vendor.attr_group)) {
 		list_del(&chip->list);
+		misc_deregister(&chip->vendor.miscdev);
 		put_device(dev);
 		clear_bit(chip->dev_num, dev_mask);
 		kfree(chip);
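
The tpm.c hunks show both halves of the handler conversion plus a separate error-path fix (the misc device is now deregistered before the chip is freed). The container_of() step is ordinary pointer arithmetic; the following stand-alone user-space program (every name invented) mimics what the kernel macro does, minus its type checking:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work_struct { int pending; };            /* stand-in */

    struct chip {
            int id;
            struct work_struct work;
    };

    static void timeout_work(struct work_struct *work)
    {
            struct chip *c = container_of(work, struct chip, work);
            printf("timeout on chip %d\n", c->id);
    }

    int main(void)
    {
            struct chip c = { .id = 7 };
            timeout_work(&c.work);          /* prints "timeout on chip 7" */
            return 0;
    }
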
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 50dc492..b3cfc8b 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -1254,7 +1254,7 @@
 	
 /**
  *	do_tty_hangup		-	actual handler for hangup events
- *	@data: tty device
+ *	@work: tty device
  *
  *	This can be called by the "eventd" kernel thread.  That is process
  *	synchronous but doesn't hold any locks, so we need to make sure we
@@ -1274,9 +1274,10 @@
  *		tasklist_lock to walk task list for hangup event
  *
  */
-static void do_tty_hangup(void *data)
+static void do_tty_hangup(struct work_struct *work)
 {
-	struct tty_struct *tty = (struct tty_struct *) data;
+	struct tty_struct *tty =
+		container_of(work, struct tty_struct, hangup_work);
 	struct file * cons_filp = NULL;
 	struct file *filp, *f = NULL;
 	struct task_struct *p;
@@ -1433,7 +1434,7 @@
 
 	printk(KERN_DEBUG "%s vhangup...\n", tty_name(tty, buf));
 #endif
-	do_tty_hangup((void *) tty);
+	do_tty_hangup(&tty->hangup_work);
 }
 EXPORT_SYMBOL(tty_vhangup);
 
@@ -3304,12 +3305,13 @@
  * Nasty bug: do_SAK is being called in interrupt context.  This can
  * deadlock.  We punt it up to process context.  AKPM - 16Mar2001
  */
-static void __do_SAK(void *arg)
+static void __do_SAK(struct work_struct *work)
 {
+	struct tty_struct *tty =
+		container_of(work, struct tty_struct, SAK_work);
 #ifdef TTY_SOFT_SAK
 	tty_hangup(tty);
 #else
-	struct tty_struct *tty = arg;
 	struct task_struct *g, *p;
 	int session;
 	int		i;
@@ -3388,7 +3390,7 @@
 {
 	if (!tty)
 		return;
-	PREPARE_WORK(&tty->SAK_work, __do_SAK, tty);
+	PREPARE_WORK(&tty->SAK_work, __do_SAK);
 	schedule_work(&tty->SAK_work);
 }
 
@@ -3396,7 +3398,7 @@
 
 /**
  *	flush_to_ldisc
- *	@private_: tty structure passed from work queue.
+ *	@work: tty structure passed from work queue.
  *
  *	This routine is called out of the software interrupt to flush data
  *	from the buffer chain to the line discipline.
@@ -3406,9 +3408,10 @@
  *	receive_buf method is single threaded for each tty instance.
  */
  
-static void flush_to_ldisc(void *private_)
+static void flush_to_ldisc(struct work_struct *work)
 {
-	struct tty_struct *tty = (struct tty_struct *) private_;
+	struct tty_struct *tty =
+		container_of(work, struct tty_struct, buf.work.work);
 	unsigned long 	flags;
 	struct tty_ldisc *disc;
 	struct tty_buffer *tbuf, *head;
@@ -3553,7 +3556,7 @@
 	spin_unlock_irqrestore(&tty->buf.lock, flags);
 
 	if (tty->low_latency)
-		flush_to_ldisc((void *) tty);
+		flush_to_ldisc(&tty->buf.work.work);
 	else
 		schedule_delayed_work(&tty->buf.work, 1);
 }
@@ -3580,17 +3583,17 @@
 	tty->overrun_time = jiffies;
 	tty->buf.head = tty->buf.tail = NULL;
 	tty_buffer_init(tty);
-	INIT_WORK(&tty->buf.work, flush_to_ldisc, tty);
+	INIT_DELAYED_WORK(&tty->buf.work, flush_to_ldisc);
 	init_MUTEX(&tty->buf.pty_sem);
 	mutex_init(&tty->termios_mutex);
 	init_waitqueue_head(&tty->write_wait);
 	init_waitqueue_head(&tty->read_wait);
-	INIT_WORK(&tty->hangup_work, do_tty_hangup, tty);
+	INIT_WORK(&tty->hangup_work, do_tty_hangup);
 	mutex_init(&tty->atomic_read_lock);
 	mutex_init(&tty->atomic_write_lock);
 	spin_lock_init(&tty->read_lock);
 	INIT_LIST_HEAD(&tty->tty_files);
-	INIT_WORK(&tty->SAK_work, NULL, NULL);
+	INIT_WORK(&tty->SAK_work, NULL);
 }
 
 /*
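
Where the old code paired a plain work_struct with schedule_delayed_work(), as the tty flip-buffer path does, the member becomes a struct delayed_work and the handler peels off one more layer (buf.work.work above), because the work_struct now sits inside the delayed_work. Minimal sketch, names invented:

    struct mydev {
            struct delayed_work dwork;      /* work_struct plus timer */
    };

    static void mydev_flush(struct work_struct *work)
    {
            struct mydev *dev =
                    container_of(work, struct mydev, dwork.work);
            /* ... */
    }

    /* at init time: */
    INIT_DELAYED_WORK(&dev->dwork, mydev_flush);
    schedule_delayed_work(&dev->dwork, 1);          /* in about one jiffy */
    /* a synchronous path may still call mydev_flush(&dev->dwork.work)   */
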
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 87587b4..a8239da 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -152,10 +152,10 @@
 static void save_cur(struct vc_data *vc);
 static void reset_terminal(struct vc_data *vc, int do_clear);
 static void con_flush_chars(struct tty_struct *tty);
-static void set_vesa_blanking(char __user *p);
+static int set_vesa_blanking(char __user *p);
 static void set_cursor(struct vc_data *vc);
 static void hide_cursor(struct vc_data *vc);
-static void console_callback(void *ignored);
+static void console_callback(struct work_struct *ignored);
 static void blank_screen_t(unsigned long dummy);
 static void set_palette(struct vc_data *vc);
 
@@ -174,7 +174,7 @@
 static int blankinterval = 10*60*HZ;
 static int vesa_off_interval;
 
-static DECLARE_WORK(console_work, console_callback, NULL);
+static DECLARE_WORK(console_work, console_callback);
 
 /*
  * fg_console is the current virtual console,
@@ -2154,7 +2154,7 @@
  * with other console code and prevention of re-entrancy is
  * ensured with console_sem.
  */
-static void console_callback(void *ignored)
+static void console_callback(struct work_struct *ignored)
 {
 	acquire_console_sem();
 
@@ -2369,7 +2369,7 @@
 			ret = __put_user(data, p);
 			break;
 		case TIOCL_SETVESABLANK:
-			set_vesa_blanking(p);
+			ret = set_vesa_blanking(p);
 			break;
 		case TIOCL_GETKMSGREDIRECT:
 			data = kmsg_redirect;
@@ -3313,11 +3313,15 @@
  *	Screen blanking
  */
 
-static void set_vesa_blanking(char __user *p)
+static int set_vesa_blanking(char __user *p)
 {
-    unsigned int mode;
-    get_user(mode, p + 1);
-    vesa_blank_mode = (mode < 4) ? mode : 0;
+	unsigned int mode;
+
+	if (get_user(mode, p + 1))
+		return -EFAULT;
+
+	vesa_blank_mode = (mode < 4) ? mode : 0;
+	return 0;
 }
 
 void do_blank_screen(int entering_gfx)
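
Independent of the workqueue rework, set_vesa_blanking() now returns an int so TIOCL_SETVESABLANK can propagate a fault from get_user() instead of ignoring it. The general shape, sketched with invented names:

    static int read_blank_mode(char __user *p, unsigned int *out)
    {
            unsigned int mode;

            if (get_user(mode, p + 1))      /* faulted: tell the caller */
                    return -EFAULT;
            *out = (mode < 4) ? mode : 0;
            return 0;
    }
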
diff --git a/drivers/char/watchdog/pcwd_usb.c b/drivers/char/watchdog/pcwd_usb.c
index e275dd4..6113872 100644
--- a/drivers/char/watchdog/pcwd_usb.c
+++ b/drivers/char/watchdog/pcwd_usb.c
@@ -634,7 +634,7 @@
 	usb_pcwd->intr_size = (le16_to_cpu(endpoint->wMaxPacketSize) > 8 ? le16_to_cpu(endpoint->wMaxPacketSize) : 8);
 
 	/* set up the memory buffer's */
-	if (!(usb_pcwd->intr_buffer = usb_buffer_alloc(udev, usb_pcwd->intr_size, SLAB_ATOMIC, &usb_pcwd->intr_dma))) {
+	if (!(usb_pcwd->intr_buffer = usb_buffer_alloc(udev, usb_pcwd->intr_size, GFP_ATOMIC, &usb_pcwd->intr_dma))) {
 		printk(KERN_ERR PFX "Out of memory\n");
 		goto error;
 	}
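
A second mechanical sweep in this merge replaces the old SLAB_ATOMIC/SLAB_KERNEL names with the gfp_t constants they aliased, so allocation behaviour is unchanged: GFP_ATOMIC for paths that must not sleep, GFP_KERNEL where sleeping is allowed. A small sketch of the atomic case (function and parameter names invented):

    static void *alloc_intr_buffer(struct usb_device *udev, size_t size,
                                   dma_addr_t *dma)
    {
            /* GFP_ATOMIC (the old SLAB_ATOMIC): callable from a path
             * that must not sleep                                     */
            return usb_buffer_alloc(udev, size, GFP_ATOMIC, dma);
    }
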
diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c
index 05f8ce2..b418b16 100644
--- a/drivers/connector/cn_queue.c
+++ b/drivers/connector/cn_queue.c
@@ -31,9 +31,11 @@
 #include <linux/connector.h>
 #include <linux/delay.h>
 
-void cn_queue_wrapper(void *data)
+void cn_queue_wrapper(struct work_struct *work)
 {
-	struct cn_callback_data *d = data;
+	struct cn_callback_entry *cbq =
+		container_of(work, struct cn_callback_entry, work.work);
+	struct cn_callback_data *d = &cbq->data;
 
 	d->callback(d->callback_priv);
 
@@ -57,7 +59,7 @@
 	memcpy(&cbq->id.id, id, sizeof(struct cb_id));
 	cbq->data.callback = callback;
 	
-	INIT_WORK(&cbq->work, &cn_queue_wrapper, &cbq->data);
+	INIT_DELAYED_WORK(&cbq->work, &cn_queue_wrapper);
 	return cbq;
 }
 
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index b49bacf..5e7cd45 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -135,40 +135,39 @@
 	spin_lock_bh(&dev->cbdev->queue_lock);
 	list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) {
 		if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
-			if (likely(!test_bit(0, &__cbq->work.pending) &&
+			if (likely(!test_bit(WORK_STRUCT_PENDING,
+					     &__cbq->work.work.management) &&
 					__cbq->data.ddata == NULL)) {
 				__cbq->data.callback_priv = msg;
 
 				__cbq->data.ddata = data;
 				__cbq->data.destruct_data = destruct_data;
 
-				if (queue_work(dev->cbdev->cn_queue,
-						&__cbq->work))
+				if (queue_delayed_work(
+					    dev->cbdev->cn_queue,
+					    &__cbq->work, 0))
 					err = 0;
 			} else {
-				struct work_struct *w;
 				struct cn_callback_data *d;
 				
-				w = kzalloc(sizeof(*w) + sizeof(*d), GFP_ATOMIC);
-				if (w) {
-					d = (struct cn_callback_data *)(w+1);
-
+				__cbq = kzalloc(sizeof(*__cbq), GFP_ATOMIC);
+				if (__cbq) {
+					d = &__cbq->data;
 					d->callback_priv = msg;
 					d->callback = __cbq->data.callback;
 					d->ddata = data;
 					d->destruct_data = destruct_data;
-					d->free = w;
+					d->free = __cbq;
 
-					INIT_LIST_HEAD(&w->entry);
-					w->pending = 0;
-					w->func = &cn_queue_wrapper;
-					w->data = d;
-					init_timer(&w->timer);
+					INIT_DELAYED_WORK(&__cbq->work,
+							  &cn_queue_wrapper);
 					
-					if (queue_work(dev->cbdev->cn_queue, w))
+					if (queue_delayed_work(
+						    dev->cbdev->cn_queue,
+						    &__cbq->work, 0))
 						err = 0;
 					else {
-						kfree(w);
+						kfree(__cbq);
 						err = -EINVAL;
 					}
 				} else
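
The connector rework is the deepest one here: instead of hand-initialising a bare work_struct (w->pending, w->func, w->data, init_timer()), it allocates a whole cn_callback_entry and lets INIT_DELAYED_WORK() set it up, and a delay of 0 passed to queue_delayed_work() is the immediate-dispatch equivalent of the old queue_work(). Reduced sketch, names invented, wq assumed to be an existing workqueue:

    struct cb_entry {
            struct delayed_work work;
            void *payload;
    };

    static void cb_fn(struct work_struct *work)
    {
            struct cb_entry *e =
                    container_of(work, struct cb_entry, work.work);

            /* ... consume e->payload ... */
            kfree(e);                       /* the entry frees itself */
    }

    /* somewhere in the dispatch path: */
    struct cb_entry *e = kzalloc(sizeof(*e), GFP_ATOMIC);
    if (e) {
            INIT_DELAYED_WORK(&e->work, cb_fn);
            if (!queue_delayed_work(wq, &e->work, 0))   /* 0 = run now */
                    kfree(e);
    }
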
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index dd0c262..47ab42d 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -42,7 +42,7 @@
 
 /* internal prototypes */
 static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
-static void handle_update(void *data);
+static void handle_update(struct work_struct *work);
 
 /**
  * Two notifier lists: the "policy" list is involved in the
@@ -665,7 +665,7 @@
 	mutex_init(&policy->lock);
 	mutex_lock(&policy->lock);
 	init_completion(&policy->kobj_unregister);
-	INIT_WORK(&policy->update, handle_update, (void *)(long)cpu);
+	INIT_WORK(&policy->update, handle_update);
 
 	/* call driver. From then on the cpufreq must be able
 	 * to accept all calls to ->verify and ->setpolicy for this CPU
@@ -895,9 +895,11 @@
 }
 
 
-static void handle_update(void *data)
+static void handle_update(struct work_struct *work)
 {
-	unsigned int cpu = (unsigned int)(long)data;
+	struct cpufreq_policy *policy =
+		container_of(work, struct cpufreq_policy, update);
+	unsigned int cpu = policy->cpu;
 	dprintk("handle_update for cpu %u called\n", cpu);
 	cpufreq_update_policy(cpu);
 }
@@ -1535,7 +1537,6 @@
 }
 EXPORT_SYMBOL(cpufreq_update_policy);
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int cpufreq_cpu_callback(struct notifier_block *nfb,
 					unsigned long action, void *hcpu)
 {
@@ -1575,7 +1576,6 @@
 {
     .notifier_call = cpufreq_cpu_callback,
 };
-#endif /* CONFIG_HOTPLUG_CPU */
 
 /*********************************************************************
  *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index c4c578d..5ef5ede 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -59,7 +59,7 @@
 #define MAX_SAMPLING_DOWN_FACTOR		(10)
 #define TRANSITION_LATENCY_LIMIT		(10 * 1000)
 
-static void do_dbs_timer(void *data);
+static void do_dbs_timer(struct work_struct *work);
 
 struct cpu_dbs_info_s {
 	struct cpufreq_policy 	*cur_policy;
@@ -82,7 +82,7 @@
  * is recursive for the same process. -Venki
  */
 static DEFINE_MUTEX 	(dbs_mutex);
-static DECLARE_WORK	(dbs_work, do_dbs_timer, NULL);
+static DECLARE_DELAYED_WORK(dbs_work, do_dbs_timer);
 
 struct dbs_tuners {
 	unsigned int 		sampling_rate;
@@ -420,7 +420,7 @@
 	}
 }
 
-static void do_dbs_timer(void *data)
+static void do_dbs_timer(struct work_struct *work)
 { 
 	int i;
 	lock_cpu_hotplug();
@@ -435,7 +435,6 @@
 
 static inline void dbs_timer_init(void)
 {
-	INIT_WORK(&dbs_work, do_dbs_timer, NULL);
 	schedule_delayed_work(&dbs_work,
 			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
 	return;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index bf8aa45..e1cc511 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -47,13 +47,17 @@
 #define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER	(1000)
 #define TRANSITION_LATENCY_LIMIT		(10 * 1000)
 
-static void do_dbs_timer(void *data);
+static void do_dbs_timer(struct work_struct *work);
+
+/* Sampling types */
+enum dbs_sample {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
 
 struct cpu_dbs_info_s {
 	cputime64_t prev_cpu_idle;
 	cputime64_t prev_cpu_wall;
 	struct cpufreq_policy *cur_policy;
- 	struct work_struct work;
+ 	struct delayed_work work;
+	enum dbs_sample sample_type;
 	unsigned int enable;
 	struct cpufreq_frequency_table *freq_table;
 	unsigned int freq_lo;
@@ -407,30 +411,31 @@
 	}
 }
 
-/* Sampling types */
-enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
-
-static void do_dbs_timer(void *data)
+static void do_dbs_timer(struct work_struct *work)
 {
 	unsigned int cpu = smp_processor_id();
 	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
+	enum dbs_sample sample_type = dbs_info->sample_type;
 	/* We want all CPUs to do sampling nearly on same jiffy */
 	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+
+	/* Permit rescheduling of this work item */
+	work_release(work);
+
 	delay -= jiffies % delay;
 
 	if (!dbs_info->enable)
 		return;
 	/* Common NORMAL_SAMPLE setup */
-	INIT_WORK(&dbs_info->work, do_dbs_timer, (void *)DBS_NORMAL_SAMPLE);
+	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
 	if (!dbs_tuners_ins.powersave_bias ||
-	    (unsigned long) data == DBS_NORMAL_SAMPLE) {
+	    sample_type == DBS_NORMAL_SAMPLE) {
 		lock_cpu_hotplug();
 		dbs_check_cpu(dbs_info);
 		unlock_cpu_hotplug();
 		if (dbs_info->freq_lo) {
 			/* Setup timer for SUB_SAMPLE */
-			INIT_WORK(&dbs_info->work, do_dbs_timer,
-					(void *)DBS_SUB_SAMPLE);
+			dbs_info->sample_type = DBS_SUB_SAMPLE;
 			delay = dbs_info->freq_hi_jiffies;
 		}
 	} else {
@@ -449,7 +454,8 @@
 	delay -= jiffies % delay;
 
 	ondemand_powersave_bias_init();
-	INIT_WORK(&dbs_info->work, do_dbs_timer, NULL);
+	INIT_DELAYED_WORK_NAR(&dbs_info->work, do_dbs_timer);
+	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
 	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
 }
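
With the void * argument gone, per-invocation state moves into the structure the work item is embedded in: the ondemand governor keeps the sample type in cpu_dbs_info_s and reads it back in the handler. (The INIT_DELAYED_WORK_NAR()/work_release() pair is the transitional "non-auto-release" variant of this API, which lets the handler requeue its own work item.) Reduced sketch, names invented:

    struct dbs_info {
            struct delayed_work work;
            int sample_type;                /* replaces the old data arg */
    };

    static void dbs_timer(struct work_struct *work)
    {
            struct dbs_info *info =
                    container_of(work, struct dbs_info, work.work);

            if (info->sample_type == 0) {   /* normal sample */
                    /* evaluate load, maybe arm a sub-sample */
            } else {                        /* sub-sample */
                    /* switch to the precomputed high frequency */
            }
    }
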
 
diff --git a/drivers/dma/ioatdma.c b/drivers/dma/ioatdma.c
index 0358419..8e87261 100644
--- a/drivers/dma/ioatdma.c
+++ b/drivers/dma/ioatdma.c
@@ -636,10 +636,10 @@
 	dma_cookie_t cookie;
 	int err = 0;
 
-	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, SLAB_KERNEL);
+	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
 	if (!src)
 		return -ENOMEM;
-	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, SLAB_KERNEL);
+	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
 	if (!dest) {
 		kfree(src);
 		return -ENOMEM;
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 75e9e38..1b4fc92 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -28,6 +28,7 @@
 #include <linux/sysdev.h>
 #include <linux/ctype.h>
 #include <linux/kthread.h>
+#include <linux/freezer.h>
 #include <asm/uaccess.h>
 #include <asm/page.h>
 #include <asm/edac.h>
diff --git a/drivers/i2c/chips/ds1374.c b/drivers/i2c/chips/ds1374.c
index 4630f19..15edf40 100644
--- a/drivers/i2c/chips/ds1374.c
+++ b/drivers/i2c/chips/ds1374.c
@@ -140,12 +140,14 @@
 	return t1;
 }
 
-static void ds1374_set_work(void *arg)
+static ulong new_time;
+
+static void ds1374_set_work(struct work_struct *work)
 {
 	ulong t1, t2;
 	int limit = 10;		/* arbitrary retry limit */
 
-	t1 = *(ulong *) arg;
+	t1 = new_time;
 
 	mutex_lock(&ds1374_mutex);
 
@@ -167,11 +169,9 @@
 			 "can't confirm time set from rtc chip\n");
 }
 
-static ulong new_time;
-
 static struct workqueue_struct *ds1374_workqueue;
 
-static DECLARE_WORK(ds1374_work, ds1374_set_work, &new_time);
+static DECLARE_WORK(ds1374_work, ds1374_set_work);
 
 int ds1374_set_rtc_time(ulong nowtime)
 {
@@ -180,7 +180,7 @@
 	if (in_interrupt())
 		queue_work(ds1374_workqueue, &ds1374_work);
 	else
-		ds1374_set_work(&new_time);
+		ds1374_set_work(NULL);
 
 	return 0;
 }
diff --git a/drivers/i2c/chips/m41t00.c b/drivers/i2c/chips/m41t00.c
index 2dd0a34..420377c 100644
--- a/drivers/i2c/chips/m41t00.c
+++ b/drivers/i2c/chips/m41t00.c
@@ -215,8 +215,15 @@
 }
 
 static ulong new_time;
+/* well, isn't this API just _lovely_? */
+static void
+m41t00_barf(struct work_struct *unusable)
+{
+	m41t00_set(&new_time);
+}
+
 static struct workqueue_struct *m41t00_wq;
-static DECLARE_WORK(m41t00_work, m41t00_set, &new_time);
+static DECLARE_WORK(m41t00_work, m41t00_barf);
 
 int
 m41t00_set_rtc_time(ulong nowtime)
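
For file-scope DECLARE_WORK() items the lost data pointer simply becomes file-scope state: ds1374 moves new_time above its user, and m41t00 adds a one-line trampoline (m41t00_barf) so the existing m41t00_set(&new_time) helper keeps its signature. The shape of that workaround, with invented names (hw_set_value() is hypothetical):

    static unsigned long new_value;         /* was the DECLARE_WORK data ptr */

    static void set_value_work(struct work_struct *unused)
    {
            hw_set_value(new_value);
    }
    static DECLARE_WORK(set_value_item, set_value_work);

    int set_value(unsigned long v)
    {
            new_value = v;                  /* unsynchronised, as in the
                                             * drivers above              */
            schedule_work(&set_value_item);
            return 0;
    }
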
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 0c68d0f..e23bc0d 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -389,14 +389,6 @@
 	  Linux. This may slow disk throughput by a few percent, but at least
 	  things will operate 100% reliably.
 
-config BLK_DEV_SL82C105
-	tristate "Winbond SL82c105 support"
-	depends on PCI && (PPC || ARM) && BLK_DEV_IDEPCI
-	help
-	  If you have a Winbond SL82c105 IDE controller, say Y here to enable
-	  special configuration for this chip. This is common on various CHRP
-	  motherboards, but could be used elsewhere. If in doubt, say Y.
-
 config BLK_DEV_IDEDMA_PCI
 	bool "Generic PCI bus-master DMA support"
 	depends on PCI && BLK_DEV_IDEPCI
@@ -712,6 +704,14 @@
 
 	  Please read the comments at the top of <file:drivers/ide/pci/sis5513.c>.
 
+config BLK_DEV_SL82C105
+	tristate "Winbond SL82c105 support"
+	depends on (PPC || ARM)
+	help
+	  If you have a Winbond SL82c105 IDE controller, say Y here to enable
+	  special configuration for this chip. This is common on various CHRP
+	  motherboards, but could be used elsewhere. If in doubt, say Y.
+
 config BLK_DEV_SLC90E66
 	tristate "SLC90E66 chipset support"
 	help
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index 287a662..1689076 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -973,8 +973,8 @@
  *	@drive: drive
  *
  *	Automatically remove all the driver specific settings for this
- *	drive. This function may sleep and must not be called from IRQ
- *	context. The caller must hold ide_setting_sem.
+ *	drive. This function may not be called from IRQ context. The
+ *	caller must hold ide_setting_sem.
  */
  
 static void auto_remove_settings (ide_drive_t *drive)
@@ -1874,11 +1874,22 @@
 {
 	unsigned long flags;
 	
-	down(&ide_setting_sem);
-	spin_lock_irqsave(&ide_lock, flags);
 #ifdef CONFIG_PROC_FS
 	ide_remove_proc_entries(drive->proc, driver->proc);
 #endif
+	down(&ide_setting_sem);
+	spin_lock_irqsave(&ide_lock, flags);
+	/*
+	 * ide_setting_sem protects the settings list
+	 * ide_lock protects the use of settings
+	 *
+	 * so we need to hold both: ide_setting_sem because we want to
+	 * modify the settings list, and ide_lock because we cannot take
+	 * a setting out that is being used.
+	 *
+	 * OTOH both ide_{read,write}_setting are only ever used under
+	 * ide_setting_sem.
+	 */
 	auto_remove_settings(drive);
 	spin_unlock_irqrestore(&ide_lock, flags);
 	up(&ide_setting_sem);
diff --git a/drivers/ide/pci/via82cxxx.c b/drivers/ide/pci/via82cxxx.c
index eb7ab11..61f1a96 100644
--- a/drivers/ide/pci/via82cxxx.c
+++ b/drivers/ide/pci/via82cxxx.c
@@ -282,11 +282,11 @@
 	 * Find the ISA bridge to see how good the IDE is.
 	 */
 	via_config = via_config_find(&isa);
-	if (!via_config->id) {
-		printk(KERN_WARNING "VP_IDE: Unknown VIA SouthBridge, disabling DMA.\n");
-		pci_dev_put(isa);
-		return -ENODEV;
-	}
+
+	/* We checked this earlier, so if it fails here deep badness
+	   is involved */
+
+	BUG_ON(!via_config->id);
 
 	/*
 	 * Setup or disable Clk66 if appropriate
@@ -494,6 +494,17 @@
 
 static int __devinit via_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 {
+	struct pci_dev *isa = NULL;
+	struct via_isa_bridge *via_config;
+	/*
+	 * Find the ISA bridge and check we know what it is.
+	 */
+	via_config = via_config_find(&isa);
+	pci_dev_put(isa);
+	if (!via_config->id) {
+		printk(KERN_WARNING "VP_IDE: Unknown VIA SouthBridge, disabling DMA.\n");
+		return -ENODEV;
+	}
 	return ide_setup_pci_device(dev, &via82cxxx_chipsets[id->driver_data]);
 }
 
diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c
index 31e5cc4..27d6c64 100644
--- a/drivers/ieee1394/eth1394.c
+++ b/drivers/ieee1394/eth1394.c
@@ -133,7 +133,7 @@
 #define ETH1394_DRIVER_NAME "eth1394"
 static const char driver_name[] = ETH1394_DRIVER_NAME;
 
-static kmem_cache_t *packet_task_cache;
+static struct kmem_cache *packet_task_cache;
 
 static struct hpsb_highlevel eth1394_highlevel;
 
diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
index d90a3a1..b935e08 100644
--- a/drivers/ieee1394/hosts.c
+++ b/drivers/ieee1394/hosts.c
@@ -31,9 +31,10 @@
 #include "config_roms.h"
 
 
-static void delayed_reset_bus(void * __reset_info)
+static void delayed_reset_bus(struct work_struct *work)
 {
-	struct hpsb_host *host = (struct hpsb_host*)__reset_info;
+	struct hpsb_host *host =
+		container_of(work, struct hpsb_host, delayed_reset.work);
 	int generation = host->csr.generation + 1;
 
 	/* The generation field rolls over to 2 rather than 0 per IEEE
@@ -122,7 +123,7 @@
 	int i;
 	int hostnum = 0;
 
-	h = kzalloc(sizeof(*h) + extra, SLAB_KERNEL);
+	h = kzalloc(sizeof(*h) + extra, GFP_KERNEL);
 	if (!h)
 		return NULL;
 
@@ -145,7 +146,7 @@
 
 	atomic_set(&h->generation, 0);
 
-	INIT_WORK(&h->delayed_reset, delayed_reset_bus, h);
+	INIT_DELAYED_WORK(&h->delayed_reset, delayed_reset_bus);
 	
 	init_timer(&h->timeout);
 	h->timeout.data = (unsigned long) h;
@@ -234,7 +235,7 @@
 		 * Config ROM in the near future. */
 		reset_delay = HZ;
 
-	PREPARE_WORK(&host->delayed_reset, delayed_reset_bus, host);
+	PREPARE_DELAYED_WORK(&host->delayed_reset, delayed_reset_bus);
 	schedule_delayed_work(&host->delayed_reset, reset_delay);
 
 	return 0;
diff --git a/drivers/ieee1394/hosts.h b/drivers/ieee1394/hosts.h
index bc6dbfa..d553e38 100644
--- a/drivers/ieee1394/hosts.h
+++ b/drivers/ieee1394/hosts.h
@@ -62,7 +62,7 @@
 	struct class_device class_dev;
 
 	int update_config_rom;
-	struct work_struct delayed_reset;
+	struct delayed_work delayed_reset;
 	unsigned int config_roms;
 
 	struct list_head addr_space;
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index 8e7b83f..e829c93 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -15,6 +15,7 @@
 #include <linux/delay.h>
 #include <linux/kthread.h>
 #include <linux/moduleparam.h>
+#include <linux/freezer.h>
 #include <asm/atomic.h>
 
 #include "csr.h"
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
index 6e8ea91..eae97d8 100644
--- a/drivers/ieee1394/ohci1394.c
+++ b/drivers/ieee1394/ohci1394.c
@@ -1225,7 +1225,7 @@
 	int ctx;
 	int ret = -ENOMEM;
 
-	recv = kmalloc(sizeof(*recv), SLAB_KERNEL);
+	recv = kmalloc(sizeof(*recv), GFP_KERNEL);
 	if (!recv)
 		return -ENOMEM;
 
@@ -1918,7 +1918,7 @@
 	int ctx;
 	int ret = -ENOMEM;
 
-	xmit = kmalloc(sizeof(*xmit), SLAB_KERNEL);
+	xmit = kmalloc(sizeof(*xmit), GFP_KERNEL);
 	if (!xmit)
 		return -ENOMEM;
 
@@ -3021,7 +3021,7 @@
 			return -ENOMEM;
 		}
 
-		d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
+		d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i);
 		OHCI_DMA_ALLOC("pool dma_rcv prg[%d]", i);
 
                 if (d->prg_cpu[i] != NULL) {
@@ -3117,7 +3117,7 @@
 	OHCI_DMA_ALLOC("dma_rcv prg pool");
 
 	for (i = 0; i < d->num_desc; i++) {
-		d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
+		d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i);
 		OHCI_DMA_ALLOC("pool dma_trm prg[%d]", i);
 
                 if (d->prg_cpu[i] != NULL) {
diff --git a/drivers/ieee1394/pcilynx.c b/drivers/ieee1394/pcilynx.c
index 0a7412e..9cab1d6 100644
--- a/drivers/ieee1394/pcilynx.c
+++ b/drivers/ieee1394/pcilynx.c
@@ -1428,7 +1428,7 @@
         	struct i2c_algo_bit_data i2c_adapter_data;
 
         	error = -ENOMEM;
-		i2c_ad = kmalloc(sizeof(*i2c_ad), SLAB_KERNEL);
+		i2c_ad = kmalloc(sizeof(*i2c_ad), GFP_KERNEL);
         	if (!i2c_ad) FAIL("failed to allocate I2C adapter memory");
 
 		memcpy(i2c_ad, &bit_ops, sizeof(struct i2c_adapter));
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
index 5ec4f5e..bf71e06 100644
--- a/drivers/ieee1394/raw1394.c
+++ b/drivers/ieee1394/raw1394.c
@@ -112,7 +112,7 @@
 
 static inline struct pending_request *alloc_pending_request(void)
 {
-	return __alloc_pending_request(SLAB_KERNEL);
+	return __alloc_pending_request(GFP_KERNEL);
 }
 
 static void free_pending_request(struct pending_request *req)
@@ -259,7 +259,7 @@
 	if (hi != NULL) {
 		list_for_each_entry(fi, &hi->file_info_list, list) {
 			if (fi->notification == RAW1394_NOTIFY_ON) {
-				req = __alloc_pending_request(SLAB_ATOMIC);
+				req = __alloc_pending_request(GFP_ATOMIC);
 
 				if (req != NULL) {
 					req->file_info = fi;
@@ -306,13 +306,13 @@
 			if (!(fi->listen_channels & (1ULL << channel)))
 				continue;
 
-			req = __alloc_pending_request(SLAB_ATOMIC);
+			req = __alloc_pending_request(GFP_ATOMIC);
 			if (!req)
 				break;
 
 			if (!ibs) {
 				ibs = kmalloc(sizeof(*ibs) + length,
-					      SLAB_ATOMIC);
+					      GFP_ATOMIC);
 				if (!ibs) {
 					kfree(req);
 					break;
@@ -367,13 +367,13 @@
 			if (!fi->fcp_buffer)
 				continue;
 
-			req = __alloc_pending_request(SLAB_ATOMIC);
+			req = __alloc_pending_request(GFP_ATOMIC);
 			if (!req)
 				break;
 
 			if (!ibs) {
 				ibs = kmalloc(sizeof(*ibs) + length,
-					      SLAB_ATOMIC);
+					      GFP_ATOMIC);
 				if (!ibs) {
 					kfree(req);
 					break;
@@ -593,7 +593,7 @@
 	switch (req->req.type) {
 	case RAW1394_REQ_LIST_CARDS:
 		spin_lock_irqsave(&host_info_lock, flags);
-		khl = kmalloc(sizeof(*khl) * host_count, SLAB_ATOMIC);
+		khl = kmalloc(sizeof(*khl) * host_count, GFP_ATOMIC);
 
 		if (khl) {
 			req->req.misc = host_count;
@@ -1045,7 +1045,7 @@
 	}
 	if (arm_addr->notification_options & ARM_READ) {
 		DBGMSG("arm_read -> entering notification-section");
-		req = __alloc_pending_request(SLAB_ATOMIC);
+		req = __alloc_pending_request(GFP_ATOMIC);
 		if (!req) {
 			DBGMSG("arm_read -> rcode_conflict_error");
 			spin_unlock_irqrestore(&host_info_lock, irqflags);
@@ -1064,7 +1064,7 @@
 			    sizeof(struct arm_response) +
 			    sizeof(struct arm_request_response);
 		}
-		req->data = kmalloc(size, SLAB_ATOMIC);
+		req->data = kmalloc(size, GFP_ATOMIC);
 		if (!(req->data)) {
 			free_pending_request(req);
 			DBGMSG("arm_read -> rcode_conflict_error");
@@ -1198,7 +1198,7 @@
 	}
 	if (arm_addr->notification_options & ARM_WRITE) {
 		DBGMSG("arm_write -> entering notification-section");
-		req = __alloc_pending_request(SLAB_ATOMIC);
+		req = __alloc_pending_request(GFP_ATOMIC);
 		if (!req) {
 			DBGMSG("arm_write -> rcode_conflict_error");
 			spin_unlock_irqrestore(&host_info_lock, irqflags);
@@ -1209,7 +1209,7 @@
 		    sizeof(struct arm_request) + sizeof(struct arm_response) +
 		    (length) * sizeof(byte_t) +
 		    sizeof(struct arm_request_response);
-		req->data = kmalloc(size, SLAB_ATOMIC);
+		req->data = kmalloc(size, GFP_ATOMIC);
 		if (!(req->data)) {
 			free_pending_request(req);
 			DBGMSG("arm_write -> rcode_conflict_error");
@@ -1400,7 +1400,7 @@
 	if (arm_addr->notification_options & ARM_LOCK) {
 		byte_t *buf1, *buf2;
 		DBGMSG("arm_lock -> entering notification-section");
-		req = __alloc_pending_request(SLAB_ATOMIC);
+		req = __alloc_pending_request(GFP_ATOMIC);
 		if (!req) {
 			DBGMSG("arm_lock -> rcode_conflict_error");
 			spin_unlock_irqrestore(&host_info_lock, irqflags);
@@ -1408,7 +1408,7 @@
 							   The request may be retried */
 		}
 		size = sizeof(struct arm_request) + sizeof(struct arm_response) + 3 * sizeof(*store) + sizeof(struct arm_request_response);	/* maximum */
-		req->data = kmalloc(size, SLAB_ATOMIC);
+		req->data = kmalloc(size, GFP_ATOMIC);
 		if (!(req->data)) {
 			free_pending_request(req);
 			DBGMSG("arm_lock -> rcode_conflict_error");
@@ -1628,7 +1628,7 @@
 	if (arm_addr->notification_options & ARM_LOCK) {
 		byte_t *buf1, *buf2;
 		DBGMSG("arm_lock64 -> entering notification-section");
-		req = __alloc_pending_request(SLAB_ATOMIC);
+		req = __alloc_pending_request(GFP_ATOMIC);
 		if (!req) {
 			spin_unlock_irqrestore(&host_info_lock, irqflags);
 			DBGMSG("arm_lock64 -> rcode_conflict_error");
@@ -1636,7 +1636,7 @@
 							   The request may be retried */
 		}
 		size = sizeof(struct arm_request) + sizeof(struct arm_response) + 3 * sizeof(*store) + sizeof(struct arm_request_response);	/* maximum */
-		req->data = kmalloc(size, SLAB_ATOMIC);
+		req->data = kmalloc(size, GFP_ATOMIC);
 		if (!(req->data)) {
 			free_pending_request(req);
 			spin_unlock_irqrestore(&host_info_lock, irqflags);
@@ -1737,7 +1737,7 @@
 		return (-EINVAL);
 	}
 	/* addr-list-entry for fileinfo */
-	addr = kmalloc(sizeof(*addr), SLAB_KERNEL);
+	addr = kmalloc(sizeof(*addr), GFP_KERNEL);
 	if (!addr) {
 		req->req.length = 0;
 		return (-ENOMEM);
@@ -2103,7 +2103,7 @@
 static int get_config_rom(struct file_info *fi, struct pending_request *req)
 {
 	int ret = sizeof(struct raw1394_request);
-	quadlet_t *data = kmalloc(req->req.length, SLAB_KERNEL);
+	quadlet_t *data = kmalloc(req->req.length, GFP_KERNEL);
 	int status;
 
 	if (!data)
@@ -2133,7 +2133,7 @@
 static int update_config_rom(struct file_info *fi, struct pending_request *req)
 {
 	int ret = sizeof(struct raw1394_request);
-	quadlet_t *data = kmalloc(req->req.length, SLAB_KERNEL);
+	quadlet_t *data = kmalloc(req->req.length, GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
 	if (copy_from_user(data, int2ptr(req->req.sendb), req->req.length)) {
@@ -2443,7 +2443,7 @@
 	/* only one ISO activity event may be in the queue */
 	if (!__rawiso_event_in_queue(fi)) {
 		struct pending_request *req =
-		    __alloc_pending_request(SLAB_ATOMIC);
+		    __alloc_pending_request(GFP_ATOMIC);
 
 		if (req) {
 			req->file_info = fi;
@@ -2779,7 +2779,7 @@
 {
 	struct file_info *fi;
 
-	fi = kzalloc(sizeof(*fi), SLAB_KERNEL);
+	fi = kzalloc(sizeof(*fi), GFP_KERNEL);
 	if (!fi)
 		return -ENOMEM;
 
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index 6986ac1..cd156d4 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -493,20 +493,25 @@
 		scsi_unblock_requests(scsi_id->scsi_host);
 }
 
-static void sbp2util_write_orb_pointer(void *p)
+static void sbp2util_write_orb_pointer(struct work_struct *work)
 {
+	struct scsi_id_instance_data *scsi_id =
+		container_of(work, struct scsi_id_instance_data,
+			     protocol_work.work);
 	quadlet_t data[2];
 
-	data[0] = ORB_SET_NODE_ID(
-			((struct scsi_id_instance_data *)p)->hi->host->node_id);
-	data[1] = ((struct scsi_id_instance_data *)p)->last_orb_dma;
+	data[0] = ORB_SET_NODE_ID(scsi_id->hi->host->node_id);
+	data[1] = scsi_id->last_orb_dma;
 	sbp2util_cpu_to_be32_buffer(data, 8);
-	sbp2util_notify_fetch_agent(p, SBP2_ORB_POINTER_OFFSET, data, 8);
+	sbp2util_notify_fetch_agent(scsi_id, SBP2_ORB_POINTER_OFFSET, data, 8);
 }
 
-static void sbp2util_write_doorbell(void *p)
+static void sbp2util_write_doorbell(struct work_struct *work)
 {
-	sbp2util_notify_fetch_agent(p, SBP2_DOORBELL_OFFSET, NULL, 4);
+	struct scsi_id_instance_data *scsi_id =
+		container_of(work, struct scsi_id_instance_data,
+			     protocol_work.work);
+	sbp2util_notify_fetch_agent(scsi_id, SBP2_DOORBELL_OFFSET, NULL, 4);
 }
 
 /*
@@ -843,7 +848,7 @@
 	INIT_LIST_HEAD(&scsi_id->scsi_list);
 	spin_lock_init(&scsi_id->sbp2_command_orb_lock);
 	atomic_set(&scsi_id->state, SBP2LU_STATE_RUNNING);
-	INIT_WORK(&scsi_id->protocol_work, NULL, NULL);
+	INIT_DELAYED_WORK(&scsi_id->protocol_work, NULL);
 
 	ud->device.driver_data = scsi_id;
 
@@ -2047,11 +2052,10 @@
 		 * We do not accept new commands until the job is over.
 		 */
 		scsi_block_requests(scsi_id->scsi_host);
-		PREPARE_WORK(&scsi_id->protocol_work,
+		PREPARE_DELAYED_WORK(&scsi_id->protocol_work,
 			     last_orb ? sbp2util_write_doorbell:
-					sbp2util_write_orb_pointer,
-			     scsi_id);
-		schedule_work(&scsi_id->protocol_work);
+					sbp2util_write_orb_pointer);
+		schedule_delayed_work(&scsi_id->protocol_work, 0);
 	}
 }
 
diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h
index abbe48e..1b16d6b 100644
--- a/drivers/ieee1394/sbp2.h
+++ b/drivers/ieee1394/sbp2.h
@@ -348,7 +348,7 @@
 	unsigned workarounds;
 
 	atomic_t state;
-	struct work_struct protocol_work;
+	struct delayed_work protocol_work;
 };
 
 /* For use in scsi_id_instance_data.state */
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 7767a11..af93979 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -55,11 +55,11 @@
 	int status;
 };
 
-static void process_req(void *data);
+static void process_req(struct work_struct *work);
 
 static DEFINE_MUTEX(lock);
 static LIST_HEAD(req_list);
-static DECLARE_WORK(work, process_req, NULL);
+static DECLARE_DELAYED_WORK(work, process_req);
 static struct workqueue_struct *addr_wq;
 
 void rdma_addr_register_client(struct rdma_addr_client *client)
@@ -215,7 +215,7 @@
 	return ret;
 }
 
-static void process_req(void *data)
+static void process_req(struct work_struct *work)
 {
 	struct addr_req *req, *temp_req;
 	struct sockaddr_in *src_in, *dst_in;
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 20e9f64..98272fb 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -285,9 +285,10 @@
 	kfree(tprops);
 }
 
-static void ib_cache_task(void *work_ptr)
+static void ib_cache_task(struct work_struct *_work)
 {
-	struct ib_update_work *work = work_ptr;
+	struct ib_update_work *work =
+		container_of(_work, struct ib_update_work, work);
 
 	ib_cache_update(work->device, work->port_num);
 	kfree(work);
@@ -306,7 +307,7 @@
 	    event->event == IB_EVENT_CLIENT_REREGISTER) {
 		work = kmalloc(sizeof *work, GFP_ATOMIC);
 		if (work) {
-			INIT_WORK(&work->work, ib_cache_task, work);
+			INIT_WORK(&work->work, ib_cache_task);
 			work->device   = event->device;
 			work->port_num = event->element.port_num;
 			schedule_work(&work->work);
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index e5dc453..79c937b 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -101,7 +101,7 @@
 };
 
 struct cm_work {
-	struct work_struct work;
+	struct delayed_work work;
 	struct list_head list;
 	struct cm_port *port;
 	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
@@ -161,7 +161,7 @@
 	atomic_t work_count;
 };
 
-static void cm_work_handler(void *data);
+static void cm_work_handler(struct work_struct *work);
 
 static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
 {
@@ -668,8 +668,7 @@
 		return ERR_PTR(-ENOMEM);
 
 	timewait_info->work.local_id = local_id;
-	INIT_WORK(&timewait_info->work.work, cm_work_handler,
-		  &timewait_info->work);
+	INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
 	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
 	return timewait_info;
 }
@@ -2995,9 +2994,9 @@
 	}
 }
 
-static void cm_work_handler(void *data)
+static void cm_work_handler(struct work_struct *_work)
 {
-	struct cm_work *work = data;
+	struct cm_work *work = container_of(_work, struct cm_work, work.work);
 	int ret;
 
 	switch (work->cm_event.event) {
@@ -3087,12 +3086,12 @@
 	 * we need to find the cm_id once we're in the context of the
 	 * worker thread, rather than holding a reference on it.
 	 */
-	INIT_WORK(&work->work, cm_work_handler, work);
+	INIT_DELAYED_WORK(&work->work, cm_work_handler);
 	work->local_id = cm_id->local_id;
 	work->remote_id = cm_id->remote_id;
 	work->mad_recv_wc = NULL;
 	work->cm_event.event = IB_CM_USER_ESTABLISHED;
-	queue_work(cm.wq, &work->work);
+	queue_delayed_work(cm.wq, &work->work, 0);
 out:
 	return ret;
 }
@@ -3191,11 +3190,11 @@
 		return;
 	}
 
-	INIT_WORK(&work->work, cm_work_handler, work);
+	INIT_DELAYED_WORK(&work->work, cm_work_handler);
 	work->cm_event.event = event;
 	work->mad_recv_wc = mad_recv_wc;
 	work->port = (struct cm_port *)mad_agent->context;
-	queue_work(cm.wq, &work->work);
+	queue_delayed_work(cm.wq, &work->work, 0);
 }
 
 static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index cf48f26..985a6b5 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1340,9 +1340,9 @@
 	return (id_priv->query_id < 0) ? id_priv->query_id : 0;
 }
 
-static void cma_work_handler(void *data)
+static void cma_work_handler(struct work_struct *_work)
 {
-	struct cma_work *work = data;
+	struct cma_work *work = container_of(_work, struct cma_work, work);
 	struct rdma_id_private *id_priv = work->id;
 	int destroy = 0;
 
@@ -1373,7 +1373,7 @@
 		return -ENOMEM;
 
 	work->id = id_priv;
-	INIT_WORK(&work->work, cma_work_handler, work);
+	INIT_WORK(&work->work, cma_work_handler);
 	work->old_state = CMA_ROUTE_QUERY;
 	work->new_state = CMA_ROUTE_RESOLVED;
 	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
@@ -1430,7 +1430,7 @@
 		return -ENOMEM;
 
 	work->id = id_priv;
-	INIT_WORK(&work->work, cma_work_handler, work);
+	INIT_WORK(&work->work, cma_work_handler);
 	work->old_state = CMA_ROUTE_QUERY;
 	work->new_state = CMA_ROUTE_RESOLVED;
 	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
@@ -1583,7 +1583,7 @@
 	}
 
 	work->id = id_priv;
-	INIT_WORK(&work->work, cma_work_handler, work);
+	INIT_WORK(&work->work, cma_work_handler);
 	work->old_state = CMA_ADDR_QUERY;
 	work->new_state = CMA_ADDR_RESOLVED;
 	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index cf797d7..1039ad5 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -828,9 +828,9 @@
  * thread asleep on the destroy_comp list vs. an object destroyed
  * here synchronously when the last reference is removed.
  */
-static void cm_work_handler(void *arg)
+static void cm_work_handler(struct work_struct *_work)
 {
-	struct iwcm_work *work = arg;
+	struct iwcm_work *work = container_of(_work, struct iwcm_work, work);
 	struct iw_cm_event levent;
 	struct iwcm_id_private *cm_id_priv = work->cm_id;
 	unsigned long flags;
@@ -900,7 +900,7 @@
 		goto out;
 	}
 
-	INIT_WORK(&work->work, cm_work_handler, work);
+	INIT_WORK(&work->work, cm_work_handler);
 	work->cm_id = cm_id_priv;
 	work->event = *iw_event;
 
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 3f9c162..15f38d9 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -65,8 +65,8 @@
 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
 				    struct ib_mad_private *mad);
 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
-static void timeout_sends(void *data);
-static void local_completions(void *data);
+static void timeout_sends(struct work_struct *work);
+static void local_completions(struct work_struct *work);
 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
 			      struct ib_mad_agent_private *agent_priv,
 			      u8 mgmt_class);
@@ -356,10 +356,9 @@
 	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
 	INIT_LIST_HEAD(&mad_agent_priv->done_list);
 	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
-	INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv);
+	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
 	INIT_LIST_HEAD(&mad_agent_priv->local_list);
-	INIT_WORK(&mad_agent_priv->local_work, local_completions,
-		   mad_agent_priv);
+	INIT_WORK(&mad_agent_priv->local_work, local_completions);
 	atomic_set(&mad_agent_priv->refcount, 1);
 	init_completion(&mad_agent_priv->comp);
 
@@ -2198,12 +2197,12 @@
 /*
  * IB MAD completion callback
  */
-static void ib_mad_completion_handler(void *data)
+static void ib_mad_completion_handler(struct work_struct *work)
 {
 	struct ib_mad_port_private *port_priv;
 	struct ib_wc wc;
 
-	port_priv = (struct ib_mad_port_private *)data;
+	port_priv = container_of(work, struct ib_mad_port_private, work);
 	ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
 
 	while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
@@ -2324,7 +2323,7 @@
 }
 EXPORT_SYMBOL(ib_cancel_mad);
 
-static void local_completions(void *data)
+static void local_completions(struct work_struct *work)
 {
 	struct ib_mad_agent_private *mad_agent_priv;
 	struct ib_mad_local_private *local;
@@ -2334,7 +2333,8 @@
 	struct ib_wc wc;
 	struct ib_mad_send_wc mad_send_wc;
 
-	mad_agent_priv = (struct ib_mad_agent_private *)data;
+	mad_agent_priv =
+		container_of(work, struct ib_mad_agent_private, local_work);
 
 	spin_lock_irqsave(&mad_agent_priv->lock, flags);
 	while (!list_empty(&mad_agent_priv->local_list)) {
@@ -2434,14 +2434,15 @@
 	return ret;
 }
 
-static void timeout_sends(void *data)
+static void timeout_sends(struct work_struct *work)
 {
 	struct ib_mad_agent_private *mad_agent_priv;
 	struct ib_mad_send_wr_private *mad_send_wr;
 	struct ib_mad_send_wc mad_send_wc;
 	unsigned long flags, delay;
 
-	mad_agent_priv = (struct ib_mad_agent_private *)data;
+	mad_agent_priv = container_of(work, struct ib_mad_agent_private,
+				      timed_work.work);
 	mad_send_wc.vendor_err = 0;
 
 	spin_lock_irqsave(&mad_agent_priv->lock, flags);
@@ -2799,7 +2800,7 @@
 		ret = -ENOMEM;
 		goto error8;
 	}
-	INIT_WORK(&port_priv->work, ib_mad_completion_handler, port_priv);
+	INIT_WORK(&port_priv->work, ib_mad_completion_handler);
 
 	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
 	list_add_tail(&port_priv->port_list, &ib_mad_port_list);
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index d06b590..d5548e7 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -102,7 +102,7 @@
 	struct list_head send_list;
 	struct list_head wait_list;
 	struct list_head done_list;
-	struct work_struct timed_work;
+	struct delayed_work timed_work;
 	unsigned long timeout;
 	struct list_head local_list;
 	struct work_struct local_work;
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index 1ef79d0..3663fd7 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -45,8 +45,8 @@
 struct mad_rmpp_recv {
 	struct ib_mad_agent_private *agent;
 	struct list_head list;
-	struct work_struct timeout_work;
-	struct work_struct cleanup_work;
+	struct delayed_work timeout_work;
+	struct delayed_work cleanup_work;
 	struct completion comp;
 	enum rmpp_state state;
 	spinlock_t lock;
@@ -233,9 +233,10 @@
 	}
 }
 
-static void recv_timeout_handler(void *data)
+static void recv_timeout_handler(struct work_struct *work)
 {
-	struct mad_rmpp_recv *rmpp_recv = data;
+	struct mad_rmpp_recv *rmpp_recv =
+		container_of(work, struct mad_rmpp_recv, timeout_work.work);
 	struct ib_mad_recv_wc *rmpp_wc;
 	unsigned long flags;
 
@@ -254,9 +255,10 @@
 	ib_free_recv_mad(rmpp_wc);
 }
 
-static void recv_cleanup_handler(void *data)
+static void recv_cleanup_handler(struct work_struct *work)
 {
-	struct mad_rmpp_recv *rmpp_recv = data;
+	struct mad_rmpp_recv *rmpp_recv =
+		container_of(work, struct mad_rmpp_recv, cleanup_work.work);
 	unsigned long flags;
 
 	spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
@@ -285,8 +287,8 @@
 
 	rmpp_recv->agent = agent;
 	init_completion(&rmpp_recv->comp);
-	INIT_WORK(&rmpp_recv->timeout_work, recv_timeout_handler, rmpp_recv);
-	INIT_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler, rmpp_recv);
+	INIT_DELAYED_WORK(&rmpp_recv->timeout_work, recv_timeout_handler);
+	INIT_DELAYED_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler);
 	spin_lock_init(&rmpp_recv->lock);
 	rmpp_recv->state = RMPP_STATE_ACTIVE;
 	atomic_set(&rmpp_recv->refcount, 1);
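
ib_mad_agent_private ends up with both flavours side by side, a plain work_struct (local_work) and a delayed_work (timed_work), which makes the two container_of() recipes easy to compare: the plain member is named directly, the delayed one goes through member.work. Sketch, names invented:

    struct agent {
            struct work_struct  local_work; /* run as soon as possible */
            struct delayed_work timed_work; /* run after a timeout     */
    };

    static void local_fn(struct work_struct *work)
    {
            struct agent *a = container_of(work, struct agent, local_work);
            /* ... use a ... */
    }

    static void timed_fn(struct work_struct *work)
    {
            struct agent *a = container_of(work, struct agent,
                                           timed_work.work);
            /* ... use a ... */
    }

    /* at init time: */
    INIT_WORK(&a->local_work, local_fn);
    INIT_DELAYED_WORK(&a->timed_work, timed_fn);
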
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 1706d3c..e45afba 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -360,9 +360,10 @@
 	kfree(sm_ah);
 }
 
-static void update_sm_ah(void *port_ptr)
+static void update_sm_ah(struct work_struct *work)
 {
-	struct ib_sa_port *port = port_ptr;
+	struct ib_sa_port *port =
+		container_of(work, struct ib_sa_port, update_task);
 	struct ib_sa_sm_ah *new_ah, *old_ah;
 	struct ib_port_attr port_attr;
 	struct ib_ah_attr   ah_attr;
@@ -992,8 +993,7 @@
 		if (IS_ERR(sa_dev->port[i].agent))
 			goto err;
 
-		INIT_WORK(&sa_dev->port[i].update_task,
-			  update_sm_ah, &sa_dev->port[i]);
+		INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);
 	}
 
 	ib_set_client_data(device, &sa_client, sa_dev);
@@ -1010,7 +1010,7 @@
 		goto err;
 
 	for (i = 0; i <= e - s; ++i)
-		update_sm_ah(&sa_dev->port[i]);
+		update_sm_ah(&sa_dev->port[i].update_task);
 
 	return;
 
diff --git a/drivers/infiniband/core/uverbs_mem.c b/drivers/infiniband/core/uverbs_mem.c
index efe147d..db12cc0 100644
--- a/drivers/infiniband/core/uverbs_mem.c
+++ b/drivers/infiniband/core/uverbs_mem.c
@@ -179,9 +179,10 @@
 	up_write(&current->mm->mmap_sem);
 }
 
-static void ib_umem_account(void *work_ptr)
+static void ib_umem_account(struct work_struct *_work)
 {
-	struct ib_umem_account_work *work = work_ptr;
+	struct ib_umem_account_work *work =
+		container_of(_work, struct ib_umem_account_work, work);
 
 	down_write(&work->mm->mmap_sem);
 	work->mm->locked_vm -= work->diff;
@@ -216,7 +217,7 @@
 		return;
 	}
 
-	INIT_WORK(&work->work, ib_umem_account, work);
+	INIT_WORK(&work->work, ib_umem_account);
 	work->mm   = mm;
 	work->diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT;
 
diff --git a/drivers/infiniband/hw/amso1100/c2_vq.c b/drivers/infiniband/hw/amso1100/c2_vq.c
index 40caeb5..36620a2 100644
--- a/drivers/infiniband/hw/amso1100/c2_vq.c
+++ b/drivers/infiniband/hw/amso1100/c2_vq.c
@@ -164,7 +164,7 @@
  */
 void *vq_repbuf_alloc(struct c2_dev *c2dev)
 {
-	return kmem_cache_alloc(c2dev->host_msg_cache, SLAB_ATOMIC);
+	return kmem_cache_alloc(c2dev->host_msg_cache, GFP_ATOMIC);
 }
 
 /*
diff --git a/drivers/infiniband/hw/ehca/ehca_av.c b/drivers/infiniband/hw/ehca/ehca_av.c
index 214e2fd..0d6e2c4 100644
--- a/drivers/infiniband/hw/ehca/ehca_av.c
+++ b/drivers/infiniband/hw/ehca/ehca_av.c
@@ -57,7 +57,7 @@
 	struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
 					      ib_device);
 
-	av = kmem_cache_alloc(av_cache, SLAB_KERNEL);
+	av = kmem_cache_alloc(av_cache, GFP_KERNEL);
 	if (!av) {
 		ehca_err(pd->device, "Out of memory pd=%p ah_attr=%p",
 			 pd, ah_attr);
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 458fe19..93995b6 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -134,7 +134,7 @@
 	if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
 		return ERR_PTR(-EINVAL);
 
-	my_cq = kmem_cache_alloc(cq_cache, SLAB_KERNEL);
+	my_cq = kmem_cache_alloc(cq_cache, GFP_KERNEL);
 	if (!my_cq) {
 		ehca_err(device, "Out of memory for ehca_cq struct device=%p",
 			 device);
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 3d1c1c5..cc47e4c 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -108,7 +108,7 @@
 
 void *ehca_alloc_fw_ctrlblock(void)
 {
-	void *ret = kmem_cache_zalloc(ctblk_cache, SLAB_KERNEL);
+	void *ret = kmem_cache_zalloc(ctblk_cache, GFP_KERNEL);
 	if (!ret)
 		ehca_gen_err("Out of memory for ctblk");
 	return ret;
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index abce676..0a5e221 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -53,7 +53,7 @@
 {
 	struct ehca_mr *me;
 
-	me = kmem_cache_alloc(mr_cache, SLAB_KERNEL);
+	me = kmem_cache_alloc(mr_cache, GFP_KERNEL);
 	if (me) {
 		memset(me, 0, sizeof(struct ehca_mr));
 		spin_lock_init(&me->mrlock);
@@ -72,7 +72,7 @@
 {
 	struct ehca_mw *me;
 
-	me = kmem_cache_alloc(mw_cache, SLAB_KERNEL);
+	me = kmem_cache_alloc(mw_cache, GFP_KERNEL);
 	if (me) {
 		memset(me, 0, sizeof(struct ehca_mw));
 		spin_lock_init(&me->mwlock);
diff --git a/drivers/infiniband/hw/ehca/ehca_pd.c b/drivers/infiniband/hw/ehca/ehca_pd.c
index 2c3cdc6..d5345e5 100644
--- a/drivers/infiniband/hw/ehca/ehca_pd.c
+++ b/drivers/infiniband/hw/ehca/ehca_pd.c
@@ -50,7 +50,7 @@
 {
 	struct ehca_pd *pd;
 
-	pd = kmem_cache_alloc(pd_cache, SLAB_KERNEL);
+	pd = kmem_cache_alloc(pd_cache, GFP_KERNEL);
 	if (!pd) {
 		ehca_err(device, "device=%p context=%p out of memory",
 			 device, context);
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 8682aa5..c6c9cef 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -450,7 +450,7 @@
 	if (pd->uobject && udata)
 		context = pd->uobject->context;
 
-	my_qp = kmem_cache_alloc(qp_cache, SLAB_KERNEL);
+	my_qp = kmem_cache_alloc(qp_cache, GFP_KERNEL);
 	if (!my_qp) {
 		ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd);
 		return ERR_PTR(-ENOMEM);
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c
index 413754b..8536aeb 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_pages.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c
@@ -214,9 +214,10 @@
 	unsigned long num_pages;
 };
 
-static void user_pages_account(void *ptr)
+static void user_pages_account(struct work_struct *_work)
 {
-	struct ipath_user_pages_work *work = ptr;
+	struct ipath_user_pages_work *work =
+		container_of(_work, struct ipath_user_pages_work, work);
 
 	down_write(&work->mm->mmap_sem);
 	work->mm->locked_vm -= work->num_pages;
@@ -242,7 +243,7 @@
 
 	goto bail;
 
-	INIT_WORK(&work->work, user_pages_account, work);
+	INIT_WORK(&work->work, user_pages_account);
 	work->mm = mm;
 	work->num_pages = num_pages;
 
diff --git a/drivers/infiniband/hw/mthca/mthca_av.c b/drivers/infiniband/hw/mthca/mthca_av.c
index 57cdc1b..27caf3b 100644
--- a/drivers/infiniband/hw/mthca/mthca_av.c
+++ b/drivers/infiniband/hw/mthca/mthca_av.c
@@ -189,7 +189,7 @@
 on_hca_fail:
 	if (ah->type == MTHCA_AH_PCI_POOL) {
 		ah->av = pci_pool_alloc(dev->av_table.pool,
-					SLAB_ATOMIC, &ah->avdma);
+					GFP_ATOMIC, &ah->avdma);
 		if (!ah->av)
 			return -ENOMEM;
 
diff --git a/drivers/infiniband/hw/mthca/mthca_catas.c b/drivers/infiniband/hw/mthca/mthca_catas.c
index cd044ea..e948158 100644
--- a/drivers/infiniband/hw/mthca/mthca_catas.c
+++ b/drivers/infiniband/hw/mthca/mthca_catas.c
@@ -57,7 +57,7 @@
 module_param_named(catas_reset_disable, catas_reset_disable, int, 0644);
 MODULE_PARM_DESC(catas_reset_disable, "disable reset on catastrophic event if nonzero");
 
-static void catas_reset(void *work_ptr)
+static void catas_reset(struct work_struct *work)
 {
 	struct mthca_dev *dev, *tmpdev;
 	LIST_HEAD(tlist);
@@ -203,7 +203,7 @@
 
 int __init mthca_catas_init(void)
 {
-	INIT_WORK(&catas_work, catas_reset, NULL);
+	INIT_WORK(&catas_work, catas_reset);
 
 	catas_wq = create_singlethread_workqueue("mthca_catas");
 	if (!catas_wq)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index f2b6185..9954799 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -136,11 +136,11 @@
 	struct list_head multicast_list;
 	struct rb_root multicast_tree;
 
-	struct work_struct pkey_task;
-	struct work_struct mcast_task;
+	struct delayed_work pkey_task;
+	struct delayed_work mcast_task;
 	struct work_struct flush_task;
 	struct work_struct restart_task;
-	struct work_struct ah_reap_task;
+	struct delayed_work ah_reap_task;
 
 	struct ib_device *ca;
 	u8            	  port;
@@ -254,13 +254,13 @@
 
 void ipoib_send(struct net_device *dev, struct sk_buff *skb,
 		struct ipoib_ah *address, u32 qpn);
-void ipoib_reap_ah(void *dev_ptr);
+void ipoib_reap_ah(struct work_struct *work);
 
 void ipoib_flush_paths(struct net_device *dev);
 struct ipoib_dev_priv *ipoib_intf_alloc(const char *format);
 
 int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
-void ipoib_ib_dev_flush(void *dev);
+void ipoib_ib_dev_flush(struct work_struct *work);
 void ipoib_ib_dev_cleanup(struct net_device *dev);
 
 int ipoib_ib_dev_open(struct net_device *dev);
@@ -271,10 +271,10 @@
 int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
 void ipoib_dev_cleanup(struct net_device *dev);
 
-void ipoib_mcast_join_task(void *dev_ptr);
+void ipoib_mcast_join_task(struct work_struct *work);
 void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb);
 
-void ipoib_mcast_restart_task(void *dev_ptr);
+void ipoib_mcast_restart_task(struct work_struct *work);
 int ipoib_mcast_start_thread(struct net_device *dev);
 int ipoib_mcast_stop_thread(struct net_device *dev, int flush);
 
@@ -312,7 +312,7 @@
 int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey);
 int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey);
 
-void ipoib_pkey_poll(void *dev);
+void ipoib_pkey_poll(struct work_struct *work);
 int ipoib_pkey_dev_delay_open(struct net_device *dev);
 
 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 8bf5e9e..f10fba5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -400,10 +400,11 @@
 	spin_unlock_irq(&priv->tx_lock);
 }
 
-void ipoib_reap_ah(void *dev_ptr)
+void ipoib_reap_ah(struct work_struct *work)
 {
-	struct net_device *dev = dev_ptr;
-	struct ipoib_dev_priv *priv = netdev_priv(dev);
+	struct ipoib_dev_priv *priv =
+		container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
+	struct net_device *dev = priv->dev;
 
 	__ipoib_reap_ah(dev);
 
@@ -613,10 +614,11 @@
 	return 0;
 }
 
-void ipoib_ib_dev_flush(void *_dev)
+void ipoib_ib_dev_flush(struct work_struct *work)
 {
-	struct net_device *dev = (struct net_device *)_dev;
-	struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv;
+	struct ipoib_dev_priv *cpriv, *priv =
+		container_of(work, struct ipoib_dev_priv, flush_task);
+	struct net_device *dev = priv->dev;
 
 	if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) ) {
 		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
@@ -638,14 +640,14 @@
 	 */
 	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
 		ipoib_ib_dev_up(dev);
-		ipoib_mcast_restart_task(dev);
+		ipoib_mcast_restart_task(&priv->restart_task);
 	}
 
 	mutex_lock(&priv->vlan_mutex);
 
 	/* Flush any child interfaces too */
 	list_for_each_entry(cpriv, &priv->child_intfs, list)
-		ipoib_ib_dev_flush(cpriv->dev);
+		ipoib_ib_dev_flush(&cpriv->flush_task);
 
 	mutex_unlock(&priv->vlan_mutex);
 }
@@ -672,10 +674,11 @@
  * change async notification is available.
  */
 
-void ipoib_pkey_poll(void *dev_ptr)
+void ipoib_pkey_poll(struct work_struct *work)
 {
-	struct net_device *dev = dev_ptr;
-	struct ipoib_dev_priv *priv = netdev_priv(dev);
+	struct ipoib_dev_priv *priv =
+		container_of(work, struct ipoib_dev_priv, pkey_task.work);
+	struct net_device *dev = priv->dev;
 
 	ipoib_pkey_dev_check_presence(dev);
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 5ba3154..c092802 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -940,11 +940,11 @@
 	INIT_LIST_HEAD(&priv->dead_ahs);
 	INIT_LIST_HEAD(&priv->multicast_list);
 
-	INIT_WORK(&priv->pkey_task,    ipoib_pkey_poll,          priv->dev);
-	INIT_WORK(&priv->mcast_task,   ipoib_mcast_join_task,    priv->dev);
-	INIT_WORK(&priv->flush_task,   ipoib_ib_dev_flush,       priv->dev);
-	INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task, priv->dev);
-	INIT_WORK(&priv->ah_reap_task, ipoib_reap_ah,            priv->dev);
+	INIT_DELAYED_WORK(&priv->pkey_task,    ipoib_pkey_poll);
+	INIT_DELAYED_WORK(&priv->mcast_task,   ipoib_mcast_join_task);
+	INIT_WORK(&priv->flush_task,   ipoib_ib_dev_flush);
+	INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
+	INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
 }
 
 struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index d282d65..b04b72c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -399,7 +399,8 @@
 		mcast->backoff = 1;
 		mutex_lock(&mcast_mutex);
 		if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
-			queue_work(ipoib_workqueue, &priv->mcast_task);
+			queue_delayed_work(ipoib_workqueue,
+					   &priv->mcast_task, 0);
 		mutex_unlock(&mcast_mutex);
 		complete(&mcast->done);
 		return;
@@ -435,7 +436,8 @@
 
 	if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) {
 		if (status == -ETIMEDOUT)
-			queue_work(ipoib_workqueue, &priv->mcast_task);
+			queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
+					   0);
 		else
 			queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
 					   mcast->backoff * HZ);
@@ -517,10 +519,11 @@
 		mcast->query_id = ret;
 }
 
-void ipoib_mcast_join_task(void *dev_ptr)
+void ipoib_mcast_join_task(struct work_struct *work)
 {
-	struct net_device *dev = dev_ptr;
-	struct ipoib_dev_priv *priv = netdev_priv(dev);
+	struct ipoib_dev_priv *priv =
+		container_of(work, struct ipoib_dev_priv, mcast_task.work);
+	struct net_device *dev = priv->dev;
 
 	if (!test_bit(IPOIB_MCAST_RUN, &priv->flags))
 		return;
@@ -610,7 +613,7 @@
 
 	mutex_lock(&mcast_mutex);
 	if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
-		queue_work(ipoib_workqueue, &priv->mcast_task);
+		queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
 	mutex_unlock(&mcast_mutex);
 
 	spin_lock_irq(&priv->lock);
@@ -818,10 +821,11 @@
 	}
 }
 
-void ipoib_mcast_restart_task(void *dev_ptr)
+void ipoib_mcast_restart_task(struct work_struct *work)
 {
-	struct net_device *dev = dev_ptr;
-	struct ipoib_dev_priv *priv = netdev_priv(dev);
+	struct ipoib_dev_priv *priv =
+		container_of(work, struct ipoib_dev_priv, restart_task);
+	struct net_device *dev = priv->dev;
 	struct dev_mc_list *mclist;
 	struct ipoib_mcast *mcast, *tmcast;
 	LIST_HEAD(remove_list);
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 18a0000..693b770 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -48,7 +48,7 @@
 
 static void iser_cq_tasklet_fn(unsigned long data);
 static void iser_cq_callback(struct ib_cq *cq, void *cq_context);
-static void iser_comp_error_worker(void *data);
+static void iser_comp_error_worker(struct work_struct *work);
 
 static void iser_cq_event_callback(struct ib_event *cause, void *context)
 {
@@ -480,8 +480,7 @@
 	init_waitqueue_head(&ib_conn->wait);
 	atomic_set(&ib_conn->post_recv_buf_count, 0);
 	atomic_set(&ib_conn->post_send_buf_count, 0);
-	INIT_WORK(&ib_conn->comperror_work, iser_comp_error_worker,
-		  ib_conn);
+	INIT_WORK(&ib_conn->comperror_work, iser_comp_error_worker);
 	INIT_LIST_HEAD(&ib_conn->conn_list);
 	spin_lock_init(&ib_conn->lock);
 
@@ -754,9 +753,10 @@
 	return ret_val;
 }
 
-static void iser_comp_error_worker(void *data)
+static void iser_comp_error_worker(struct work_struct *work)
 {
-	struct iser_conn *ib_conn = data;
+	struct iser_conn *ib_conn =
+		container_of(work, struct iser_conn, comperror_work);
 
 	/* getting here when the state is UP means that the conn is being *
 	 * terminated asynchronously from the iSCSI layer's perspective.  */
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 64ab5fc..a628959 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -390,9 +390,10 @@
 	wait_for_completion(&target->done);
 }
 
-static void srp_remove_work(void *target_ptr)
+static void srp_remove_work(struct work_struct *work)
 {
-	struct srp_target_port *target = target_ptr;
+	struct srp_target_port *target =
+		container_of(work, struct srp_target_port, work);
 
 	spin_lock_irq(target->scsi_host->host_lock);
 	if (target->state != SRP_TARGET_DEAD) {
@@ -575,7 +576,7 @@
 	spin_lock_irq(target->scsi_host->host_lock);
 	if (target->state == SRP_TARGET_CONNECTING) {
 		target->state = SRP_TARGET_DEAD;
-		INIT_WORK(&target->work, srp_remove_work, target);
+		INIT_WORK(&target->work, srp_remove_work);
 		schedule_work(&target->work);
 	}
 	spin_unlock_irq(target->scsi_host->host_lock);
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index a0af97e..79dfb4b 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -23,6 +23,7 @@
 #include <linux/kthread.h>
 #include <linux/sched.h>	/* HZ */
 #include <linux/mutex.h>
+#include <linux/freezer.h>
 
 /*#include <asm/io.h>*/
 
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index cbb9366..8451b29 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -567,9 +567,9 @@
  * interrupt context.
  */
 
-static void atkbd_event_work(void *data)
+static void atkbd_event_work(struct work_struct *work)
 {
-	struct atkbd *atkbd = data;
+	struct atkbd *atkbd = container_of(work, struct atkbd, event_work);
 
 	mutex_lock(&atkbd->event_mutex);
 
@@ -943,7 +943,7 @@
 
 	atkbd->dev = dev;
 	ps2_init(&atkbd->ps2dev, serio);
-	INIT_WORK(&atkbd->event_work, atkbd_event_work, atkbd);
+	INIT_WORK(&atkbd->event_work, atkbd_event_work);
 	mutex_init(&atkbd->event_mutex);
 
 	switch (serio->id.type) {
diff --git a/drivers/input/keyboard/lkkbd.c b/drivers/input/keyboard/lkkbd.c
index 979b93e..b7f049b 100644
--- a/drivers/input/keyboard/lkkbd.c
+++ b/drivers/input/keyboard/lkkbd.c
@@ -572,9 +572,9 @@
  * were in.
  */
 static void
-lkkbd_reinit (void *data)
+lkkbd_reinit (struct work_struct *work)
 {
-	struct lkkbd *lk = data;
+	struct lkkbd *lk = container_of(work, struct lkkbd, tq);
 	int division;
 	unsigned char leds_on = 0;
 	unsigned char leds_off = 0;
@@ -651,7 +651,7 @@
 
 	lk->serio = serio;
 	lk->dev = input_dev;
-	INIT_WORK (&lk->tq, lkkbd_reinit, lk);
+	INIT_WORK (&lk->tq, lkkbd_reinit);
 	lk->bell_volume = bell_volume;
 	lk->keyclick_volume = keyclick_volume;
 	lk->ctrlclick_volume = ctrlclick_volume;
diff --git a/drivers/input/keyboard/sunkbd.c b/drivers/input/keyboard/sunkbd.c
index cac4781..6cd887c 100644
--- a/drivers/input/keyboard/sunkbd.c
+++ b/drivers/input/keyboard/sunkbd.c
@@ -208,9 +208,9 @@
  * were in.
  */
 
-static void sunkbd_reinit(void *data)
+static void sunkbd_reinit(struct work_struct *work)
 {
-	struct sunkbd *sunkbd = data;
+	struct sunkbd *sunkbd = container_of(work, struct sunkbd, tq);
 
 	wait_event_interruptible_timeout(sunkbd->wait, sunkbd->reset >= 0, HZ);
 
@@ -248,7 +248,7 @@
 	sunkbd->serio = serio;
 	sunkbd->dev = input_dev;
 	init_waitqueue_head(&sunkbd->wait);
-	INIT_WORK(&sunkbd->tq, sunkbd_reinit, sunkbd);
+	INIT_WORK(&sunkbd->tq, sunkbd_reinit);
 	snprintf(sunkbd->phys, sizeof(sunkbd->phys), "%s/input0", serio->phys);
 
 	serio_set_drvdata(serio, sunkbd);
diff --git a/drivers/input/misc/hp_sdc_rtc.c b/drivers/input/misc/hp_sdc_rtc.c
index ab4da79..31d5a13 100644
--- a/drivers/input/misc/hp_sdc_rtc.c
+++ b/drivers/input/misc/hp_sdc_rtc.c
@@ -695,7 +695,9 @@
 
 	if ((ret = hp_sdc_request_timer_irq(&hp_sdc_rtc_isr)))
 		return ret;
-	misc_register(&hp_sdc_rtc_dev);
+	if (misc_register(&hp_sdc_rtc_dev) != 0)
+		printk(KERN_INFO "Could not register misc. dev for i8042 rtc\n");
+
         create_proc_read_entry ("driver/rtc", 0, NULL,
 				hp_sdc_rtc_read_proc, NULL);
 
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 6f9b2c7..52bb222 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -888,9 +888,10 @@
  * psmouse_resync() attempts to re-validate current protocol.
  */
 
-static void psmouse_resync(void *p)
+static void psmouse_resync(struct work_struct *work)
 {
-	struct psmouse *psmouse = p, *parent = NULL;
+	struct psmouse *parent = NULL, *psmouse =
+		container_of(work, struct psmouse, resync_work);
 	struct serio *serio = psmouse->ps2dev.serio;
 	psmouse_ret_t rc = PSMOUSE_GOOD_DATA;
 	int failed = 0, enabled = 0;
@@ -1121,7 +1122,7 @@
 		goto out;
 
 	ps2_init(&psmouse->ps2dev, serio);
-	INIT_WORK(&psmouse->resync_work, psmouse_resync, psmouse);
+	INIT_WORK(&psmouse->resync_work, psmouse_resync);
 	psmouse->dev = input_dev;
 	snprintf(psmouse->phys, sizeof(psmouse->phys), "%s/input0", serio->phys);
 
diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c
index e5b1b60..b3e84d3 100644
--- a/drivers/input/serio/libps2.c
+++ b/drivers/input/serio/libps2.c
@@ -251,9 +251,9 @@
  * ps2_schedule_command(), to a PS/2 device (keyboard, mouse, etc.)
  */
 
-static void ps2_execute_scheduled_command(void *data)
+static void ps2_execute_scheduled_command(struct work_struct *work)
 {
-	struct ps2work *ps2work = data;
+	struct ps2work *ps2work = container_of(work, struct ps2work, work);
 
 	ps2_command(ps2work->ps2dev, ps2work->param, ps2work->command);
 	kfree(ps2work);
@@ -278,7 +278,7 @@
 	ps2work->ps2dev = ps2dev;
 	ps2work->command = command;
 	memcpy(ps2work->param, param, send);
-	INIT_WORK(&ps2work->work, ps2_execute_scheduled_command, ps2work);
+	INIT_WORK(&ps2work->work, ps2_execute_scheduled_command);
 
 	if (!schedule_work(&ps2work->work)) {
 		kfree(ps2work);
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index 211943f..5f1d403 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -35,6 +35,7 @@
 #include <linux/slab.h>
 #include <linux/kthread.h>
 #include <linux/mutex.h>
+#include <linux/freezer.h>
 
 MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
 MODULE_DESCRIPTION("Serio abstraction core");
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index f56d6a0..0517c73 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -189,7 +189,7 @@
 {
 	struct spi_device	*spi = to_spi_device(dev);
 	struct ads7846		*ts = dev_get_drvdata(dev);
-	struct ser_req		*req = kzalloc(sizeof *req, SLAB_KERNEL);
+	struct ser_req		*req = kzalloc(sizeof *req, GFP_KERNEL);
 	int			status;
 	int			sample;
 	int			i;
diff --git a/drivers/isdn/act2000/capi.c b/drivers/isdn/act2000/capi.c
index 6ae6eb3..946c38c 100644
--- a/drivers/isdn/act2000/capi.c
+++ b/drivers/isdn/act2000/capi.c
@@ -627,8 +627,10 @@
 }
 
 void
-actcapi_dispatch(act2000_card *card)
+actcapi_dispatch(struct work_struct *work)
 {
+	struct act2000_card *card =
+		container_of(work, struct act2000_card, rcv_tq);
 	struct sk_buff *skb;
 	actcapi_msg *msg;
 	__u16 ccmd;
diff --git a/drivers/isdn/act2000/capi.h b/drivers/isdn/act2000/capi.h
index 49f453c..e55f6a9 100644
--- a/drivers/isdn/act2000/capi.h
+++ b/drivers/isdn/act2000/capi.h
@@ -356,7 +356,7 @@
 extern void actcapi_select_b2_protocol_req(act2000_card *, act2000_chan *);
 extern void actcapi_disconnect_b3_req(act2000_card *, act2000_chan *);
 extern void actcapi_connect_resp(act2000_card *, act2000_chan *, __u8);
-extern void actcapi_dispatch(act2000_card *);
+extern void actcapi_dispatch(struct work_struct *);
 #ifdef DEBUG_MSG
 extern void actcapi_debug_msg(struct sk_buff *skb, int);
 #else
diff --git a/drivers/isdn/act2000/module.c b/drivers/isdn/act2000/module.c
index d89dcde..90593e2 100644
--- a/drivers/isdn/act2000/module.c
+++ b/drivers/isdn/act2000/module.c
@@ -192,8 +192,11 @@
 }
 
 static void
-act2000_transmit(struct act2000_card *card)
+act2000_transmit(struct work_struct *work)
 {
+	struct act2000_card *card =
+		container_of(work, struct act2000_card, snd_tq);
+
 	switch (card->bus) {
 		case ACT2000_BUS_ISA:
 			act2000_isa_send(card);
@@ -207,8 +210,11 @@
 }
 
 static void
-act2000_receive(struct act2000_card *card)
+act2000_receive(struct work_struct *work)
 {
+	struct act2000_card *card =
+		container_of(work, struct act2000_card, poll_tq);
+
 	switch (card->bus) {
 		case ACT2000_BUS_ISA:
 			act2000_isa_receive(card);
@@ -227,7 +233,7 @@
 	act2000_card * card = (act2000_card *)data;
 	unsigned long flags;
 
-	act2000_receive(card);
+	act2000_receive(&card->poll_tq);
 	spin_lock_irqsave(&card->lock, flags);
 	mod_timer(&card->ptimer, jiffies+3);
 	spin_unlock_irqrestore(&card->lock, flags);
@@ -578,9 +584,9 @@
 	skb_queue_head_init(&card->sndq);
 	skb_queue_head_init(&card->rcvq);
 	skb_queue_head_init(&card->ackq);
-	INIT_WORK(&card->snd_tq, (void *) (void *) act2000_transmit, card);
-	INIT_WORK(&card->rcv_tq, (void *) (void *) actcapi_dispatch, card);
-	INIT_WORK(&card->poll_tq, (void *) (void *) act2000_receive, card);
+	INIT_WORK(&card->snd_tq, act2000_transmit);
+	INIT_WORK(&card->rcv_tq, actcapi_dispatch);
+	INIT_WORK(&card->poll_tq, act2000_receive);
 	init_timer(&card->ptimer);
 	card->interface.owner = THIS_MODULE;
         card->interface.channels = ACT2000_BCH;
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index 8c4fcb9..783a255 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -208,9 +208,10 @@
 	}
 }
 
-static void notify_handler(void *data)
+static void notify_handler(struct work_struct *work)
 {
-	struct capi_notifier *np = data;
+	struct capi_notifier *np =
+		container_of(work, struct capi_notifier, work);
 
 	switch (np->cmd) {
 	case KCI_CONTRUP:
@@ -235,7 +236,7 @@
 	if (!np)
 		return -ENOMEM;
 
-	INIT_WORK(&np->work, notify_handler, np);
+	INIT_WORK(&np->work, notify_handler);
 	np->cmd = cmd;
 	np->controller = controller;
 	np->applid = applid;
@@ -248,10 +249,11 @@
 	
 /* -------- Receiver ------------------------------------------ */
 
-static void recv_handler(void *_ap)
+static void recv_handler(struct work_struct *work)
 {
 	struct sk_buff *skb;
-	struct capi20_appl *ap = (struct capi20_appl *) _ap;
+	struct capi20_appl *ap =
+		container_of(work, struct capi20_appl, recv_work);
 
 	if ((!ap) || (ap->release_in_progress))
 		return;
@@ -527,7 +529,7 @@
 	ap->callback = NULL;
 	init_MUTEX(&ap->recv_sem);
 	skb_queue_head_init(&ap->recv_queue);
-	INIT_WORK(&ap->recv_work, recv_handler, (void *)ap);
+	INIT_WORK(&ap->recv_work, recv_handler);
 	ap->release_in_progress = 0;
 
 	write_unlock_irqrestore(&application_lock, flags);
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index 0c93732..63b629b 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -572,7 +572,7 @@
 			     ucs->rcvbuf, ucs->rcvbuf_size,
 			     read_ctrl_callback, cs->inbuf);
 
-	if ((ret = usb_submit_urb(ucs->urb_cmd_in, SLAB_ATOMIC)) != 0) {
+	if ((ret = usb_submit_urb(ucs->urb_cmd_in, GFP_ATOMIC)) != 0) {
 		update_basstate(ucs, 0, BS_ATRDPEND);
 		dev_err(cs->dev, "could not submit HD_READ_ATMESSAGE: %s\n",
 			get_usb_rcmsg(ret));
@@ -747,7 +747,7 @@
 	check_pending(ucs);
 
 resubmit:
-	rc = usb_submit_urb(urb, SLAB_ATOMIC);
+	rc = usb_submit_urb(urb, GFP_ATOMIC);
 	if (unlikely(rc != 0 && rc != -ENODEV)) {
 		dev_err(cs->dev, "could not resubmit interrupt URB: %s\n",
 			get_usb_rcmsg(rc));
@@ -807,7 +807,7 @@
 			urb->number_of_packets = BAS_NUMFRAMES;
 			gig_dbg(DEBUG_ISO, "%s: isoc read overrun/resubmit",
 				__func__);
-			rc = usb_submit_urb(urb, SLAB_ATOMIC);
+			rc = usb_submit_urb(urb, GFP_ATOMIC);
 			if (unlikely(rc != 0 && rc != -ENODEV)) {
 				dev_err(bcs->cs->dev,
 					"could not resubmit isochronous read "
@@ -900,7 +900,7 @@
 		}
 
 		dump_urb(DEBUG_ISO, "Initial isoc read", urb);
-		if ((rc = usb_submit_urb(urb, SLAB_ATOMIC)) != 0)
+		if ((rc = usb_submit_urb(urb, GFP_ATOMIC)) != 0)
 			goto error;
 	}
 
@@ -935,7 +935,7 @@
 	/* submit two URBs, keep third one */
 	for (k = 0; k < 2; ++k) {
 		dump_urb(DEBUG_ISO, "Initial isoc write", urb);
-		rc = usb_submit_urb(ubc->isoouturbs[k].urb, SLAB_ATOMIC);
+		rc = usb_submit_urb(ubc->isoouturbs[k].urb, GFP_ATOMIC);
 		if (rc != 0)
 			goto error;
 	}
@@ -1042,7 +1042,7 @@
 		return 0;	/* no data to send */
 	urb->number_of_packets = nframe;
 
-	rc = usb_submit_urb(urb, SLAB_ATOMIC);
+	rc = usb_submit_urb(urb, GFP_ATOMIC);
 	if (unlikely(rc)) {
 		if (rc == -ENODEV)
 			/* device removed - give up silently */
@@ -1341,7 +1341,7 @@
 		urb->dev = bcs->cs->hw.bas->udev;
 		urb->transfer_flags = URB_ISO_ASAP;
 		urb->number_of_packets = BAS_NUMFRAMES;
-		rc = usb_submit_urb(urb, SLAB_ATOMIC);
+		rc = usb_submit_urb(urb, GFP_ATOMIC);
 		if (unlikely(rc != 0 && rc != -ENODEV)) {
 			dev_err(cs->dev,
 				"could not resubmit isochronous read URB: %s\n",
@@ -1458,7 +1458,7 @@
 			   ucs->retry_ctrl);
 		/* urb->dev is clobbered by USB subsystem */
 		urb->dev = ucs->udev;
-		rc = usb_submit_urb(urb, SLAB_ATOMIC);
+		rc = usb_submit_urb(urb, GFP_ATOMIC);
 		if (unlikely(rc)) {
 			dev_err(&ucs->interface->dev,
 				"could not resubmit request 0x%02x: %s\n",
@@ -1517,7 +1517,7 @@
 			     (unsigned char*) &ucs->dr_ctrl, NULL, 0,
 			     write_ctrl_callback, ucs);
 	ucs->retry_ctrl = 0;
-	ret = usb_submit_urb(ucs->urb_ctrl, SLAB_ATOMIC);
+	ret = usb_submit_urb(ucs->urb_ctrl, GFP_ATOMIC);
 	if (unlikely(ret)) {
 		dev_err(bcs->cs->dev, "could not submit request 0x%02x: %s\n",
 			req, get_usb_rcmsg(ret));
@@ -1763,7 +1763,7 @@
 			     usb_sndctrlpipe(ucs->udev, 0),
 			     (unsigned char*) &ucs->dr_cmd_out, buf, len,
 			     write_command_callback, cs);
-	rc = usb_submit_urb(ucs->urb_cmd_out, SLAB_ATOMIC);
+	rc = usb_submit_urb(ucs->urb_cmd_out, GFP_ATOMIC);
 	if (unlikely(rc)) {
 		update_basstate(ucs, 0, BS_ATWRPEND);
 		dev_err(cs->dev, "could not submit HD_WRITE_ATMESSAGE: %s\n",
@@ -2218,21 +2218,21 @@
 	 * - three for the different uses of the default control pipe
 	 * - three for each isochronous pipe
 	 */
-	if (!(ucs->urb_int_in = usb_alloc_urb(0, SLAB_KERNEL)) ||
-	    !(ucs->urb_cmd_in = usb_alloc_urb(0, SLAB_KERNEL)) ||
-	    !(ucs->urb_cmd_out = usb_alloc_urb(0, SLAB_KERNEL)) ||
-	    !(ucs->urb_ctrl = usb_alloc_urb(0, SLAB_KERNEL)))
+	if (!(ucs->urb_int_in = usb_alloc_urb(0, GFP_KERNEL)) ||
+	    !(ucs->urb_cmd_in = usb_alloc_urb(0, GFP_KERNEL)) ||
+	    !(ucs->urb_cmd_out = usb_alloc_urb(0, GFP_KERNEL)) ||
+	    !(ucs->urb_ctrl = usb_alloc_urb(0, GFP_KERNEL)))
 		goto allocerr;
 
 	for (j = 0; j < 2; ++j) {
 		ubc = cs->bcs[j].hw.bas;
 		for (i = 0; i < BAS_OUTURBS; ++i)
 			if (!(ubc->isoouturbs[i].urb =
-			      usb_alloc_urb(BAS_NUMFRAMES, SLAB_KERNEL)))
+			      usb_alloc_urb(BAS_NUMFRAMES, GFP_KERNEL)))
 				goto allocerr;
 		for (i = 0; i < BAS_INURBS; ++i)
 			if (!(ubc->isoinurbs[i] =
-			      usb_alloc_urb(BAS_NUMFRAMES, SLAB_KERNEL)))
+			      usb_alloc_urb(BAS_NUMFRAMES, GFP_KERNEL)))
 				goto allocerr;
 	}
 
@@ -2246,7 +2246,7 @@
 					(endpoint->bEndpointAddress) & 0x0f),
 			 ucs->int_in_buf, 3, read_int_callback, cs,
 			 endpoint->bInterval);
-	if ((rc = usb_submit_urb(ucs->urb_int_in, SLAB_KERNEL)) != 0) {
+	if ((rc = usb_submit_urb(ucs->urb_int_in, GFP_KERNEL)) != 0) {
 		dev_err(cs->dev, "could not submit interrupt URB: %s\n",
 			get_usb_rcmsg(rc));
 		goto error;
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
index 5ebf49ac9..04f2ad7 100644
--- a/drivers/isdn/gigaset/usb-gigaset.c
+++ b/drivers/isdn/gigaset/usb-gigaset.c
@@ -410,7 +410,7 @@
 
 	if (resubmit) {
 		spin_lock_irqsave(&cs->lock, flags);
-		r = cs->connected ? usb_submit_urb(urb, SLAB_ATOMIC) : -ENODEV;
+		r = cs->connected ? usb_submit_urb(urb, GFP_ATOMIC) : -ENODEV;
 		spin_unlock_irqrestore(&cs->lock, flags);
 		if (r)
 			dev_err(cs->dev, "error %d when resubmitting urb.\n",
@@ -486,7 +486,7 @@
 			atomic_set(&ucs->busy, 1);
 
 			spin_lock_irqsave(&cs->lock, flags);
-			status = cs->connected ? usb_submit_urb(ucs->bulk_out_urb, SLAB_ATOMIC) : -ENODEV;
+			status = cs->connected ? usb_submit_urb(ucs->bulk_out_urb, GFP_ATOMIC) : -ENODEV;
 			spin_unlock_irqrestore(&cs->lock, flags);
 
 			if (status) {
@@ -664,7 +664,7 @@
 						  ucs->bulk_out_endpointAddr & 0x0f),
 				  ucs->bulk_out_buffer, count,
 				  gigaset_write_bulk_callback, cs);
-		ret = usb_submit_urb(ucs->bulk_out_urb, SLAB_ATOMIC);
+		ret = usb_submit_urb(ucs->bulk_out_urb, GFP_ATOMIC);
 	} else {
 		ret = -ENODEV;
 	}
@@ -763,7 +763,7 @@
 		goto error;
 	}
 
-	ucs->bulk_out_urb = usb_alloc_urb(0, SLAB_KERNEL);
+	ucs->bulk_out_urb = usb_alloc_urb(0, GFP_KERNEL);
 	if (!ucs->bulk_out_urb) {
 		dev_err(cs->dev, "Couldn't allocate bulk_out_urb\n");
 		retval = -ENOMEM;
@@ -774,7 +774,7 @@
 
 	atomic_set(&ucs->busy, 0);
 
-	ucs->read_urb = usb_alloc_urb(0, SLAB_KERNEL);
+	ucs->read_urb = usb_alloc_urb(0, GFP_KERNEL);
 	if (!ucs->read_urb) {
 		dev_err(cs->dev, "No free urbs available\n");
 		retval = -ENOMEM;
@@ -797,7 +797,7 @@
 			 gigaset_read_int_callback,
 			 cs->inbuf + 0, endpoint->bInterval);
 
-	retval = usb_submit_urb(ucs->read_urb, SLAB_KERNEL);
+	retval = usb_submit_urb(ucs->read_urb, GFP_KERNEL);
 	if (retval) {
 		dev_err(cs->dev, "Could not submit URB (error %d)\n", -retval);
 		goto error;
diff --git a/drivers/isdn/hisax/amd7930_fn.c b/drivers/isdn/hisax/amd7930_fn.c
index bec5901..3b19cae 100644
--- a/drivers/isdn/hisax/amd7930_fn.c
+++ b/drivers/isdn/hisax/amd7930_fn.c
@@ -232,9 +232,10 @@
 
 
 static void
-Amd7930_bh(struct IsdnCardState *cs)
+Amd7930_bh(struct work_struct *work)
 {
-
+	struct IsdnCardState *cs =
+		container_of(work, struct IsdnCardState, tqueue);
         struct PStack *stptr;
 
 	if (!cs)
@@ -789,7 +790,7 @@
 void __devinit
 setup_Amd7930(struct IsdnCardState *cs)
 {
-        INIT_WORK(&cs->tqueue, (void *)(void *) Amd7930_bh, cs);
+        INIT_WORK(&cs->tqueue, Amd7930_bh);
 	cs->dbusytimer.function = (void *) dbusy_timer_handler;
 	cs->dbusytimer.data = (long) cs;
 	init_timer(&cs->dbusytimer);
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index 785b085..cede72c 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -1137,7 +1137,6 @@
 	cs->tx_skb = NULL;
 	cs->tx_cnt = 0;
 	cs->event = 0;
-	cs->tqueue.data = cs;
 
 	skb_queue_head_init(&cs->rq);
 	skb_queue_head_init(&cs->sq);
@@ -1554,7 +1553,7 @@
 static int hisax_cardmsg(struct IsdnCardState *cs, int mt, void *arg);
 static int hisax_bc_setstack(struct PStack *st, struct BCState *bcs);
 static void hisax_bc_close(struct BCState *bcs);
-static void hisax_bh(struct IsdnCardState *cs);
+static void hisax_bh(struct work_struct *work);
 static void EChannel_proc_rcv(struct hisax_d_if *d_if);
 
 int hisax_register(struct hisax_d_if *hisax_d_if, struct hisax_b_if *b_if[],
@@ -1586,7 +1585,7 @@
 	hisax_d_if->cs = cs;
 	cs->hw.hisax_d_if = hisax_d_if;
 	cs->cardmsg = hisax_cardmsg;
-	INIT_WORK(&cs->tqueue, (void *)(void *)hisax_bh, cs);
+	INIT_WORK(&cs->tqueue, hisax_bh);
 	cs->channel[0].d_st->l2.l2l1 = hisax_d_l2l1;
 	for (i = 0; i < 2; i++) {
 		cs->bcs[i].BC_SetStack = hisax_bc_setstack;
@@ -1618,8 +1617,10 @@
 	schedule_work(&cs->tqueue);
 }
 
-static void hisax_bh(struct IsdnCardState *cs)
+static void hisax_bh(struct work_struct *work)
 {
+	struct IsdnCardState *cs =
+		container_of(work, struct IsdnCardState, tqueue);
 	struct PStack *st;
 	int pr;
 
diff --git a/drivers/isdn/hisax/hfc4s8s_l1.c b/drivers/isdn/hisax/hfc4s8s_l1.c
index d852c9d..de9b1a4 100644
--- a/drivers/isdn/hisax/hfc4s8s_l1.c
+++ b/drivers/isdn/hisax/hfc4s8s_l1.c
@@ -1083,8 +1083,9 @@
 /* bottom half handler for interrupt */
 /*************************************/
 static void
-hfc4s8s_bh(hfc4s8s_hw * hw)
+hfc4s8s_bh(struct work_struct *work)
 {
+	hfc4s8s_hw *hw = container_of(work, hfc4s8s_hw, tqueue);
 	u_char b;
 	struct hfc4s8s_l1 *l1p;
 	volatile u_char *fifo_stat;
@@ -1550,7 +1551,7 @@
 		goto out;
 	}
 
-	INIT_WORK(&hw->tqueue, (void *) (void *) hfc4s8s_bh, hw);
+	INIT_WORK(&hw->tqueue, hfc4s8s_bh);
 
 	if (request_irq
 	    (hw->irq, hfc4s8s_interrupt, IRQF_SHARED, hw->card_name, hw)) {
diff --git a/drivers/isdn/hisax/hfc_2bds0.c b/drivers/isdn/hisax/hfc_2bds0.c
index 6360e82..8d98644 100644
--- a/drivers/isdn/hisax/hfc_2bds0.c
+++ b/drivers/isdn/hisax/hfc_2bds0.c
@@ -549,10 +549,11 @@
 }
 
 static void
-hfcd_bh(struct IsdnCardState *cs)
+hfcd_bh(struct work_struct *work)
 {
-	if (!cs)
-		return;
+	struct IsdnCardState *cs =
+		container_of(work, struct IsdnCardState, tqueue);
+
 	if (test_and_clear_bit(D_L1STATECHANGE, &cs->event)) {
 		switch (cs->dc.hfcd.ph_state) {
 			case (0):
@@ -1072,5 +1073,5 @@
 	cs->dbusytimer.function = (void *) hfc_dbusy_timer;
 	cs->dbusytimer.data = (long) cs;
 	init_timer(&cs->dbusytimer);
-	INIT_WORK(&cs->tqueue, (void *)(void *) hfcd_bh, cs);
+	INIT_WORK(&cs->tqueue, hfcd_bh);
 }
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 93f60b5..5db0a85 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -1506,8 +1506,10 @@
 /* handle L1 state changes */
 /***************************/
 static void
-hfcpci_bh(struct IsdnCardState *cs)
+hfcpci_bh(struct work_struct *work)
 {
+	struct IsdnCardState *cs =
+		container_of(work, struct IsdnCardState, tqueue);
 	u_long	flags;
 //      struct PStack *stptr;
 
@@ -1722,7 +1724,7 @@
 		Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
 		/* At this point the needed PCI config is done */
 		/* fifos are still not enabled */
-		INIT_WORK(&cs->tqueue, (void *)(void *) hfcpci_bh, cs);
+		INIT_WORK(&cs->tqueue,  hfcpci_bh);
 		cs->setstack_d = setstack_hfcpci;
 		cs->BC_Send_Data = &hfcpci_send_data;
 		cs->readisac = NULL;
diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c
index 954d153..4fd09d2 100644
--- a/drivers/isdn/hisax/hfc_sx.c
+++ b/drivers/isdn/hisax/hfc_sx.c
@@ -1251,8 +1251,10 @@
 /* handle L1 state changes */
 /***************************/
 static void
-hfcsx_bh(struct IsdnCardState *cs)
+hfcsx_bh(struct work_struct *work)
 {
+	struct IsdnCardState *cs =
+		container_of(work, struct IsdnCardState, tqueue);
 	u_long flags;
 
 	if (!cs)
@@ -1499,7 +1501,7 @@
 	cs->dbusytimer.function = (void *) hfcsx_dbusy_timer;
 	cs->dbusytimer.data = (long) cs;
 	init_timer(&cs->dbusytimer);
-	INIT_WORK(&cs->tqueue, (void *)(void *) hfcsx_bh, cs);
+	INIT_WORK(&cs->tqueue, hfcsx_bh);
 	cs->readisac = NULL;
 	cs->writeisac = NULL;
 	cs->readisacfifo = NULL;
diff --git a/drivers/isdn/hisax/icc.c b/drivers/isdn/hisax/icc.c
index da70692..682cac3 100644
--- a/drivers/isdn/hisax/icc.c
+++ b/drivers/isdn/hisax/icc.c
@@ -77,8 +77,10 @@
 }
 
 static void
-icc_bh(struct IsdnCardState *cs)
+icc_bh(struct work_struct *work)
 {
+	struct IsdnCardState *cs =
+		container_of(work, struct IsdnCardState, tqueue);
 	struct PStack *stptr;
 	
 	if (!cs)
@@ -674,7 +676,7 @@
 void __devinit
 setup_icc(struct IsdnCardState *cs)
 {
-	INIT_WORK(&cs->tqueue, (void *)(void *) icc_bh, cs);
+	INIT_WORK(&cs->tqueue, icc_bh);
 	cs->dbusytimer.function = (void *) dbusy_timer_handler;
 	cs->dbusytimer.data = (long) cs;
 	init_timer(&cs->dbusytimer);
diff --git a/drivers/isdn/hisax/isac.c b/drivers/isdn/hisax/isac.c
index 282f349..4e9f238 100644
--- a/drivers/isdn/hisax/isac.c
+++ b/drivers/isdn/hisax/isac.c
@@ -81,8 +81,10 @@
 }
 
 static void
-isac_bh(struct IsdnCardState *cs)
+isac_bh(struct work_struct *work)
 {
+	struct IsdnCardState *cs =
+		container_of(work, struct IsdnCardState, tqueue);
 	struct PStack *stptr;
 	
 	if (!cs)
@@ -674,7 +676,7 @@
 void __devinit
 setup_isac(struct IsdnCardState *cs)
 {
-	INIT_WORK(&cs->tqueue, (void *)(void *) isac_bh, cs);
+	INIT_WORK(&cs->tqueue, isac_bh);
 	cs->dbusytimer.function = (void *) dbusy_timer_handler;
 	cs->dbusytimer.data = (long) cs;
 	init_timer(&cs->dbusytimer);
diff --git a/drivers/isdn/hisax/isar.c b/drivers/isdn/hisax/isar.c
index 674af67..6f1a658 100644
--- a/drivers/isdn/hisax/isar.c
+++ b/drivers/isdn/hisax/isar.c
@@ -437,8 +437,10 @@
 #define B_LL_OK		10
 
 static void
-isar_bh(struct BCState *bcs)
+isar_bh(struct work_struct *work)
 {
+	struct BCState *bcs = container_of(work, struct BCState, tqueue);
+
 	BChannel_bh(bcs);
 	if (test_and_clear_bit(B_LL_NOCARRIER, &bcs->event))
 		ll_deliver_faxstat(bcs, ISDN_FAX_CLASS1_NOCARR);
@@ -1580,7 +1582,7 @@
 		cs->bcs[i].mode = 0;
 		cs->bcs[i].hw.isar.dpath = i + 1;
 		modeisar(&cs->bcs[i], 0, 0);
-		INIT_WORK(&cs->bcs[i].tqueue, (void *)(void *) isar_bh, &cs->bcs[i]);
+		INIT_WORK(&cs->bcs[i].tqueue, isar_bh);
 	}
 }
 
diff --git a/drivers/isdn/hisax/isdnhdlc.h b/drivers/isdn/hisax/isdnhdlc.h
index 2693159..5655b5f 100644
--- a/drivers/isdn/hisax/isdnhdlc.h
+++ b/drivers/isdn/hisax/isdnhdlc.h
@@ -41,10 +41,10 @@
 	unsigned char shift_reg;
 	unsigned char ffvalue;
 
-	int data_received:1; 	// set if transferring data
-	int dchannel:1; 	// set if D channel (send idle instead of flags)
-	int do_adapt56:1; 	// set if 56K adaptation
-        int do_closing:1; 	// set if in closing phase (need to send CRC + flag
+	unsigned int data_received:1; 	// set if transferring data
+	unsigned int dchannel:1; 	// set if D channel (send idle instead of flags)
+	unsigned int do_adapt56:1; 	// set if 56K adaptation
+	unsigned int do_closing:1; 	// set if in closing phase (need to send CRC + flag
 };
 
 
diff --git a/drivers/isdn/hisax/isdnl1.c b/drivers/isdn/hisax/isdnl1.c
index bab3568..a14204e 100644
--- a/drivers/isdn/hisax/isdnl1.c
+++ b/drivers/isdn/hisax/isdnl1.c
@@ -315,8 +315,10 @@
 }
 
 void
-BChannel_bh(struct BCState *bcs)
+BChannel_bh(struct work_struct *work)
 {
+	struct BCState *bcs = container_of(work, struct BCState, tqueue);
+
 	if (!bcs)
 		return;
 	if (test_and_clear_bit(B_RCVBUFREADY, &bcs->event))
@@ -362,7 +364,7 @@
 
 	bcs->cs = cs;
 	bcs->channel = bc;
-	INIT_WORK(&bcs->tqueue, (void *)(void *) BChannel_bh, bcs);
+	INIT_WORK(&bcs->tqueue, BChannel_bh);
 	spin_lock_init(&bcs->aclock);
 	bcs->BC_SetStack = NULL;
 	bcs->BC_Close = NULL;
diff --git a/drivers/isdn/hisax/w6692.c b/drivers/isdn/hisax/w6692.c
index 1655341..3aeceaf 100644
--- a/drivers/isdn/hisax/w6692.c
+++ b/drivers/isdn/hisax/w6692.c
@@ -101,8 +101,10 @@
 }
 
 static void
-W6692_bh(struct IsdnCardState *cs)
+W6692_bh(struct work_struct *work)
 {
+	struct IsdnCardState *cs =
+		container_of(work, struct IsdnCardState, tqueue);
 	struct PStack *stptr;
 
 	if (!cs)
@@ -1070,7 +1072,7 @@
 	       id_list[cs->subtyp].card_name, cs->irq,
 	       cs->hw.w6692.iobase);
 
-	INIT_WORK(&cs->tqueue, (void *)(void *) W6692_bh, cs);
+	INIT_WORK(&cs->tqueue, W6692_bh);
 	cs->readW6692 = &ReadW6692;
 	cs->writeW6692 = &WriteW6692;
 	cs->readisacfifo = &ReadISACfifo;
diff --git a/drivers/isdn/hysdn/boardergo.c b/drivers/isdn/hysdn/boardergo.c
index 82e42a8..a120649 100644
--- a/drivers/isdn/hysdn/boardergo.c
+++ b/drivers/isdn/hysdn/boardergo.c
@@ -71,8 +71,9 @@
 /* may be queued from everywhere (interrupts included).                       */
 /******************************************************************************/
 static void
-ergo_irq_bh(hysdn_card * card)
+ergo_irq_bh(struct work_struct *ugli_api)
 {
+	hysdn_card * card = container_of(ugli_api, hysdn_card, irq_queue);
 	tErgDpram *dpr;
 	int again;
 	unsigned long flags;
@@ -442,7 +443,7 @@
 	card->writebootseq = ergo_writebootseq;
 	card->waitpofready = ergo_waitpofready;
 	card->set_errlog_state = ergo_set_errlog_state;
-	INIT_WORK(&card->irq_queue, (void *) (void *) ergo_irq_bh, card);
+	INIT_WORK(&card->irq_queue, ergo_irq_bh);
 	card->hysdn_lock = SPIN_LOCK_UNLOCKED;
 
 	return (0);
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index 1f8d6ae..2e4daeb 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -984,9 +984,9 @@
 /*
  * called from tq_immediate
  */
-static void isdn_net_softint(void *private)
+static void isdn_net_softint(struct work_struct *work)
 {
-	isdn_net_local *lp = private;
+	isdn_net_local *lp = container_of(work, isdn_net_local, tqueue);
 	struct sk_buff *skb;
 
 	spin_lock_bh(&lp->xmit_lock);
@@ -2596,7 +2596,7 @@
 	netdev->local->netdev = netdev;
 	netdev->local->next = netdev->local;
 
-	INIT_WORK(&netdev->local->tqueue, (void *)(void *) isdn_net_softint, netdev->local);
+	INIT_WORK(&netdev->local->tqueue, isdn_net_softint);
 	spin_lock_init(&netdev->local->xmit_lock);
 
 	netdev->local->isdn_device = -1;
diff --git a/drivers/isdn/pcbit/drv.c b/drivers/isdn/pcbit/drv.c
index 6ead5e1..1966f34 100644
--- a/drivers/isdn/pcbit/drv.c
+++ b/drivers/isdn/pcbit/drv.c
@@ -68,8 +68,6 @@
 static int pcbit_check_msn(struct pcbit_dev *dev, char *msn);
 
 
-extern void pcbit_deliver(void * data);
-
 int pcbit_init_dev(int board, int mem_base, int irq)
 {
 	struct pcbit_dev *dev;
@@ -129,7 +127,7 @@
 	memset(dev->b2, 0, sizeof(struct pcbit_chan));
 	dev->b2->id = 1;
 
-	INIT_WORK(&dev->qdelivery, pcbit_deliver, dev);
+	INIT_WORK(&dev->qdelivery, pcbit_deliver);
 
 	/*
 	 *  interrupts
diff --git a/drivers/isdn/pcbit/layer2.c b/drivers/isdn/pcbit/layer2.c
index 937fd21..0c9f6df 100644
--- a/drivers/isdn/pcbit/layer2.c
+++ b/drivers/isdn/pcbit/layer2.c
@@ -67,7 +67,6 @@
  *  Prototypes
  */
 
-void pcbit_deliver(void *data);
 static void pcbit_transmit(struct pcbit_dev *dev);
 
 static void pcbit_recv_ack(struct pcbit_dev *dev, unsigned char ack);
@@ -299,11 +298,12 @@
  */
 
 void
-pcbit_deliver(void *data)
+pcbit_deliver(struct work_struct *work)
 {
 	struct frame_buf *frame;
 	unsigned long flags, msg;
-	struct pcbit_dev *dev = (struct pcbit_dev *) data;
+	struct pcbit_dev *dev =
+		container_of(work, struct pcbit_dev, qdelivery);
 
 	spin_lock_irqsave(&dev->lock, flags);
 
diff --git a/drivers/isdn/pcbit/pcbit.h b/drivers/isdn/pcbit/pcbit.h
index 388bace..19c18e8 100644
--- a/drivers/isdn/pcbit/pcbit.h
+++ b/drivers/isdn/pcbit/pcbit.h
@@ -166,4 +166,6 @@
 #define L2_RUNNING  5
 #define L2_ERROR    6
 
+extern void pcbit_deliver(struct work_struct *work);
+
 #endif
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 9c39b98..176142c 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -76,6 +76,12 @@
 	  This option enables support for the Soekris net4801 and net4826 error
 	  LED.
 
+config LEDS_WRAP
+	tristate "LED Support for the WRAP series LEDs"
+	depends on LEDS_CLASS && SCx200_GPIO
+	help
+	  This option enables support for the PCEngines WRAP programmable LEDs.
+
 comment "LED Triggers"
 
 config LEDS_TRIGGERS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 6aa2aed..500de3dc 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -13,6 +13,7 @@
 obj-$(CONFIG_LEDS_S3C24XX)		+= leds-s3c24xx.o
 obj-$(CONFIG_LEDS_AMS_DELTA)		+= leds-ams-delta.o
 obj-$(CONFIG_LEDS_NET48XX)		+= leds-net48xx.o
+obj-$(CONFIG_LEDS_WRAP)			+= leds-wrap.o
 
 # LED Triggers
 obj-$(CONFIG_LEDS_TRIGGER_TIMER)	+= ledtrig-timer.o
diff --git a/drivers/leds/leds-wrap.c b/drivers/leds/leds-wrap.c
new file mode 100644
index 0000000..27fb2d8
--- /dev/null
+++ b/drivers/leds/leds-wrap.c
@@ -0,0 +1,142 @@
+/*
+ * LEDs driver for PCEngines WRAP
+ *
+ * Copyright (C) 2006 Kristian Kielhofner <kris@krisk.org>
+ *
+ * Based on leds-net48xx.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <linux/err.h>
+#include <asm/io.h>
+#include <linux/scx200_gpio.h>
+
+#define DRVNAME "wrap-led"
+#define WRAP_ERROR_LED_GPIO	3
+#define	WRAP_EXTRA_LED_GPIO	18
+
+static struct platform_device *pdev;
+
+static void wrap_error_led_set(struct led_classdev *led_cdev,
+		enum led_brightness value)
+{
+	if (value)
+		scx200_gpio_set_low(WRAP_ERROR_LED_GPIO);
+	else
+		scx200_gpio_set_high(WRAP_ERROR_LED_GPIO);
+}
+
+static void wrap_extra_led_set(struct led_classdev *led_cdev,
+		enum led_brightness value)
+{
+	if (value)
+		scx200_gpio_set_low(WRAP_EXTRA_LED_GPIO);
+	else
+		scx200_gpio_set_high(WRAP_EXTRA_LED_GPIO);
+}
+
+static struct led_classdev wrap_error_led = {
+	.name		= "wrap:error",
+	.brightness_set	= wrap_error_led_set,
+};
+
+static struct led_classdev wrap_extra_led = {
+	.name           = "wrap:extra",
+	.brightness_set = wrap_extra_led_set,
+};
+
+#ifdef CONFIG_PM
+static int wrap_led_suspend(struct platform_device *dev,
+		pm_message_t state)
+{
+	led_classdev_suspend(&wrap_error_led);
+	led_classdev_suspend(&wrap_extra_led);
+	return 0;
+}
+
+static int wrap_led_resume(struct platform_device *dev)
+{
+	led_classdev_resume(&wrap_error_led);
+	led_classdev_resume(&wrap_extra_led);
+	return 0;
+}
+#else
+#define wrap_led_suspend NULL
+#define wrap_led_resume NULL
+#endif
+
+static int wrap_led_probe(struct platform_device *pdev)
+{
+	int ret;
+
+	ret = led_classdev_register(&pdev->dev, &wrap_error_led);
+	if (ret == 0) {
+		ret = led_classdev_register(&pdev->dev, &wrap_extra_led);
+		if (ret < 0)
+			led_classdev_unregister(&wrap_error_led);
+	}
+	return ret;
+}
+
+static int wrap_led_remove(struct platform_device *pdev)
+{
+	led_classdev_unregister(&wrap_error_led);
+	led_classdev_unregister(&wrap_extra_led);
+	return 0;
+}
+
+static struct platform_driver wrap_led_driver = {
+	.probe		= wrap_led_probe,
+	.remove		= wrap_led_remove,
+	.suspend	= wrap_led_suspend,
+	.resume		= wrap_led_resume,
+	.driver		= {
+		.name		= DRVNAME,
+		.owner		= THIS_MODULE,
+	},
+};
+
+static int __init wrap_led_init(void)
+{
+	int ret;
+
+	if (!scx200_gpio_present()) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	ret = platform_driver_register(&wrap_led_driver);
+	if (ret < 0)
+		goto out;
+
+	pdev = platform_device_register_simple(DRVNAME, -1, NULL, 0);
+	if (IS_ERR(pdev)) {
+		ret = PTR_ERR(pdev);
+		platform_driver_unregister(&wrap_led_driver);
+		goto out;
+	}
+
+out:
+	return ret;
+}
+
+static void __exit wrap_led_exit(void)
+{
+	platform_device_unregister(pdev);
+	platform_driver_unregister(&wrap_led_driver);
+}
+
+module_init(wrap_led_init);
+module_exit(wrap_led_exit);
+
+MODULE_AUTHOR("Kristian Kielhofner <kris@krisk.org>");
+MODULE_DESCRIPTION("PCEngines WRAP LED driver");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c
index be0bd34..d43ea81 100644
--- a/drivers/macintosh/adb.c
+++ b/drivers/macintosh/adb.c
@@ -267,12 +267,12 @@
 }
 
 static void
-__adb_probe_task(void *data)
+__adb_probe_task(struct work_struct *unused)
 {
 	adb_probe_task_pid = kernel_thread(adb_probe_task, NULL, SIGCHLD | CLONE_KERNEL);
 }
 
-static DECLARE_WORK(adb_reset_work, __adb_probe_task, NULL);
+static DECLARE_WORK(adb_reset_work, __adb_probe_task);
 
 int
 adb_reset_bus(void)
diff --git a/drivers/macintosh/apm_emu.c b/drivers/macintosh/apm_emu.c
index 1293876..8862a83 100644
--- a/drivers/macintosh/apm_emu.c
+++ b/drivers/macintosh/apm_emu.c
@@ -529,7 +529,8 @@
 	if (apm_proc)
 		apm_proc->owner = THIS_MODULE;
 
-	misc_register(&apm_device);
+	if (misc_register(&apm_device) != 0)
+		printk(KERN_INFO "Could not create misc. device for apm\n");
 
 	pmu_register_sleep_notifier(&apm_sleep_notifier);
 
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
index f1b6f56..5ed41fe 100644
--- a/drivers/macintosh/rack-meter.c
+++ b/drivers/macintosh/rack-meter.c
@@ -48,7 +48,8 @@
 } ____cacheline_aligned;
 
 struct rackmeter_cpu {
-	struct work_struct	sniffer;
+	struct delayed_work	sniffer;
+	struct rackmeter	*rm;
 	cputime64_t		prev_wall;
 	cputime64_t		prev_idle;
 	int			zero;
@@ -208,11 +209,12 @@
 	rackmeter_do_pause(rm, 0);
 }
 
-static void rackmeter_do_timer(void *data)
+static void rackmeter_do_timer(struct work_struct *work)
 {
-	struct rackmeter *rm = data;
+	struct rackmeter_cpu *rcpu =
+		container_of(work, struct rackmeter_cpu, sniffer.work);
+	struct rackmeter *rm = rcpu->rm;
 	unsigned int cpu = smp_processor_id();
-	struct rackmeter_cpu *rcpu = &rm->cpu[cpu];
 	cputime64_t cur_jiffies, total_idle_ticks;
 	unsigned int total_ticks, idle_ticks;
 	int i, offset, load, cumm, pause;
@@ -263,8 +265,10 @@
 	 * on those machines yet
 	 */
 
-	INIT_WORK(&rm->cpu[0].sniffer, rackmeter_do_timer, rm);
-	INIT_WORK(&rm->cpu[1].sniffer, rackmeter_do_timer, rm);
+	rm->cpu[0].rm = rm;
+	INIT_DELAYED_WORK(&rm->cpu[0].sniffer, rackmeter_do_timer);
+	rm->cpu[1].rm = rm;
+	INIT_DELAYED_WORK(&rm->cpu[1].sniffer, rackmeter_do_timer);
 
 	for_each_online_cpu(cpu) {
 		struct rackmeter_cpu *rcpu;
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 4f724cd..6dde27a 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -601,7 +601,7 @@
  * sysfs visibility
  */
 
-static void smu_expose_childs(void *unused)
+static void smu_expose_childs(struct work_struct *unused)
 {
 	struct device_node *np;
 
@@ -611,7 +611,7 @@
 						  &smu->of_dev->dev);
 }
 
-static DECLARE_WORK(smu_expose_childs_work, smu_expose_childs, NULL);
+static DECLARE_WORK(smu_expose_childs_work, smu_expose_childs);
 
 static int smu_platform_probe(struct of_device* dev,
 			      const struct of_device_id *match)
diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c
index 13b953a..3d3bf16 100644
--- a/drivers/macintosh/therm_adt746x.c
+++ b/drivers/macintosh/therm_adt746x.c
@@ -24,6 +24,7 @@
 #include <linux/suspend.h>
 #include <linux/kthread.h>
 #include <linux/moduleparam.h>
+#include <linux/freezer.h>
 
 #include <asm/prom.h>
 #include <asm/machdep.h>
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index e63ea1c..c8558d4 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -42,7 +42,7 @@
 #include <linux/interrupt.h>
 #include <linux/device.h>
 #include <linux/sysdev.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <linux/syscalls.h>
 #include <linux/cpu.h>
 #include <asm/prom.h>
diff --git a/drivers/macintosh/windfarm_core.c b/drivers/macintosh/windfarm_core.c
index ab3faa7..e947af9 100644
--- a/drivers/macintosh/windfarm_core.c
+++ b/drivers/macintosh/windfarm_core.c
@@ -34,6 +34,7 @@
 #include <linux/device.h>
 #include <linux/platform_device.h>
 #include <linux/mutex.h>
+#include <linux/freezer.h>
 
 #include <asm/prom.h>
 
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 08a40f4..c7bee4f 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -101,7 +101,7 @@
 #define MIN_POOL_PAGES 32
 #define MIN_BIO_PAGES  8
 
-static kmem_cache_t *_crypt_io_pool;
+static struct kmem_cache *_crypt_io_pool;
 
 /*
  * Different IV generation algorithms:
@@ -458,11 +458,11 @@
  * interrupt context.
  */
 static struct workqueue_struct *_kcryptd_workqueue;
-static void kcryptd_do_work(void *data);
+static void kcryptd_do_work(struct work_struct *work);
 
 static void kcryptd_queue_io(struct crypt_io *io)
 {
-	INIT_WORK(&io->work, kcryptd_do_work, io);
+	INIT_WORK(&io->work, kcryptd_do_work);
 	queue_work(_kcryptd_workqueue, &io->work);
 }
 
@@ -618,9 +618,9 @@
 	dec_pending(io, crypt_convert(cc, &ctx));
 }
 
-static void kcryptd_do_work(void *data)
+static void kcryptd_do_work(struct work_struct *work)
 {
-	struct crypt_io *io = data;
+	struct crypt_io *io = container_of(work, struct crypt_io, work);
 
 	if (io->post_process)
 		process_read_endio(io);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index d754e0b..cf8bf05 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -101,11 +101,11 @@
 
 #define MIN_IOS 256	/* Mempool size */
 
-static kmem_cache_t *_mpio_cache;
+static struct kmem_cache *_mpio_cache;
 
 struct workqueue_struct *kmultipathd;
-static void process_queued_ios(void *data);
-static void trigger_event(void *data);
+static void process_queued_ios(struct work_struct *work);
+static void trigger_event(struct work_struct *work);
 
 
 /*-----------------------------------------------
@@ -173,8 +173,8 @@
 		INIT_LIST_HEAD(&m->priority_groups);
 		spin_lock_init(&m->lock);
 		m->queue_io = 1;
-		INIT_WORK(&m->process_queued_ios, process_queued_ios, m);
-		INIT_WORK(&m->trigger_event, trigger_event, m);
+		INIT_WORK(&m->process_queued_ios, process_queued_ios);
+		INIT_WORK(&m->trigger_event, trigger_event);
 		m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
 		if (!m->mpio_pool) {
 			kfree(m);
@@ -379,9 +379,10 @@
 	}
 }
 
-static void process_queued_ios(void *data)
+static void process_queued_ios(struct work_struct *work)
 {
-	struct multipath *m = (struct multipath *) data;
+	struct multipath *m =
+		container_of(work, struct multipath, process_queued_ios);
 	struct hw_handler *hwh = &m->hw_handler;
 	struct pgpath *pgpath = NULL;
 	unsigned init_required = 0, must_queue = 1;
@@ -421,9 +422,10 @@
  * An event is triggered whenever a path is taken out of use.
  * Includes path failure and PG bypass.
  */
-static void trigger_event(void *data)
+static void trigger_event(struct work_struct *work)
 {
-	struct multipath *m = (struct multipath *) data;
+	struct multipath *m =
+		container_of(work, struct multipath, trigger_event);
 
 	dm_table_event(m->ti->table);
 }
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 48a653b..fc8cbb1 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -883,7 +883,7 @@
 	do_writes(ms, &writes);
 }
 
-static void do_work(void *ignored)
+static void do_work(struct work_struct *ignored)
 {
 	struct mirror_set *ms;
 
@@ -1269,7 +1269,7 @@
 		dm_dirty_log_exit();
 		return r;
 	}
-	INIT_WORK(&_kmirrord_work, do_work, NULL);
+	INIT_WORK(&_kmirrord_work, do_work);
 
 	r = dm_register_target(&mirror_target);
 	if (r < 0) {
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 5281e00..b0ce2ce 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -40,7 +40,7 @@
 #define SNAPSHOT_PAGES 256
 
 struct workqueue_struct *ksnapd;
-static void flush_queued_bios(void *data);
+static void flush_queued_bios(struct work_struct *work);
 
 struct pending_exception {
 	struct exception e;
@@ -88,8 +88,8 @@
  * Hash table mapping origin volumes to lists of snapshots and
  * a lock to protect it
  */
-static kmem_cache_t *exception_cache;
-static kmem_cache_t *pending_cache;
+static struct kmem_cache *exception_cache;
+static struct kmem_cache *pending_cache;
 static mempool_t *pending_pool;
 
 /*
@@ -228,7 +228,7 @@
 	return 0;
 }
 
-static void exit_exception_table(struct exception_table *et, kmem_cache_t *mem)
+static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem)
 {
 	struct list_head *slot;
 	struct exception *ex, *next;
@@ -528,7 +528,7 @@
 	}
 
 	bio_list_init(&s->queued_bios);
-	INIT_WORK(&s->queued_bios_work, flush_queued_bios, s);
+	INIT_WORK(&s->queued_bios_work, flush_queued_bios);
 
 	/* Add snapshot to the list of snapshots for this origin */
 	/* Exceptions aren't triggered till snapshot_resume() is called */
@@ -603,9 +603,10 @@
 	}
 }
 
-static void flush_queued_bios(void *data)
+static void flush_queued_bios(struct work_struct *work)
 {
-	struct dm_snapshot *s = (struct dm_snapshot *) data;
+	struct dm_snapshot *s =
+		container_of(work, struct dm_snapshot, queued_bios_work);
 	struct bio *queued_bios;
 	unsigned long flags;
 
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index fc4f743..7ec1b11 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -121,8 +121,8 @@
 };
 
 #define MIN_IOS 256
-static kmem_cache_t *_io_cache;
-static kmem_cache_t *_tio_cache;
+static struct kmem_cache *_io_cache;
+static struct kmem_cache *_tio_cache;
 
 static int __init local_init(void)
 {
diff --git a/drivers/md/kcopyd.c b/drivers/md/kcopyd.c
index f1db6ef..b46f6c5 100644
--- a/drivers/md/kcopyd.c
+++ b/drivers/md/kcopyd.c
@@ -203,7 +203,7 @@
 /* FIXME: this should scale with the number of pages */
 #define MIN_JOBS 512
 
-static kmem_cache_t *_job_cache;
+static struct kmem_cache *_job_cache;
 static mempool_t *_job_pool;
 
 /*
@@ -417,7 +417,7 @@
 /*
  * kcopyd does this every time it's woken up.
  */
-static void do_work(void *ignored)
+static void do_work(struct work_struct *ignored)
 {
 	/*
 	 * The order that these are called is *very* important.
@@ -628,7 +628,7 @@
 	}
 
 	kcopyd_clients++;
-	INIT_WORK(&_kcopyd_work, do_work, NULL);
+	INIT_WORK(&_kcopyd_work, do_work);
 	mutex_unlock(&kcopyd_init_lock);
 	return 0;
 }
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 8cbf9c9..6c4345b 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -39,10 +39,10 @@
 #include <linux/raid/bitmap.h>
 #include <linux/sysctl.h>
 #include <linux/buffer_head.h> /* for invalidate_bdev */
-#include <linux/suspend.h>
 #include <linux/poll.h>
 #include <linux/mutex.h>
 #include <linux/ctype.h>
+#include <linux/freezer.h>
 
 #include <linux/init.h>
 
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 69c3e20..52914d5 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -348,7 +348,7 @@
 
 static int grow_stripes(raid5_conf_t *conf, int num)
 {
-	kmem_cache_t *sc;
+	struct kmem_cache *sc;
 	int devs = conf->raid_disks;
 
 	sprintf(conf->cache_name[0], "raid5/%s", mdname(conf->mddev));
@@ -397,7 +397,7 @@
 	LIST_HEAD(newstripes);
 	struct disk_info *ndisks;
 	int err = 0;
-	kmem_cache_t *sc;
+	struct kmem_cache *sc;
 	int i;
 
 	if (newsize <= conf->pool_size)
diff --git a/drivers/media/dvb/b2c2/flexcop-pci.c b/drivers/media/dvb/b2c2/flexcop-pci.c
index 0689324..6e16680 100644
--- a/drivers/media/dvb/b2c2/flexcop-pci.c
+++ b/drivers/media/dvb/b2c2/flexcop-pci.c
@@ -63,7 +63,7 @@
 
 	unsigned long last_irq;
 
-	struct work_struct irq_check_work;
+	struct delayed_work irq_check_work;
 
 	struct flexcop_device *fc_dev;
 };
@@ -97,9 +97,10 @@
 	return 0;
 }
 
-static void flexcop_pci_irq_check_work(void *data)
+static void flexcop_pci_irq_check_work(struct work_struct *work)
 {
-	struct flexcop_pci *fc_pci = data;
+	struct flexcop_pci *fc_pci =
+		container_of(work, struct flexcop_pci, irq_check_work.work);
 	struct flexcop_device *fc = fc_pci->fc_dev;
 
 	flexcop_ibi_value v = fc->read_ibi_reg(fc,sram_dest_reg_714);
@@ -371,7 +372,7 @@
 	if ((ret = flexcop_pci_dma_init(fc_pci)) != 0)
 		goto err_fc_exit;
 
-	INIT_WORK(&fc_pci->irq_check_work, flexcop_pci_irq_check_work, fc_pci);
+	INIT_DELAYED_WORK(&fc_pci->irq_check_work, flexcop_pci_irq_check_work);
 
 	return ret;
 
diff --git a/drivers/media/dvb/cinergyT2/cinergyT2.c b/drivers/media/dvb/cinergyT2/cinergyT2.c
index 8a7dd50..9123147 100644
--- a/drivers/media/dvb/cinergyT2/cinergyT2.c
+++ b/drivers/media/dvb/cinergyT2/cinergyT2.c
@@ -128,7 +128,7 @@
 
 	struct dvbt_set_parameters_msg param;
 	struct dvbt_get_status_msg status;
-	struct work_struct query_work;
+	struct delayed_work query_work;
 
 	wait_queue_head_t poll_wq;
 	int pending_fe_events;
@@ -142,7 +142,7 @@
 #ifdef ENABLE_RC
 	struct input_dev *rc_input_dev;
 	char phys[64];
-	struct work_struct rc_query_work;
+	struct delayed_work rc_query_work;
 	int rc_input_event;
 	u32 rc_last_code;
 	unsigned long last_event_jiffies;
@@ -287,7 +287,7 @@
 	int i;
 
 	cinergyt2->streambuf = usb_buffer_alloc(cinergyt2->udev, STREAM_URB_COUNT*STREAM_BUF_SIZE,
-					      SLAB_KERNEL, &cinergyt2->streambuf_dmahandle);
+					      GFP_KERNEL, &cinergyt2->streambuf_dmahandle);
 	if (!cinergyt2->streambuf) {
 		dprintk(1, "failed to alloc consistent stream memory area, bailing out!\n");
 		return -ENOMEM;
@@ -723,9 +723,10 @@
 
 #ifdef ENABLE_RC
 
-static void cinergyt2_query_rc (void *data)
+static void cinergyt2_query_rc (struct work_struct *work)
 {
-	struct cinergyt2 *cinergyt2 = data;
+	struct cinergyt2 *cinergyt2 =
+		container_of(work, struct cinergyt2, rc_query_work.work);
 	char buf[1] = { CINERGYT2_EP1_GET_RC_EVENTS };
 	struct cinergyt2_rc_event rc_events[12];
 	int n, len, i;
@@ -806,7 +807,7 @@
 	strlcat(cinergyt2->phys, "/input0", sizeof(cinergyt2->phys));
 	cinergyt2->rc_input_event = KEY_MAX;
 	cinergyt2->rc_last_code = ~0;
-	INIT_WORK(&cinergyt2->rc_query_work, cinergyt2_query_rc, cinergyt2);
+	INIT_DELAYED_WORK(&cinergyt2->rc_query_work, cinergyt2_query_rc);
 
 	input_dev->name = DRIVER_NAME " remote control";
 	input_dev->phys = cinergyt2->phys;
@@ -847,9 +848,10 @@
 
 #endif /* ENABLE_RC */
 
-static void cinergyt2_query (void *data)
+static void cinergyt2_query (struct work_struct *work)
 {
-	struct cinergyt2 *cinergyt2 = (struct cinergyt2 *) data;
+	struct cinergyt2 *cinergyt2 =
+		container_of(work, struct cinergyt2, query_work.work);
 	char cmd [] = { CINERGYT2_EP1_GET_TUNER_STATUS };
 	struct dvbt_get_status_msg *s = &cinergyt2->status;
 	uint8_t lock_bits;
@@ -893,7 +895,7 @@
 
 	mutex_init(&cinergyt2->sem);
 	init_waitqueue_head (&cinergyt2->poll_wq);
-	INIT_WORK(&cinergyt2->query_work, cinergyt2_query, cinergyt2);
+	INIT_DELAYED_WORK(&cinergyt2->query_work, cinergyt2_query);
 
 	cinergyt2->udev = interface_to_usbdev(intf);
 	cinergyt2->param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
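
The flexcop-pci and cinergyT2 hunks also switch periodically rescheduled items from struct work_struct to struct delayed_work; the handler then reaches the containing object through the embedded .work member. A sketch of that shape, again with hypothetical names:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct my_poller {
	struct delayed_work	poll_work;	/* was struct work_struct */
	unsigned long		interval;	/* poll period in jiffies */
};

static void my_poll_fn(struct work_struct *work)
{
	/* The delayed_work embeds a work_struct member named .work. */
	struct my_poller *p =
		container_of(work, struct my_poller, poll_work.work);

	/* ... poll the hardware ..., then re-arm the timer-backed work. */
	schedule_delayed_work(&p->poll_work, p->interval);
}

static void my_poller_start(struct my_poller *p)
{
	INIT_DELAYED_WORK(&p->poll_work, my_poll_fn);
	p->interval = msecs_to_jiffies(100);
	schedule_delayed_work(&p->poll_work, p->interval);
}
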
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index a2ab2ee..e859722 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -34,7 +34,7 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/list.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <linux/jiffies.h>
 #include <asm/processor.h>
 
diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c
index 8859ab7..ebf4dc5 100644
--- a/drivers/media/dvb/dvb-core/dvb_net.c
+++ b/drivers/media/dvb/dvb-core/dvb_net.c
@@ -127,6 +127,7 @@
 	int in_use;
 	struct net_device_stats stats;
 	u16 pid;
+	struct net_device *net;
 	struct dvb_net *host;
 	struct dmx_demux *demux;
 	struct dmx_section_feed *secfeed;
@@ -1123,10 +1124,11 @@
 }
 
 
-static void wq_set_multicast_list (void *data)
+static void wq_set_multicast_list (struct work_struct *work)
 {
-	struct net_device *dev = data;
-	struct dvb_net_priv *priv = dev->priv;
+	struct dvb_net_priv *priv =
+		container_of(work, struct dvb_net_priv, set_multicast_list_wq);
+	struct net_device *dev = priv->net;
 
 	dvb_net_feed_stop(dev);
 	priv->rx_mode = RX_MODE_UNI;
@@ -1167,9 +1169,11 @@
 }
 
 
-static void wq_restart_net_feed (void *data)
+static void wq_restart_net_feed (struct work_struct *work)
 {
-	struct net_device *dev = data;
+	struct dvb_net_priv *priv =
+		container_of(work, struct dvb_net_priv, restart_net_feed_wq);
+	struct net_device *dev = priv->net;
 
 	if (netif_running(dev)) {
 		dvb_net_feed_stop(dev);
@@ -1276,6 +1280,7 @@
 	dvbnet->device[if_num] = net;
 
 	priv = net->priv;
+	priv->net = net;
 	priv->demux = dvbnet->demux;
 	priv->pid = pid;
 	priv->rx_mode = RX_MODE_UNI;
@@ -1284,8 +1289,8 @@
 	priv->feedtype = feedtype;
 	reset_ule(priv);
 
-	INIT_WORK(&priv->set_multicast_list_wq, wq_set_multicast_list, net);
-	INIT_WORK(&priv->restart_net_feed_wq, wq_restart_net_feed, net);
+	INIT_WORK(&priv->set_multicast_list_wq, wq_set_multicast_list);
+	INIT_WORK(&priv->restart_net_feed_wq, wq_restart_net_feed);
 	mutex_init(&priv->mutex);
 
 	net->base_addr = pid;
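
Because the handler can no longer be handed an arbitrary pointer, dvb_net grows a struct net_device *net back-pointer in its private data, set at registration time. The same idea in isolation, with hypothetical names:

#include <linux/workqueue.h>
#include <linux/netdevice.h>

struct my_net_priv {
	struct net_device	*dev;		/* back-pointer, set once at init */
	struct work_struct	restart_work;
};

static void my_restart_feed(struct work_struct *work)
{
	struct my_net_priv *priv =
		container_of(work, struct my_net_priv, restart_work);
	struct net_device *dev = priv->dev;

	if (netif_running(dev))
		netif_wake_queue(dev);
}

static void my_net_setup(struct net_device *dev)
{
	struct my_net_priv *priv = netdev_priv(dev);

	priv->dev = dev;			/* record the back-pointer */
	INIT_WORK(&priv->restart_work, my_restart_feed);
}
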
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-remote.c b/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
index 0a3a0b6..794e447 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
@@ -13,9 +13,10 @@
  *
  * TODO: Fix the repeat rate of the input device.
  */
-static void dvb_usb_read_remote_control(void *data)
+static void dvb_usb_read_remote_control(struct work_struct *work)
 {
-	struct dvb_usb_device *d = data;
+	struct dvb_usb_device *d =
+		container_of(work, struct dvb_usb_device, rc_query_work.work);
 	u32 event;
 	int state;
 
@@ -128,7 +129,7 @@
 
 	input_register_device(d->rc_input_dev);
 
-	INIT_WORK(&d->rc_query_work, dvb_usb_read_remote_control, d);
+	INIT_DELAYED_WORK(&d->rc_query_work, dvb_usb_read_remote_control);
 
 	info("schedule remote query interval to %d msecs.", d->props.rc_interval);
 	schedule_delayed_work(&d->rc_query_work,msecs_to_jiffies(d->props.rc_interval));
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb.h b/drivers/media/dvb/dvb-usb/dvb-usb.h
index 376c45a..0d72173 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb.h
@@ -369,7 +369,7 @@
 	/* remote control */
 	struct input_dev *rc_input_dev;
 	char rc_phys[64];
-	struct work_struct rc_query_work;
+	struct delayed_work rc_query_work;
 	u32 last_event;
 	int last_state;
 
diff --git a/drivers/media/dvb/dvb-usb/usb-urb.c b/drivers/media/dvb/dvb-usb/usb-urb.c
index 78035ee..397f51a 100644
--- a/drivers/media/dvb/dvb-usb/usb-urb.c
+++ b/drivers/media/dvb/dvb-usb/usb-urb.c
@@ -116,7 +116,7 @@
 	for (stream->buf_num = 0; stream->buf_num < num; stream->buf_num++) {
 		deb_mem("allocating buffer %d\n",stream->buf_num);
 		if (( stream->buf_list[stream->buf_num] =
-					usb_buffer_alloc(stream->udev, size, SLAB_ATOMIC,
+					usb_buffer_alloc(stream->udev, size, GFP_ATOMIC,
 					&stream->dma_addr[stream->buf_num]) ) == NULL) {
 			deb_mem("not enough memory for urb-buffer allocation.\n");
 			usb_free_stream_buffers(stream);
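
The SLAB_KERNEL and SLAB_ATOMIC aliases are gone in favour of the plain gfp_t constants, so USB coherent-buffer allocations like the ones above now pass GFP_KERNEL or GFP_ATOMIC directly. A small sketch of that calling convention (the helper name is hypothetical):

#include <linux/usb.h>
#include <linux/types.h>

/* Hypothetical helper: allocation flags are now gfp_t GFP_* values. */
static void *my_alloc_urb_buffer(struct usb_device *udev, size_t size,
				 dma_addr_t *dma, gfp_t flags)
{
	/* GFP_KERNEL may sleep; callers in atomic context pass GFP_ATOMIC. */
	return usb_buffer_alloc(udev, size, flags, dma);
}
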
diff --git a/drivers/media/dvb/frontends/l64781.c b/drivers/media/dvb/frontends/l64781.c
index f3bc82e..1aeacb1 100644
--- a/drivers/media/dvb/frontends/l64781.c
+++ b/drivers/media/dvb/frontends/l64781.c
@@ -36,7 +36,7 @@
 	struct dvb_frontend frontend;
 
 	/* private demodulator data */
-	int first:1;
+	unsigned int first:1;
 };
 
 #define dprintk(args...) \
diff --git a/drivers/media/dvb/ttusb-dec/ttusb_dec.c b/drivers/media/dvb/ttusb-dec/ttusb_dec.c
index 8135f3e..10b121a 100644
--- a/drivers/media/dvb/ttusb-dec/ttusb_dec.c
+++ b/drivers/media/dvb/ttusb-dec/ttusb_dec.c
@@ -1244,7 +1244,7 @@
 			return -ENOMEM;
 		}
 		dec->irq_buffer = usb_buffer_alloc(dec->udev,IRQ_PACKET_SIZE,
-					SLAB_ATOMIC, &dec->irq_dma_handle);
+					GFP_ATOMIC, &dec->irq_dma_handle);
 		if(!dec->irq_buffer) {
 			return -ENOMEM;
 		}
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index 6d96b17..920b63f 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -173,38 +173,6 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called radio-maestro.
 
-config RADIO_MIROPCM20
-	tristate "miroSOUND PCM20 radio"
-	depends on ISA && VIDEO_V4L1 && SOUND_ACI_MIXER
-	---help---
-	  Choose Y here if you have this FM radio card. You also need to say Y
-	  to "ACI mixer (miroSOUND PCM1-pro/PCM12/PCM20 radio)" (in "Sound")
-	  for this to work.
-
-	  In order to control your radio card, you will need to use programs
-	  that are compatible with the Video For Linux API.  Information on
-	  this API and pointers to "v4l" programs may be found at
-	  <file:Documentation/video4linux/API.html>.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called miropcm20.
-
-config RADIO_MIROPCM20_RDS
-	tristate "miroSOUND PCM20 radio RDS user interface (EXPERIMENTAL)"
-	depends on RADIO_MIROPCM20 && EXPERIMENTAL
-	---help---
-	  Choose Y here if you want to see RDS/RBDS information like
-	  RadioText, Programme Service name, Clock Time and date, Programme
-	  Type and Traffic Announcement/Programme identification.
-
-	  It's not possible to read the raw RDS packets from the device, so
-	  the driver cant provide an V4L interface for this.  But the
-	  availability of RDS is reported over V4L by the basic driver
-	  already.  Here RDS can be read from files in /dev/v4l/rds.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called miropcm20-rds.
-
 config RADIO_SF16FMI
 	tristate "SF16FMI Radio"
 	depends on ISA && VIDEO_V4L2
diff --git a/drivers/media/video/cpia_pp.c b/drivers/media/video/cpia_pp.c
index 41f4b8d..b12cec9 100644
--- a/drivers/media/video/cpia_pp.c
+++ b/drivers/media/video/cpia_pp.c
@@ -82,6 +82,8 @@
 	struct pardevice *pdev;
 	struct parport *port;
 	struct work_struct cb_task;
+	void (*cb_func)(void *cbdata);
+	void *cb_data;
 	int open_count;
 	wait_queue_head_t wq_stream;
 	/* image state flags */
@@ -130,6 +132,20 @@
 #define PARPORT_CHUNK_SIZE	PAGE_SIZE
 
 
+static void cpia_pp_run_callback(struct work_struct *work)
+{
+	void (*cb_func)(void *cbdata);
+	void *cb_data;
+	struct pp_cam_entry *cam;
+
+	cam = container_of(work, struct pp_cam_entry, cb_task);
+	cb_func = cam->cb_func;
+	cb_data = cam->cb_data;
+	work_release(work);
+
+	cb_func(cb_data);
+}
+
 /****************************************************************************
  *
  *  CPiA-specific  low-level parport functions for nibble uploads
@@ -664,7 +680,9 @@
 	int retval = 0;
 
 	if(cam->port->irq != PARPORT_IRQ_NONE) {
-		INIT_WORK(&cam->cb_task, cb, cbdata);
+		cam->cb_func = cb;
+		cam->cb_data = cbdata;
+		INIT_WORK_NAR(&cam->cb_task, cpia_pp_run_callback);
 	} else {
 		retval = -1;
 	}
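
cpia_pp previously registered an arbitrary callback and cookie straight into INIT_WORK(); with the new API it stores them in the camera structure and dispatches from a fixed handler. A simplified sketch of that trampoline, with hypothetical names and without the transitional INIT_WORK_NAR/work_release details used above:

#include <linux/workqueue.h>

struct my_cam {
	void (*cb_func)(void *cbdata);	/* user-supplied callback */
	void *cb_data;			/* its opaque cookie */
	struct work_struct cb_task;
};

static void my_cam_run_callback(struct work_struct *work)
{
	struct my_cam *cam = container_of(work, struct my_cam, cb_task);

	cam->cb_func(cam->cb_data);	/* dispatch the stored callback */
}

static void my_cam_request_callback(struct my_cam *cam,
				    void (*cb)(void *cbdata), void *cbdata)
{
	cam->cb_func = cb;
	cam->cb_data = cbdata;
	INIT_WORK(&cam->cb_task, my_cam_run_callback);
	schedule_work(&cam->cb_task);
}
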
diff --git a/drivers/media/video/cx88/cx88-input.c b/drivers/media/video/cx88/cx88-input.c
index 57e1c02..e60a0a5 100644
--- a/drivers/media/video/cx88/cx88-input.c
+++ b/drivers/media/video/cx88/cx88-input.c
@@ -145,9 +145,9 @@
 	schedule_work(&ir->work);
 }
 
-static void cx88_ir_work(void *data)
+static void cx88_ir_work(struct work_struct *work)
 {
-	struct cx88_IR *ir = data;
+	struct cx88_IR *ir = container_of(work, struct cx88_IR, work);
 	unsigned long timeout;
 
 	cx88_ir_handle_key(ir);
@@ -308,7 +308,7 @@
 	core->ir = ir;
 
 	if (ir->polling) {
-		INIT_WORK(&ir->work, cx88_ir_work, ir);
+		INIT_WORK(&ir->work, cx88_ir_work);
 		init_timer(&ir->timer);
 		ir->timer.function = ir_timer;
 		ir->timer.data = (unsigned long)ir;
diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c
index 1457b16..ab87e7b 100644
--- a/drivers/media/video/ir-kbd-i2c.c
+++ b/drivers/media/video/ir-kbd-i2c.c
@@ -268,9 +268,9 @@
 	schedule_work(&ir->work);
 }
 
-static void ir_work(void *data)
+static void ir_work(struct work_struct *work)
 {
-	struct IR_i2c *ir = data;
+	struct IR_i2c *ir = container_of(work, struct IR_i2c, work);
 	ir_key_poll(ir);
 	mod_timer(&ir->timer, jiffies+HZ/10);
 }
@@ -400,7 +400,7 @@
 	       ir->input->name,ir->input->phys,adap->name);
 
 	/* start polling via eventd */
-	INIT_WORK(&ir->work, ir_work, ir);
+	INIT_WORK(&ir->work, ir_work);
 	init_timer(&ir->timer);
 	ir->timer.function = ir_timer;
 	ir->timer.data     = (unsigned long)ir;
diff --git a/drivers/media/video/msp3400-driver.c b/drivers/media/video/msp3400-driver.c
index cf43df3..e1b56dc 100644
--- a/drivers/media/video/msp3400-driver.c
+++ b/drivers/media/video/msp3400-driver.c
@@ -56,7 +56,7 @@
 #include <media/tvaudio.h>
 #include <media/msp3400.h>
 #include <linux/kthread.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include "msp3400-driver.h"
 
 /* ---------------------------------------------------------------------- */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-context.c b/drivers/media/video/pvrusb2/pvrusb2-context.c
index f129f31..cf12974 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-context.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-context.c
@@ -45,16 +45,21 @@
 }
 
 
-static void pvr2_context_poll(struct pvr2_context *mp)
+static void pvr2_context_poll(struct work_struct *work)
 {
+	struct pvr2_context *mp =
+		container_of(work, struct pvr2_context, workpoll);
 	pvr2_context_enter(mp); do {
 		pvr2_hdw_poll(mp->hdw);
 	} while (0); pvr2_context_exit(mp);
 }
 
 
-static void pvr2_context_setup(struct pvr2_context *mp)
+static void pvr2_context_setup(struct work_struct *work)
 {
+	struct pvr2_context *mp =
+		container_of(work, struct pvr2_context, workinit);
+
 	pvr2_context_enter(mp); do {
 		if (!pvr2_hdw_dev_ok(mp->hdw)) break;
 		pvr2_hdw_setup(mp->hdw);
@@ -92,8 +97,8 @@
 	}
 
 	mp->workqueue = create_singlethread_workqueue("pvrusb2");
-	INIT_WORK(&mp->workinit,(void (*)(void*))pvr2_context_setup,mp);
-	INIT_WORK(&mp->workpoll,(void (*)(void*))pvr2_context_poll,mp);
+	INIT_WORK(&mp->workinit, pvr2_context_setup);
+	INIT_WORK(&mp->workpoll, pvr2_context_poll);
 	queue_work(mp->workqueue,&mp->workinit);
  done:
 	return mp;
diff --git a/drivers/media/video/saa6588.c b/drivers/media/video/saa6588.c
index 7b9859c..92eabf8 100644
--- a/drivers/media/video/saa6588.c
+++ b/drivers/media/video/saa6588.c
@@ -324,9 +324,9 @@
 	schedule_work(&s->work);
 }
 
-static void saa6588_work(void *data)
+static void saa6588_work(struct work_struct *work)
 {
-	struct saa6588 *s = (struct saa6588 *)data;
+	struct saa6588 *s = container_of(work, struct saa6588, work);
 
 	saa6588_i2c_poll(s);
 	mod_timer(&s->timer, jiffies + msecs_to_jiffies(20));
@@ -419,7 +419,7 @@
 	saa6588_configure(s);
 
 	/* start polling via eventd */
-	INIT_WORK(&s->work, saa6588_work, s);
+	INIT_WORK(&s->work, saa6588_work);
 	init_timer(&s->timer);
 	s->timer.function = saa6588_timer;
 	s->timer.data = (unsigned long)s;
diff --git a/drivers/media/video/saa7134/saa7134-empress.c b/drivers/media/video/saa7134/saa7134-empress.c
index 65d0440..daaae87 100644
--- a/drivers/media/video/saa7134/saa7134-empress.c
+++ b/drivers/media/video/saa7134/saa7134-empress.c
@@ -343,9 +343,10 @@
 	.minor	       = -1,
 };
 
-static void empress_signal_update(void* data)
+static void empress_signal_update(struct work_struct *work)
 {
-	struct saa7134_dev* dev = (struct saa7134_dev*) data;
+	struct saa7134_dev* dev =
+		container_of(work, struct saa7134_dev, empress_workqueue);
 
 	if (dev->nosignal) {
 		dprintk("no video signal\n");
@@ -378,7 +379,7 @@
 		 "%s empress (%s)", dev->name,
 		 saa7134_boards[dev->board].name);
 
-	INIT_WORK(&dev->empress_workqueue, empress_signal_update, (void*) dev);
+	INIT_WORK(&dev->empress_workqueue, empress_signal_update);
 
 	err = video_register_device(dev->empress_dev,VFL_TYPE_GRABBER,
 				    empress_nr[dev->nr]);
@@ -399,7 +400,7 @@
 			    sizeof(struct saa7134_buf),
 			    dev);
 
-	empress_signal_update(dev);
+	empress_signal_update(&dev->empress_workqueue);
 	return 0;
 }
 
diff --git a/drivers/media/video/tvaudio.c b/drivers/media/video/tvaudio.c
index fcaef4b..d506dfa 100644
--- a/drivers/media/video/tvaudio.c
+++ b/drivers/media/video/tvaudio.c
@@ -29,6 +29,7 @@
 #include <linux/init.h>
 #include <linux/smp_lock.h>
 #include <linux/kthread.h>
+#include <linux/freezer.h>
 
 #include <media/tvaudio.h>
 #include <media/v4l2-common.h>
diff --git a/drivers/media/video/video-buf-dvb.c b/drivers/media/video/video-buf-dvb.c
index f53edf1..fcc5467 100644
--- a/drivers/media/video/video-buf-dvb.c
+++ b/drivers/media/video/video-buf-dvb.c
@@ -20,7 +20,7 @@
 #include <linux/fs.h>
 #include <linux/kthread.h>
 #include <linux/file.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 
 #include <media/video-buf.h>
 #include <media/video-buf-dvb.h>
diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c
index 3c8dc72..9986de5 100644
--- a/drivers/media/video/vivi.c
+++ b/drivers/media/video/vivi.c
@@ -36,6 +36,7 @@
 #include <media/v4l2-common.h>
 #include <linux/kthread.h>
 #include <linux/highmem.h>
+#include <linux/freezer.h>
 
 /* Wake up at about 30 fps */
 #define WAKE_NUMERATOR 30
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 051b7c5..6e068cf 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -347,7 +347,7 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	mpt_interrupt - MPT adapter (IOC) specific interrupt handler.
  *	@irq: irq number (not used)
  *	@bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
@@ -387,14 +387,16 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
- *	mpt_base_reply - MPT base driver's callback routine; all base driver
- *	"internal" request/reply processing is routed here.
- *	Currently used for EventNotification and EventAck handling.
+/**
+ *	mpt_base_reply - MPT base driver's callback routine
  *	@ioc: Pointer to MPT_ADAPTER structure
  *	@mf: Pointer to original MPT request frame
  *	@reply: Pointer to MPT reply frame (NULL if TurboReply)
  *
+ *	MPT base driver's callback routine; all base driver
+ *	"internal" request/reply processing is routed here.
+ *	Currently used for EventNotification and EventAck handling.
+ *
  *	Returns 1 indicating original alloc'd request frame ptr
  *	should be freed, or 0 if it shouldn't.
  */
@@ -530,7 +532,7 @@
  *	@dclass: Protocol driver's class (%MPT_DRIVER_CLASS enum value)
  *
  *	This routine is called by a protocol-specific driver (SCSI host,
- *	LAN, SCSI target) to register it's reply callback routine.  Each
+ *	LAN, SCSI target) to register its reply callback routine.  Each
  *	protocol-specific driver must do this before it will be able to
  *	use any IOC resources, such as obtaining request frames.
  *
@@ -572,7 +574,7 @@
  *	mpt_deregister - Deregister a protocol drivers resources.
  *	@cb_idx: previously registered callback handle
  *
- *	Each protocol-specific driver should call this routine when it's
+ *	Each protocol-specific driver should call this routine when its
  *	module is unloaded.
  */
 void
@@ -617,7 +619,7 @@
  *
  *	Each protocol-specific driver should call this routine
  *	when it does not (or can no longer) handle events,
- *	or when it's module is unloaded.
+ *	or when its module is unloaded.
  */
 void
 mpt_event_deregister(int cb_idx)
@@ -656,7 +658,7 @@
  *
  *	Each protocol-specific driver should call this routine
  *	when it does not (or can no longer) handle IOC reset handling,
- *	or when it's module is unloaded.
+ *	or when its module is unloaded.
  */
 void
 mpt_reset_deregister(int cb_idx)
@@ -670,6 +672,8 @@
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
  *	mpt_device_driver_register - Register device driver hooks
+ *	@dd_cbfunc: driver callbacks struct
+ *	@cb_idx: MPT protocol driver index
  */
 int
 mpt_device_driver_register(struct mpt_pci_driver * dd_cbfunc, int cb_idx)
@@ -696,6 +700,7 @@
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
  *	mpt_device_driver_deregister - DeRegister device driver hooks
+ *	@cb_idx: MPT protocol driver index
  */
 void
 mpt_device_driver_deregister(int cb_idx)
@@ -887,8 +892,7 @@
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
- *	mpt_send_handshake_request - Send MPT request via doorbell
- *	handshake method.
+ *	mpt_send_handshake_request - Send MPT request via doorbell handshake method.
  *	@handle: Handle of registered MPT protocol driver
  *	@ioc: Pointer to MPT adapter structure
  *	@reqBytes: Size of the request in bytes
@@ -981,10 +985,13 @@
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
- * mpt_host_page_access_control - provides mechanism for the host
- * driver to control the IOC's Host Page Buffer access.
+ * mpt_host_page_access_control - control the IOC's Host Page Buffer access
  * @ioc: Pointer to MPT adapter structure
  * @access_control_value: define bits below
+ * @sleepFlag: Specifies whether the process can sleep
+ *
+ * Provides mechanism for the host driver to control the IOC's
+ * Host Page Buffer access.
  *
  * Access Control Value - bits[15:12]
  * 0h Reserved
@@ -1022,10 +1029,10 @@
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
  *	mpt_host_page_alloc - allocate system memory for the fw
- *	If we already allocated memory in past, then resend the same pointer.
- *	ioc@: Pointer to pointer to IOC adapter
- *	ioc_init@: Pointer to ioc init config page
+ *	@ioc: Pointer to pointer to IOC adapter
+ *	@ioc_init: Pointer to ioc init config page
  *
+ *	If we already allocated memory in the past, then resend the same pointer.
  *	Returns 0 for success, non-zero for failure.
  */
 static int
@@ -1091,12 +1098,15 @@
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
- *	mpt_verify_adapter - Given a unique IOC identifier, set pointer to
- *	the associated MPT adapter structure.
+ *	mpt_verify_adapter - Given IOC identifier, set pointer to its adapter structure.
  *	@iocid: IOC unique identifier (integer)
  *	@iocpp: Pointer to pointer to IOC adapter
  *
- *	Returns iocid and sets iocpp.
+ *	Given a unique IOC identifier, set pointer to the associated MPT
+ *	adapter structure.
+ *
+ *	Returns iocid and sets iocpp if iocid is found.
+ *	Returns -1 if iocid is not found.
  */
 int
 mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp)
@@ -1115,9 +1125,10 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	mpt_attach - Install a PCI intelligent MPT adapter.
  *	@pdev: Pointer to pci_dev structure
+ *	@id: PCI device ID information
  *
  *	This routine performs all the steps necessary to bring the IOC of
 *	an MPT adapter to an OPERATIONAL state.  This includes registering
@@ -1417,10 +1428,9 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	mpt_detach - Remove a PCI intelligent MPT adapter.
  *	@pdev: Pointer to pci_dev structure
- *
  */
 
 void
@@ -1466,10 +1476,10 @@
  */
 #ifdef CONFIG_PM
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	mpt_suspend - Fusion MPT base driver suspend routine.
- *
- *
+ *	@pdev: Pointer to pci_dev structure
+ *	@state: new state to enter
  */
 int
 mpt_suspend(struct pci_dev *pdev, pm_message_t state)
@@ -1505,10 +1515,9 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	mpt_resume - Fusion MPT base driver resume routine.
- *
- *
+ *	@pdev: Pointer to pci_dev structure
  */
 int
 mpt_resume(struct pci_dev *pdev)
@@ -1566,7 +1575,7 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	mpt_do_ioc_recovery - Initialize or recover MPT adapter.
  *	@ioc: Pointer to MPT adapter structure
  *	@reason: Event word / reason
@@ -1892,13 +1901,15 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
- *	mpt_detect_bound_ports - Search for PCI bus/dev_function
- *	which matches PCI bus/dev_function (+/-1) for newly discovered 929,
- *	929X, 1030 or 1035.
+/**
+ *	mpt_detect_bound_ports - Search for matching PCI bus/dev_function
  *	@ioc: Pointer to MPT adapter structure
  *	@pdev: Pointer to (struct pci_dev) structure
  *
+ *	Search for PCI bus/dev_function which matches
+ *	PCI bus/dev_function (+/-1) for newly discovered 929,
+ *	929X, 1030 or 1035.
+ *
  *	If match on PCI dev_function +/-1 is found, bind the two MPT adapters
  *	using alt_ioc pointer fields in their %MPT_ADAPTER structures.
  */
@@ -1945,9 +1956,9 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	mpt_adapter_disable - Disable misbehaving MPT adapter.
- *	@this: Pointer to MPT adapter structure
+ *	@ioc: Pointer to MPT adapter structure
  */
 static void
 mpt_adapter_disable(MPT_ADAPTER *ioc)
@@ -2046,9 +2057,8 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
- *	mpt_adapter_dispose - Free all resources associated with a MPT
- *	adapter.
+/**
+ *	mpt_adapter_dispose - Free all resources associated with an MPT adapter
  *	@ioc: Pointer to MPT adapter structure
  *
  *	This routine unregisters h/w resources and frees all alloc'd memory
@@ -2099,8 +2109,8 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
- *	MptDisplayIocCapabilities - Disply IOC's capacilities.
+/**
+ *	MptDisplayIocCapabilities - Display IOC's capabilities.
  *	@ioc: Pointer to MPT adapter structure
  */
 static void
@@ -2142,7 +2152,7 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	MakeIocReady - Get IOC to a READY state, using KickStart if needed.
  *	@ioc: Pointer to MPT_ADAPTER structure
  *	@force: Force hard KickStart of IOC
@@ -2279,7 +2289,7 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	mpt_GetIocState - Get the current state of a MPT adapter.
  *	@ioc: Pointer to MPT_ADAPTER structure
  *	@cooked: Request raw or cooked IOC state
@@ -2304,7 +2314,7 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	GetIocFacts - Send IOCFacts request to MPT adapter.
  *	@ioc: Pointer to MPT_ADAPTER structure
  *	@sleepFlag: Specifies whether the process can sleep
@@ -2478,7 +2488,7 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	GetPortFacts - Send PortFacts request to MPT adapter.
  *	@ioc: Pointer to MPT_ADAPTER structure
  *	@portnum: Port number
@@ -2545,7 +2555,7 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	SendIocInit - Send IOCInit request to MPT adapter.
  *	@ioc: Pointer to MPT_ADAPTER structure
  *	@sleepFlag: Specifies whether the process can sleep
@@ -2630,7 +2640,7 @@
 	}
 
 	/* No need to byte swap the multibyte fields in the reply
-	 * since we don't even look at it's contents.
+	 * since we don't even look at its contents.
 	 */
 
 	dhsprintk((MYIOC_s_INFO_FMT "Sending PortEnable (req @ %p)\n",
@@ -2672,7 +2682,7 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	SendPortEnable - Send PortEnable request to MPT adapter port.
  *	@ioc: Pointer to MPT_ADAPTER structure
  *	@portnum: Port number to enable
@@ -2723,9 +2733,13 @@
 	return rc;
 }
 
-/*
- *	ioc: Pointer to MPT_ADAPTER structure
- *      size - total FW bytes
+/**
+ *	mpt_alloc_fw_memory - allocate firmware memory
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *	@size: total FW bytes
+ *
+ *	If memory has already been allocated, the same (cached) value
+ *	is returned.
  */
 void
 mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size)
@@ -2742,9 +2756,12 @@
 			ioc->alloc_total += size;
 	}
 }
-/*
- * If alt_img is NULL, delete from ioc structure.
- * Else, delete a secondary image in same format.
+/**
+ *	mpt_free_fw_memory - free firmware memory
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *
+ *	If alt_img is NULL, delete from ioc structure.
+ *	Else, delete a secondary image in same format.
  */
 void
 mpt_free_fw_memory(MPT_ADAPTER *ioc)
@@ -2763,7 +2780,7 @@
 
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	mpt_do_upload - Construct and Send FWUpload request to MPT adapter port.
  *	@ioc: Pointer to MPT_ADAPTER structure
  *	@sleepFlag: Specifies whether the process can sleep
@@ -2865,10 +2882,10 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	mpt_downloadboot - DownloadBoot code
  *	@ioc: Pointer to MPT_ADAPTER structure
- *	@flag: Specify which part of IOC memory is to be uploaded.
+ *	@pFwHeader: Pointer to firmware header info
  *	@sleepFlag: Specifies whether the process can sleep
  *
  *	FwDownloadBoot requires Programmed IO access.
@@ -3071,7 +3088,7 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	KickStart - Perform hard reset of MPT adapter.
  *	@ioc: Pointer to MPT_ADAPTER structure
  *	@force: Force hard reset
@@ -3145,12 +3162,12 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	mpt_diag_reset - Perform hard reset of the adapter.
  *	@ioc: Pointer to MPT_ADAPTER structure
  *	@ignore: Set if to honor and clear to ignore
  *		the reset history bit
- *	@sleepflag: CAN_SLEEP if called in a non-interrupt thread,
+ *	@sleepFlag: CAN_SLEEP if called in a non-interrupt thread,
  *		else set to NO_SLEEP (use mdelay instead)
  *
  *	This routine places the adapter in diagnostic mode via the
@@ -3436,11 +3453,12 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	SendIocReset - Send IOCReset request to MPT adapter.
  *	@ioc: Pointer to MPT_ADAPTER structure
  *	@reset_type: reset type, expected values are
  *	%MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET or %MPI_FUNCTION_IO_UNIT_RESET
+ *	@sleepFlag: Specifies whether the process can sleep
  *
  *	Send IOCReset request to the MPT adapter.
  *
@@ -3494,11 +3512,12 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
- *	initChainBuffers - Allocate memory for and initialize
- *	chain buffers, chain buffer control arrays and spinlock.
- *	@hd: Pointer to MPT_SCSI_HOST structure
- *	@init: If set, initialize the spin lock.
+/**
+ *	initChainBuffers - Allocate memory for and initialize chain buffers
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *
+ *	Allocates memory for and initializes chain buffers,
+ *	chain buffer control arrays and spinlock.
  */
 static int
 initChainBuffers(MPT_ADAPTER *ioc)
@@ -3594,7 +3613,7 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	PrimeIocFifos - Initialize IOC request and reply FIFOs.
  *	@ioc: Pointer to MPT_ADAPTER structure
  *
@@ -3891,15 +3910,15 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
- *	WaitForDoorbellAck - Wait for IOC to clear the IOP_DOORBELL_STATUS bit
- *	in it's IntStatus register.
+/**
+ *	WaitForDoorbellAck - Wait for IOC doorbell handshake acknowledge
  *	@ioc: Pointer to MPT_ADAPTER structure
  *	@howlong: How long to wait (in seconds)
  *	@sleepFlag: Specifies whether the process can sleep
  *
  *	This routine waits (up to ~2 seconds max) for IOC doorbell
- *	handshake ACKnowledge.
+ *	handshake ACKnowledge, indicated by the IOP_DOORBELL_STATUS
+ *	bit in its IntStatus register being clear.
  *
  *	Returns a negative value on failure, else wait loop count.
  */
@@ -3942,14 +3961,14 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
- *	WaitForDoorbellInt - Wait for IOC to set the HIS_DOORBELL_INTERRUPT bit
- *	in it's IntStatus register.
+/**
+ *	WaitForDoorbellInt - Wait for IOC to set its doorbell interrupt bit
  *	@ioc: Pointer to MPT_ADAPTER structure
  *	@howlong: How long to wait (in seconds)
  *	@sleepFlag: Specifies whether the process can sleep
  *
- *	This routine waits (up to ~2 seconds max) for IOC doorbell interrupt.
+ *	This routine waits (up to ~2 seconds max) for IOC doorbell interrupt
+ *	(MPI_HIS_DOORBELL_INTERRUPT) to be set in the IntStatus register.
  *
  *	Returns a negative value on failure, else wait loop count.
  */
@@ -3991,8 +4010,8 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
- *	WaitForDoorbellReply - Wait for and capture a IOC handshake reply.
+/**
+ *	WaitForDoorbellReply - Wait for and capture an IOC handshake reply.
  *	@ioc: Pointer to MPT_ADAPTER structure
  *	@howlong: How long to wait (in seconds)
  *	@sleepFlag: Specifies whether the process can sleep
@@ -4077,7 +4096,7 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	GetLanConfigPages - Fetch LANConfig pages.
  *	@ioc: Pointer to MPT_ADAPTER structure
  *
@@ -4188,12 +4207,9 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
- *	mptbase_sas_persist_operation - Perform operation on SAS Persitent Table
+/**
+ *	mptbase_sas_persist_operation - Perform operation on SAS Persistent Table
  *	@ioc: Pointer to MPT_ADAPTER structure
- *	@sas_address: 64bit SAS Address for operation.
- *	@target_id: specified target for operation
- *	@bus: specified bus for operation
  *	@persist_opcode: see below
  *
  *	MPI_SAS_OP_CLEAR_NOT_PRESENT - Free all persist TargetID mappings for
@@ -4202,7 +4218,7 @@
  *
 *	NOTE: Do not use this function during interrupt time.
  *
- *	Returns: 0 for success, non-zero error
+ *	Returns 0 for success, non-zero error
  */
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -4399,7 +4415,7 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	GetIoUnitPage2 - Retrieve BIOS version and boot order information.
  *	@ioc: Pointer to MPT_ADAPTER structure
  *
@@ -4457,7 +4473,8 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*	mpt_GetScsiPortSettings - read SCSI Port Page 0 and 2
+/**
+ *	mpt_GetScsiPortSettings - read SCSI Port Page 0 and 2
 *	@ioc: Pointer to an Adapter Structure
  *	@portnum: IOC port number
  *
@@ -4644,7 +4661,8 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*	mpt_readScsiDevicePageHeaders - save version and length of SDP1
+/**
+ *	mpt_readScsiDevicePageHeaders - save version and length of SDP1
 *	@ioc: Pointer to an Adapter Structure
  *	@portnum: IOC port number
  *
@@ -4996,9 +5014,8 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
- *	SendEventNotification - Send EventNotification (on or off) request
- *	to MPT adapter.
+/**
+ *	SendEventNotification - Send EventNotification (on or off) request to adapter
  *	@ioc: Pointer to MPT_ADAPTER structure
  *	@EvSwitch: Event switch flags
  */
@@ -5062,8 +5079,8 @@
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
  *	mpt_config - Generic function to issue config message
- *	@ioc - Pointer to an adapter structure
- *	@cfg - Pointer to a configuration structure. Struct contains
+ *	@ioc:   Pointer to an adapter structure
+ *	@pCfg:  Pointer to a configuration structure. Struct contains
  *		action, page address, direction, physical address
  *		and pointer to a configuration page header
  *		Page header is updated.
@@ -5188,8 +5205,8 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
- *	mpt_timer_expired - Call back for timer process.
+/**
+ *	mpt_timer_expired - Callback for timer process.
  *	Used only internal config functionality.
  *	@data: Pointer to MPT_SCSI_HOST recast as an unsigned long
  */
@@ -5214,12 +5231,12 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	mpt_ioc_reset - Base cleanup for hard reset
  *	@ioc: Pointer to the adapter structure
  *	@reset_phase: Indicates pre- or post-reset functionality
  *
- *	Remark: Free's resources with internally generated commands.
+ *	Remark: Frees resources with internally generated commands.
  */
 static int
 mpt_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
@@ -5271,7 +5288,7 @@
  *	procfs (%MPT_PROCFS_MPTBASEDIR/...) support stuff...
  */
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	procmpt_create - Create %MPT_PROCFS_MPTBASEDIR entries.
  *
  *	Returns 0 for success, non-zero for failure.
@@ -5297,7 +5314,7 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	procmpt_destroy - Tear down %MPT_PROCFS_MPTBASEDIR entries.
  *
  *	Returns 0 for success, non-zero for failure.
@@ -5311,16 +5328,16 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
- *	procmpt_summary_read - Handle read request from /proc/mpt/summary
- *	or from /proc/mpt/iocN/summary.
+/**
+ *	procmpt_summary_read - Handle read request of a summary file
  *	@buf: Pointer to area to write information
  *	@start: Pointer to start pointer
  *	@offset: Offset to start writing
- *	@request:
+ *	@request: Amount of read data requested
  *	@eof: Pointer to EOF integer
  *	@data: Pointer
  *
+ *	Handles read request from /proc/mpt/summary or /proc/mpt/iocN/summary.
  *	Returns number of characters written to process performing the read.
  */
 static int
@@ -5355,12 +5372,12 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	procmpt_version_read - Handle read request from /proc/mpt/version.
  *	@buf: Pointer to area to write information
  *	@start: Pointer to start pointer
  *	@offset: Offset to start writing
- *	@request:
+ *	@request: Amount of read data requested
  *	@eof: Pointer to EOF integer
  *	@data: Pointer
  *
@@ -5411,12 +5428,12 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	procmpt_iocinfo_read - Handle read request from /proc/mpt/iocN/info.
  *	@buf: Pointer to area to write information
  *	@start: Pointer to start pointer
  *	@offset: Offset to start writing
- *	@request:
+ *	@request: Amount of read data requested
  *	@eof: Pointer to EOF integer
  *	@data: Pointer
  *
@@ -5577,16 +5594,17 @@
  */
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
- *	mpt_HardResetHandler - Generic reset handler, issue SCSI Task
- *	Management call based on input arg values.  If TaskMgmt fails,
- *	return associated SCSI request.
+ *	mpt_HardResetHandler - Generic reset handler
  *	@ioc: Pointer to MPT_ADAPTER structure
  *	@sleepFlag: Indicates if sleep or schedule must be called.
  *
+ *	Issues SCSI Task Management call based on input arg values.
+ *	If TaskMgmt fails, returns associated SCSI request.
+ *
  *	Remark: _HardResetHandler can be invoked from an interrupt thread (timer)
  *	or a non-interrupt thread.  In the former, must not call schedule().
  *
- *	Remark: A return of -1 is a FATAL error case, as it means a
+ *	Note: A return of -1 is a FATAL error case, as it means a
  *	FW reload/initialization failed.
  *
  *	Returns 0 for SUCCESS or -1 if FAILED.
@@ -5935,13 +5953,14 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
- *	ProcessEventNotification - Route a received EventNotificationReply to
- *	all currently regeistered event handlers.
+/**
+ *	ProcessEventNotification - Route EventNotificationReply to all event handlers
  *	@ioc: Pointer to MPT_ADAPTER structure
  *	@pEventReply: Pointer to EventNotification reply frame
  *	@evHandlers: Pointer to integer, number of event handlers
  *
+ *	Routes a received EventNotificationReply to all currently registered
+ *	event handlers.
  *	Returns sum of event handlers return values.
  */
 static int
@@ -6056,7 +6075,7 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	mpt_fc_log_info - Log information returned from Fibre Channel IOC.
  *	@ioc: Pointer to MPT_ADAPTER structure
  *	@log_info: U32 LogInfo reply word from the IOC
@@ -6077,7 +6096,7 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	mpt_spi_log_info - Log information returned from SCSI Parallel IOC.
  *	@ioc: Pointer to MPT_ADAPTER structure
  *	@mr: Pointer to MPT reply frame
@@ -6200,7 +6219,7 @@
 	};
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	mpt_sas_log_info - Log information returned from SAS IOC.
  *	@ioc: Pointer to MPT_ADAPTER structure
  *	@log_info: U32 LogInfo reply word from the IOC
@@ -6255,7 +6274,7 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	mpt_sp_ioc_info - IOC information returned from SCSI Parallel IOC.
  *	@ioc: Pointer to MPT_ADAPTER structure
  *	@ioc_status: U32 IOCStatus word from IOC
@@ -6416,7 +6435,7 @@
 EXPORT_SYMBOL(mptbase_sas_persist_operation);
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	fusion_init - Fusion MPT base driver initialization routine.
  *
  *	Returns 0 for success, non-zero for failure.
@@ -6456,7 +6475,7 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	fusion_exit - Perform driver unload cleanup.
  *
  *	This routine frees all resources associated with each MPT adapter
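
Most of the mptbase.c churn above is documentation only: opening comments become /** so kernel-doc picks them up, every parameter gains an @name: line, and the long description moves below the parameter block. The expected layout, shown on a made-up function:

#include <linux/device.h>

/**
 *	my_reset_adapter - Bring a (hypothetical) adapter back to a READY state
 *	@dev: device being reset
 *	@force: force a hard reset even if the device looks healthy
 *
 *	The long description follows the one-line summary and the
 *	@parameter list, separated from them by a blank comment line.
 *
 *	Returns 0 for success, non-zero for failure.
 */
static int my_reset_adapter(struct device *dev, int force)
{
	return 0;
}
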
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index 1dd4917..ca2f910 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -1018,9 +1018,10 @@
 }
 
 static void
-mptfc_setup_reset(void *arg)
+mptfc_setup_reset(struct work_struct *work)
 {
-	MPT_ADAPTER		*ioc = (MPT_ADAPTER *)arg;
+	MPT_ADAPTER		*ioc =
+		container_of(work, MPT_ADAPTER, fc_setup_reset_work);
 	u64			pn;
 	struct mptfc_rport_info *ri;
 
@@ -1043,9 +1044,10 @@
 }
 
 static void
-mptfc_rescan_devices(void *arg)
+mptfc_rescan_devices(struct work_struct *work)
 {
-	MPT_ADAPTER		*ioc = (MPT_ADAPTER *)arg;
+	MPT_ADAPTER		*ioc =
+		container_of(work, MPT_ADAPTER, fc_rescan_work);
 	int			ii;
 	u64			pn;
 	struct mptfc_rport_info *ri;
@@ -1154,8 +1156,8 @@
         }
 
 	spin_lock_init(&ioc->fc_rescan_work_lock);
-	INIT_WORK(&ioc->fc_rescan_work, mptfc_rescan_devices,(void *)ioc);
-	INIT_WORK(&ioc->fc_setup_reset_work, mptfc_setup_reset, (void *)ioc);
+	INIT_WORK(&ioc->fc_rescan_work, mptfc_rescan_devices);
+	INIT_WORK(&ioc->fc_setup_reset_work, mptfc_setup_reset);
 
 	spin_lock_irqsave(&ioc->FreeQlock, flags);
 
@@ -1393,8 +1395,7 @@
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
- *	mptfc_init - Register MPT adapter(s) as SCSI host(s) with
- *	linux scsi mid-layer.
+ *	mptfc_init - Register MPT adapter(s) as SCSI host(s) with SCSI mid-layer.
  *
  *	Returns 0 for success, non-zero for failure.
  */
@@ -1438,7 +1439,7 @@
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
- *	mptfc_remove - Removed fc infrastructure for devices
+ *	mptfc_remove - Remove fc infrastructure for devices
  *	@pdev: Pointer to pci_dev structure
  *
  */
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
index 314c3a2..b7c4407 100644
--- a/drivers/message/fusion/mptlan.c
+++ b/drivers/message/fusion/mptlan.c
@@ -111,7 +111,8 @@
 	u32 total_received;
 	struct net_device_stats stats;	/* Per device statistics */
 
-	struct work_struct post_buckets_task;
+	struct delayed_work post_buckets_task;
+	struct net_device *dev;
 	unsigned long post_buckets_active;
 };
 
@@ -132,7 +133,7 @@
 static int  mpt_lan_open(struct net_device *dev);
 static int  mpt_lan_reset(struct net_device *dev);
 static int  mpt_lan_close(struct net_device *dev);
-static void mpt_lan_post_receive_buckets(void *dev_id);
+static void mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv);
 static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
 					   int priority);
 static int  mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
@@ -345,7 +346,7 @@
 			priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
 		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
 	} else {
-		mpt_lan_post_receive_buckets(dev);
+		mpt_lan_post_receive_buckets(priv);
 		netif_wake_queue(dev);
 	}
 
@@ -441,7 +442,7 @@
 
 	dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));
 
-	mpt_lan_post_receive_buckets(dev);
+	mpt_lan_post_receive_buckets(priv);
 	printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
 			IOC_AND_NETDEV_NAMES_s_s(dev));
 
@@ -854,7 +855,7 @@
 	
 	if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
 		if (priority) {
-			schedule_work(&priv->post_buckets_task);
+			schedule_delayed_work(&priv->post_buckets_task, 0);
 		} else {
 			schedule_delayed_work(&priv->post_buckets_task, 1);
 			dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
@@ -1188,10 +1189,9 @@
 /* Simple SGE's only at the moment */
 
 static void
-mpt_lan_post_receive_buckets(void *dev_id)
+mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
 {
-	struct net_device *dev = dev_id;
-	struct mpt_lan_priv *priv = dev->priv;
+	struct net_device *dev = priv->dev;
 	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
 	MPT_FRAME_HDR *mf;
 	LANReceivePostRequest_t *pRecvReq;
@@ -1335,6 +1335,13 @@
 	clear_bit(0, &priv->post_buckets_active);
 }
 
+static void
+mpt_lan_post_receive_buckets_work(struct work_struct *work)
+{
+	mpt_lan_post_receive_buckets(container_of(work, struct mpt_lan_priv,
+						  post_buckets_task.work));
+}
+
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 static struct net_device *
 mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
@@ -1350,11 +1357,13 @@
 
 	priv = netdev_priv(dev);
 
+	priv->dev = dev;
 	priv->mpt_dev = mpt_dev;
 	priv->pnum = pnum;
 
-	memset(&priv->post_buckets_task, 0, sizeof(struct work_struct));
-	INIT_WORK(&priv->post_buckets_task, mpt_lan_post_receive_buckets, dev);
+	memset(&priv->post_buckets_task, 0, sizeof(priv->post_buckets_task));
+	INIT_DELAYED_WORK(&priv->post_buckets_task,
+			  mpt_lan_post_receive_buckets_work);
 	priv->post_buckets_active = 0;
 
 	dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index b752a47..4f0c530 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -2006,9 +2006,10 @@
  *(Mutex LOCKED)
  */
 static void
-mptsas_discovery_work(void * arg)
+mptsas_discovery_work(struct work_struct *work)
 {
-	struct mptsas_discovery_event *ev = arg;
+	struct mptsas_discovery_event *ev =
+		container_of(work, struct mptsas_discovery_event, work);
 	MPT_ADAPTER *ioc = ev->ioc;
 
 	mutex_lock(&ioc->sas_discovery_mutex);
@@ -2068,9 +2069,9 @@
 * Work queue thread to clear the persistence table
  */
 static void
-mptsas_persist_clear_table(void * arg)
+mptsas_persist_clear_table(struct work_struct *work)
 {
-	MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
+	MPT_ADAPTER *ioc = container_of(work, MPT_ADAPTER, sas_persist_task);
 
 	mptbase_sas_persist_operation(ioc, MPI_SAS_OP_CLEAR_NOT_PRESENT);
 }
@@ -2093,9 +2094,10 @@
  * Work queue thread to handle SAS hotplug events
  */
 static void
-mptsas_hotplug_work(void *arg)
+mptsas_hotplug_work(struct work_struct *work)
 {
-	struct mptsas_hotplug_event *ev = arg;
+	struct mptsas_hotplug_event *ev =
+		container_of(work, struct mptsas_hotplug_event, work);
 	MPT_ADAPTER *ioc = ev->ioc;
 	struct mptsas_phyinfo *phy_info;
 	struct sas_rphy *rphy;
@@ -2341,7 +2343,7 @@
 			break;
 		}
 
-		INIT_WORK(&ev->work, mptsas_hotplug_work, ev);
+		INIT_WORK(&ev->work, mptsas_hotplug_work);
 		ev->ioc = ioc;
 		ev->handle = le16_to_cpu(sas_event_data->DevHandle);
 		ev->parent_handle =
@@ -2366,7 +2368,7 @@
 	 * Persistent table is full.
 	 */
 		INIT_WORK(&ioc->sas_persist_task,
-		    mptsas_persist_clear_table, (void *)ioc);
+		    mptsas_persist_clear_table);
 		schedule_work(&ioc->sas_persist_task);
 		break;
 	case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
@@ -2395,7 +2397,7 @@
 		return;
 	}
 
-	INIT_WORK(&ev->work, mptsas_hotplug_work, ev);
+	INIT_WORK(&ev->work, mptsas_hotplug_work);
 	ev->ioc = ioc;
 	ev->id = raid_event_data->VolumeID;
 	ev->event_type = MPTSAS_IGNORE_EVENT;
@@ -2474,7 +2476,7 @@
 	ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
 	if (!ev)
 		return;
-	INIT_WORK(&ev->work, mptsas_discovery_work, ev);
+	INIT_WORK(&ev->work, mptsas_discovery_work);
 	ev->ioc = ioc;
 	schedule_work(&ev->work);
 };
@@ -2511,8 +2513,7 @@
 		break;
 	case MPI_EVENT_PERSISTENT_TABLE_FULL:
 		INIT_WORK(&ioc->sas_persist_task,
-		    mptsas_persist_clear_table,
-		    (void *)ioc);
+		    mptsas_persist_clear_table);
 		schedule_work(&ioc->sas_persist_task);
 		break;
 	 case MPI_EVENT_SAS_DISCOVERY:
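
mptsas allocates a small event record per notification and embeds the work item in it, so the handler can recover the record with container_of() and release it when done. A minimal sketch of that pattern with hypothetical names (the real driver carries more state per event):

#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/types.h>

struct my_hotplug_event {
	struct work_struct	work;	/* embedded so container_of() works */
	u16			handle;
};

static void my_hotplug_work(struct work_struct *work)
{
	struct my_hotplug_event *ev =
		container_of(work, struct my_hotplug_event, work);

	/* ... act on ev->handle ... */
	kfree(ev);			/* record was allocated per event */
}

static void my_queue_hotplug_event(u16 handle)
{
	struct my_hotplug_event *ev = kzalloc(sizeof(*ev), GFP_ATOMIC);

	if (!ev)
		return;
	INIT_WORK(&ev->work, my_hotplug_work);
	ev->handle = handle;
	schedule_work(&ev->work);
}
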
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 30524dc..2c72c36 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1230,15 +1230,15 @@
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
  *	mptscsih_proc_info - Return information about MPT adapter
+ * 	@host:   scsi host struct
+ * 	@buffer: if write, user data; if read, buffer for user
+ *	@start: returns the buffer address
+ * 	@offset: if write, 0; if read, the current offset into the buffer from
+ * 		 the previous read.
+ * 	@length: if write, return length;
+ *	@func:   write = 1; read = 0
  *
  *	(linux scsi_host_template.info routine)
- *
- * 	buffer: if write, user data; if read, buffer for user
- * 	length: if write, return length;
- * 	offset: if write, 0; if read, the current offset into the buffer from
- * 		the previous read.
- * 	hostno: scsi host number
- *	func:   if write = 1; if read = 0
  */
 int
 mptscsih_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
@@ -1902,8 +1902,7 @@
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
- *	mptscsih_host_reset - Perform a SCSI host adapter RESET!
- *	new_eh variant
+ *	mptscsih_host_reset - Perform a SCSI host adapter RESET (new_eh variant)
  *	@SCpnt: Pointer to scsi_cmnd structure, IO which reset is due to
  *
  *	(linux scsi_host_template.eh_host_reset_handler routine)
@@ -1949,8 +1948,7 @@
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
- *	mptscsih_tm_pending_wait - wait for pending task management request to
- *		complete.
+ *	mptscsih_tm_pending_wait - wait for pending task management request to complete
  *	@hd: Pointer to MPT host structure.
  *
  *	Returns {SUCCESS,FAILED}.
@@ -1982,6 +1980,7 @@
 /**
  *	mptscsih_tm_wait_for_completion - wait for completion of TM task
  *	@hd: Pointer to MPT host structure.
+ *	@timeout: timeout in seconds
  *
  *	Returns {SUCCESS,FAILED}.
  */
@@ -3429,8 +3428,7 @@
 /**
  *	mptscsih_synchronize_cache - Send SYNCHRONIZE_CACHE to all disks.
  *	@hd: Pointer to a SCSI HOST structure
- *	@vtarget: per device private data
- *	@lun: lun
+ *	@vdevice: virtual target device
  *
  *	Uses the ISR, but with special processing.
  *	MUST be single-threaded.
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index e4cc3dd..36641da 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -646,9 +646,10 @@
 	int			disk;
 };
 
-static void mpt_work_wrapper(void *data)
+static void mpt_work_wrapper(struct work_struct *work)
 {
-	struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data;
+	struct work_queue_wrapper *wqw =
+		container_of(work, struct work_queue_wrapper, work);
 	struct _MPT_SCSI_HOST *hd = wqw->hd;
 	struct Scsi_Host *shost = hd->ioc->sh;
 	struct scsi_device *sdev;
@@ -695,7 +696,7 @@
 			   disk);
 		return;
 	}
-	INIT_WORK(&wqw->work, mpt_work_wrapper, wqw);
+	INIT_WORK(&wqw->work, mpt_work_wrapper);
 	wqw->hd = hd;
 	wqw->disk = disk;
 
@@ -784,9 +785,10 @@
  * renegotiate for a given target
  */
 static void
-mptspi_dv_renegotiate_work(void *data)
+mptspi_dv_renegotiate_work(struct work_struct *work)
 {
-	struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data;
+	struct work_queue_wrapper *wqw =
+		container_of(work, struct work_queue_wrapper, work);
 	struct _MPT_SCSI_HOST *hd = wqw->hd;
 	struct scsi_device *sdev;
 
@@ -804,7 +806,7 @@
 	if (!wqw)
 		return;
 
-	INIT_WORK(&wqw->work, mptspi_dv_renegotiate_work, wqw);
+	INIT_WORK(&wqw->work, mptspi_dv_renegotiate_work);
 	wqw->hd = hd;
 
 	schedule_work(&wqw->work);
@@ -1098,8 +1100,7 @@
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
- *	mptspi_init - Register MPT adapter(s) as SCSI host(s) with
- *	linux scsi mid-layer.
+ *	mptspi_init - Register MPT adapter(s) as SCSI host(s) with SCSI mid-layer.
  *
  *	Returns 0 for success, non-zero for failure.
  */
@@ -1133,7 +1134,6 @@
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
  *	mptspi_exit - Unregisters MPT adapter(s)
- *
  */
 static void __exit
 mptspi_exit(void)
diff --git a/drivers/message/i2o/bus-osm.c b/drivers/message/i2o/bus-osm.c
index d96c687..c463dc2 100644
--- a/drivers/message/i2o/bus-osm.c
+++ b/drivers/message/i2o/bus-osm.c
@@ -56,6 +56,9 @@
 /**
  *	i2o_bus_store_scan - Scan the I2O Bus Adapter
  *	@d: device which should be scanned
+ *	@attr: device_attribute
+ *	@buf: output buffer
+ *	@count: buffer size
  *
  *	Returns count.
  */
diff --git a/drivers/message/i2o/device.c b/drivers/message/i2o/device.c
index ee18305..b9df143 100644
--- a/drivers/message/i2o/device.c
+++ b/drivers/message/i2o/device.c
@@ -54,8 +54,8 @@
  *	@dev: I2O device to claim
  *	@drv: I2O driver which wants to claim the device
  *
- *	Do the leg work to assign a device to a given OSM. If the claim succeed
- *	the owner of the rimary. If the attempt fails a negative errno code
+ *	Do the leg work to assign a device to a given OSM. If the claim succeeds,
+ *	the owner is the primary. If the attempt fails a negative errno code
  *	is returned. On success zero is returned.
  */
 int i2o_device_claim(struct i2o_device *dev)
@@ -208,24 +208,23 @@
 
 /**
  *	i2o_device_add - allocate a new I2O device and add it to the IOP
- *	@iop: I2O controller where the device is on
+ *	@c: I2O controller that the device is on
  *	@entry: LCT entry of the I2O device
  *
  *	Allocate a new I2O device and initialize it with the LCT entry. The
  *	device is appended to the device list of the controller.
  *
- *	Returns a pointer to the I2O device on success or negative error code
- *	on failure.
+ *	Returns zero on success, or a -ve errno.
  */
-static struct i2o_device *i2o_device_add(struct i2o_controller *c,
-					 i2o_lct_entry * entry)
+static int i2o_device_add(struct i2o_controller *c, i2o_lct_entry *entry)
 {
 	struct i2o_device *i2o_dev, *tmp;
+	int rc;
 
 	i2o_dev = i2o_device_alloc();
 	if (IS_ERR(i2o_dev)) {
 		printk(KERN_ERR "i2o: unable to allocate i2o device\n");
-		return i2o_dev;
+		return PTR_ERR(i2o_dev);
 	}
 
 	i2o_dev->lct_data = *entry;
@@ -236,7 +235,9 @@
 	i2o_dev->iop = c;
 	i2o_dev->device.parent = &c->device;
 
-	device_register(&i2o_dev->device);
+	rc = device_register(&i2o_dev->device);
+	if (rc)
+		goto err;
 
 	list_add_tail(&i2o_dev->list, &c->devices);
 
@@ -270,12 +271,16 @@
 
 	pr_debug("i2o: device %s added\n", i2o_dev->device.bus_id);
 
-	return i2o_dev;
+	return 0;
+
+err:
+	kfree(i2o_dev);
+	return rc;
 }
 
 /**
  *	i2o_device_remove - remove an I2O device from the I2O core
- *	@dev: I2O device which should be released
+ *	@i2o_dev: I2O device which should be released
  *
  *	Is used on I2O controller removal or LCT modification, when the device
  *	is removed from the system. Note that the device could still hang
diff --git a/drivers/message/i2o/driver.c b/drivers/message/i2o/driver.c
index 6413022..9104b65 100644
--- a/drivers/message/i2o/driver.c
+++ b/drivers/message/i2o/driver.c
@@ -34,9 +34,7 @@
 static struct i2o_driver **i2o_drivers;
 
 /**
- *	i2o_bus_match - Tell if a I2O device class id match the class ids of
- *			the I2O driver (OSM)
- *
+ *	i2o_bus_match - Tell if I2O device class id matches the class ids of the I2O driver (OSM)
  *	@dev: device which should be verified
  *	@drv: the driver to match against
  *
@@ -232,7 +230,7 @@
 			break;
 		}
 
-		INIT_WORK(&evt->work, (void (*)(void *))drv->event, evt);
+		INIT_WORK(&evt->work, drv->event);
 		queue_work(drv->event_queue, &evt->work);
 		return 1;
 	}
@@ -248,7 +246,7 @@
 
 /**
  *	i2o_driver_notify_controller_add_all - Send notify of added controller
- *					       to all I2O drivers
+ *	@c: newly added controller
  *
  *	Send notifications to all registered drivers that a new controller was
  *	added.
@@ -267,8 +265,8 @@
 }
 
 /**
- *	i2o_driver_notify_controller_remove_all - Send notify of removed
- *						  controller to all I2O drivers
+ *	i2o_driver_notify_controller_remove_all - Send notify of removed controller
+ *	@c: controller that is being removed
  *
  *	Send notifications to all registered drivers that a controller was
  *	removed.
@@ -287,8 +285,8 @@
 }
 
 /**
- *	i2o_driver_notify_device_add_all - Send notify of added device to all
- *					   I2O drivers
+ *	i2o_driver_notify_device_add_all - Send notify of added device
+ *	@i2o_dev: newly added I2O device
  *
  *	Send notifications to all registered drivers that a device was added.
  */
@@ -306,8 +304,8 @@
 }
 
 /**
- *	i2o_driver_notify_device_remove_all - Send notify of removed device to
- *					      all I2O drivers
+ *	i2o_driver_notify_device_remove_all - Send notify of removed device
+ *	@i2o_dev: device that is being removed
  *
  *	Send notifications to all registered drivers that a device was removed.
  */
@@ -362,7 +360,7 @@
 /**
  *	i2o_driver_exit - clean up I2O drivers (OSMs)
  *
- *	Unregisters the I2O bus and free driver array.
+ *	Unregisters the I2O bus and frees driver array.
  */
 void __exit i2o_driver_exit(void)
 {
diff --git a/drivers/message/i2o/exec-osm.c b/drivers/message/i2o/exec-osm.c
index a235064..902753b 100644
--- a/drivers/message/i2o/exec-osm.c
+++ b/drivers/message/i2o/exec-osm.c
@@ -94,8 +94,8 @@
 };
 
 /**
- *	i2o_exec_wait_free - Free a i2o_exec_wait struct
- *	@i2o_exec_wait: I2O wait data which should be cleaned up
+ *	i2o_exec_wait_free - Free an i2o_exec_wait struct
+ *	@wait: I2O wait data which should be cleaned up
  */
 static void i2o_exec_wait_free(struct i2o_exec_wait *wait)
 {
@@ -105,7 +105,7 @@
 /**
  * 	i2o_msg_post_wait_mem - Post and wait a message with DMA buffers
  *	@c: controller
- *	@m: message to post
+ *	@msg: message to post
  *	@timeout: time in seconds to wait
  *	@dma: i2o_dma struct of the DMA buffer to free on failure
  *
@@ -269,6 +269,7 @@
 /**
  *	i2o_exec_show_vendor_id - Displays Vendor ID of controller
  *	@d: device of which the Vendor ID should be displayed
+ *	@attr: device_attribute to display
  *	@buf: buffer into which the Vendor ID should be printed
  *
  *	Returns number of bytes printed into buffer.
@@ -290,6 +291,7 @@
 /**
  *	i2o_exec_show_product_id - Displays Product ID of controller
  *	@d: device of which the Product ID should be displayed
+ *	@attr: device_attribute to display
  *	@buf: buffer into which the Product ID should be printed
  *
  *	Returns number of bytes printed into buffer.
@@ -365,14 +367,16 @@
 
 /**
  *	i2o_exec_lct_modified - Called on LCT NOTIFY reply
- *	@c: I2O controller on which the LCT has modified
+ *	@work: work struct for a specific controller
  *
 *	This function handles asynchronous LCT NOTIFY replies. It parses the
 *	new LCT and if the buffer for the LCT was too small sends a LCT NOTIFY
 *	again, otherwise sends LCT NOTIFY to get informed on the next LCT change.
  */
-static void i2o_exec_lct_modified(struct i2o_exec_lct_notify_work *work)
+static void i2o_exec_lct_modified(struct work_struct *_work)
 {
+	struct i2o_exec_lct_notify_work *work =
+		container_of(_work, struct i2o_exec_lct_notify_work, work);
 	u32 change_ind = 0;
 	struct i2o_controller *c = work->c;
 
@@ -439,8 +443,7 @@
 
 		work->c = c;
 
-		INIT_WORK(&work->work, (void (*)(void *))i2o_exec_lct_modified,
-			  work);
+		INIT_WORK(&work->work, i2o_exec_lct_modified);
 		queue_work(i2o_exec_driver.event_queue, &work->work);
 		return 1;
 	}
@@ -460,13 +463,15 @@
 
 /**
  *	i2o_exec_event - Event handling function
- *	@evt: Event which occurs
+ *	@work: Work item in occurring event
  *
 *	Handles events sent by the Executive device. At the moment it does not do
  *	anything useful.
  */
-static void i2o_exec_event(struct i2o_event *evt)
+static void i2o_exec_event(struct work_struct *work)
 {
+	struct i2o_event *evt = container_of(work, struct i2o_event, work);
+
 	if (likely(evt->i2o_dev))
 		osm_debug("Event received from device: %d\n",
 			  evt->i2o_dev->lct_data.tid);
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index eaba81b..da9859f 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -259,7 +259,7 @@
 /**
  *	i2o_block_device_power - Power management for device dev
  *	@dev: I2O device which should receive the power management request
- *	@operation: Operation which should be send
+ *	@op: Operation to send
  *
  *	Send a power management request to the device dev.
  *
@@ -315,7 +315,7 @@
  *	i2o_block_request_free - Frees a I2O block request
  *	@ireq: I2O block request which should be freed
  *
- *	Fres the allocated memory (give it back to the request mempool).
+ *	Frees the allocated memory (give it back to the request mempool).
  */
 static inline void i2o_block_request_free(struct i2o_block_request *ireq)
 {
@@ -326,6 +326,7 @@
  *	i2o_block_sglist_alloc - Allocate the SG list and map it
  *	@c: I2O controller to which the request belongs
  *	@ireq: I2O block request
+ *	@mptr: message body pointer
  *
 *	Builds the SG list and maps it to be accessible by the controller.
  *
@@ -419,16 +420,18 @@
 
 /**
  *	i2o_block_delayed_request_fn - delayed request queue function
- *	delayed_request: the delayed request with the queue to start
+ *	@work: the delayed request with the queue to start
  *
  *	If the request queue is stopped for a disk, and there is no open
  *	request, a new event is created, which calls this function to start
  *	the queue after I2O_BLOCK_REQUEST_TIME. Otherwise the queue will never
  *	be started again.
  */
-static void i2o_block_delayed_request_fn(void *delayed_request)
+static void i2o_block_delayed_request_fn(struct work_struct *work)
 {
-	struct i2o_block_delayed_request *dreq = delayed_request;
+	struct i2o_block_delayed_request *dreq =
+		container_of(work, struct i2o_block_delayed_request,
+			     work.work);
 	struct request_queue *q = dreq->queue;
 	unsigned long flags;
 
@@ -488,7 +491,7 @@
  *	i2o_block_reply - Block OSM reply handler.
  *	@c: I2O controller from which the message arrives
  *	@m: message id of reply
- *	qmsg: the actuall I2O message reply
+ *	@msg: the actual I2O message reply
  *
  *	This function gets all the message replies.
  *
@@ -538,8 +541,9 @@
 	return 1;
 };
 
-static void i2o_block_event(struct i2o_event *evt)
+static void i2o_block_event(struct work_struct *work)
 {
+	struct i2o_event *evt = container_of(work, struct i2o_event, work);
 	osm_debug("event received\n");
 	kfree(evt);
 };
@@ -599,6 +603,8 @@
 
 /**
  *	i2o_block_open - Open the block device
+ *	@inode: inode for block device being opened
+ *	@file: file to open
  *
  *	Power up the device, mount and lock the media. This function is called,
  *	if the block device is opened for access.
@@ -626,6 +632,8 @@
 
 /**
  *	i2o_block_release - Release the I2O block device
+ *	@inode: inode for block device being released
+ *	@file: file to close
  *
  *	Unlock and unmount the media, and power down the device. Gets called if
  *	the block device is closed.
@@ -672,6 +680,8 @@
 
 /**
  *	i2o_block_ioctl - Issue device specific ioctl calls.
+ *	@inode: inode for block device ioctl
+ *	@file: file for ioctl
  *	@cmd: ioctl command
  *	@arg: arg
  *
@@ -899,7 +909,7 @@
 
 /**
  *	i2o_block_request_fn - request queue handling function
- *	q: request queue from which the request could be fetched
+ *	@q: request queue from which the request could be fetched
  *
  *	Takes the next request from the queue, transfers it and if no error
 *	occurs dequeues it from the queue. On arrival of the reply the message
@@ -938,8 +948,8 @@
 				continue;
 
 			dreq->queue = q;
-			INIT_WORK(&dreq->work, i2o_block_delayed_request_fn,
-				  dreq);
+			INIT_DELAYED_WORK(&dreq->work,
+					  i2o_block_delayed_request_fn);
 
 			if (!queue_delayed_work(i2o_block_driver.event_queue,
 						&dreq->work,
diff --git a/drivers/message/i2o/i2o_block.h b/drivers/message/i2o/i2o_block.h
index 4fdaa5b..67f921b 100644
--- a/drivers/message/i2o/i2o_block.h
+++ b/drivers/message/i2o/i2o_block.h
@@ -64,7 +64,7 @@
 
 /* I2O Block OSM mempool struct */
 struct i2o_block_mempool {
-	kmem_cache_t *slab;
+	struct kmem_cache *slab;
 	mempool_t *pool;
 };
 
@@ -96,7 +96,7 @@
 
 /* I2O Block device delayed request */
 struct i2o_block_delayed_request {
-	struct work_struct work;
+	struct delayed_work work;
 	struct request_queue *queue;
 };
 
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index 7d23e08..1de30d7 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -265,7 +265,11 @@
 		return -ENOMEM;
 	}
 
-	__copy_from_user(buffer.virt, kxfer.buf, fragsize);
+	if (__copy_from_user(buffer.virt, kxfer.buf, fragsize)) {
+		i2o_msg_nop(c, msg);
+		i2o_dma_free(&c->pdev->dev, &buffer);
+		return -EFAULT;
+	}
 
 	msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_7);
 	msg->u.head[1] =
@@ -516,7 +520,6 @@
 	return 0;
 }
 
-#ifdef CONFIG_I2O_EXT_ADAPTEC
 #ifdef CONFIG_COMPAT
 static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
 			      unsigned long arg)
@@ -759,6 +762,7 @@
 
 #endif
 
+#ifdef CONFIG_I2O_EXT_ADAPTEC
 static int i2o_cfg_passthru(unsigned long arg)
 {
 	struct i2o_cmd_passthru __user *cmd =
diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
index 3d2e76e..a61cb17 100644
--- a/drivers/message/i2o/i2o_proc.c
+++ b/drivers/message/i2o/i2o_proc.c
@@ -163,7 +163,7 @@
  *	i2o_get_class_name - 	do i2o class name lookup
  *	@class: class number
  *
- *	Return a descriptive string for an i2o class
+ *	Return a descriptive string for an i2o class.
  */
 static const char *i2o_get_class_name(int class)
 {
diff --git a/drivers/message/i2o/i2o_scsi.c b/drivers/message/i2o/i2o_scsi.c
index 6ebf382..1045c8a 100644
--- a/drivers/message/i2o/i2o_scsi.c
+++ b/drivers/message/i2o/i2o_scsi.c
@@ -220,7 +220,7 @@
 	u32 id = -1;
 	u64 lun = -1;
 	int channel = -1;
-	int i;
+	int i, rc;
 
 	i2o_shost = i2o_scsi_get_host(c);
 	if (!i2o_shost)
@@ -304,14 +304,20 @@
 		return PTR_ERR(scsi_dev);
 	}
 
-	sysfs_create_link(&i2o_dev->device.kobj, &scsi_dev->sdev_gendev.kobj,
-			  "scsi");
+	rc = sysfs_create_link(&i2o_dev->device.kobj,
+			       &scsi_dev->sdev_gendev.kobj, "scsi");
+	if (rc)
+		goto err;
 
 	osm_info("device added (TID: %03x) channel: %d, id: %d, lun: %ld\n",
 		 i2o_dev->lct_data.tid, channel, le32_to_cpu(id),
 		 (long unsigned int)le64_to_cpu(lun));
 
 	return 0;
+
+err:
+	scsi_remove_device(scsi_dev);
+	return rc;
 };
 
 static const char *i2o_scsi_info(struct Scsi_Host *SChost)
@@ -405,8 +411,7 @@
 };
 
 /**
- *	i2o_scsi_notify_device_remove - Retrieve notifications of removed
- *				        devices
+ *	i2o_scsi_notify_device_remove - Retrieve notifications of removed devices
  *	@i2o_dev: the I2O device which was removed
  *
  *	If a I2O device is removed, we catch the notification to remove the
@@ -426,8 +431,7 @@
 };
 
 /**
- *	i2o_scsi_notify_controller_add - Retrieve notifications of added
- *					 controllers
+ *	i2o_scsi_notify_controller_add - Retrieve notifications of added controllers
  *	@c: the controller which was added
  *
  *	If a I2O controller is added, we catch the notification to add a
@@ -457,8 +461,7 @@
 };
 
 /**
- *	i2o_scsi_notify_controller_remove - Retrieve notifications of removed
- *					    controllers
+ *	i2o_scsi_notify_controller_remove - Retrieve notifications of removed controllers
  *	@c: the controller which was removed
  *
  *	If a I2O controller is removed, we catch the notification to remove the
@@ -745,7 +748,7 @@
  *	@capacity: size in sectors
  *	@ip: geometry array
  *
- *	This is anyones guess quite frankly. We use the same rules everyone
+ *	This is anyone's guess quite frankly. We use the same rules everyone
  *	else appears to and hope. It seems to work.
  */
 
diff --git a/drivers/message/i2o/pci.c b/drivers/message/i2o/pci.c
index 8287f95..3661e6e 100644
--- a/drivers/message/i2o/pci.c
+++ b/drivers/message/i2o/pci.c
@@ -259,6 +259,7 @@
 
 /**
  *	i2o_pci_irq_enable - Allocate interrupt for I2O controller
+ *	@c: i2o_controller that the request is for
  *
  *	Allocate an interrupt for the I2O controller, and activate interrupts
  *	on the I2O controller.
@@ -305,7 +306,7 @@
 
 /**
  *	i2o_pci_probe - Probe the PCI device for an I2O controller
- *	@dev: PCI device to test
+ *	@pdev: PCI device to test
  *	@id: id which matched with the PCI device id table
  *
 *	Probe the PCI device for any device which is a member of the
@@ -447,7 +448,7 @@
 
 /**
  *	i2o_pci_remove - Removes a I2O controller from the system
- *	pdev: I2O controller which should be removed
+ *	@pdev: I2O controller which should be removed
  *
  *	Reset the I2O controller, disable interrupts and remove all allocated
  *	resources.
diff --git a/drivers/mfd/ucb1x00-ts.c b/drivers/mfd/ucb1x00-ts.c
index 82938ad..ce1a481 100644
--- a/drivers/mfd/ucb1x00-ts.c
+++ b/drivers/mfd/ucb1x00-ts.c
@@ -28,7 +28,7 @@
 #include <linux/string.h>
 #include <linux/input.h>
 #include <linux/device.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <linux/slab.h>
 #include <linux/kthread.h>
 
diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c
index 1ba8754..2ab7add 100644
--- a/drivers/misc/tifm_7xx1.c
+++ b/drivers/misc/tifm_7xx1.c
@@ -33,9 +33,10 @@
 	spin_unlock_irqrestore(&fm->lock, flags);
 }
 
-static void tifm_7xx1_remove_media(void *adapter)
+static void tifm_7xx1_remove_media(struct work_struct *work)
 {
-	struct tifm_adapter *fm = adapter;
+	struct tifm_adapter *fm =
+		container_of(work, struct tifm_adapter, media_remover);
 	unsigned long flags;
 	int cnt;
 	struct tifm_dev *sock;
@@ -169,9 +170,10 @@
 	return base_addr + ((sock_num + 1) << 10);
 }
 
-static void tifm_7xx1_insert_media(void *adapter)
+static void tifm_7xx1_insert_media(struct work_struct *work)
 {
-	struct tifm_adapter *fm = adapter;
+	struct tifm_adapter *fm =
+		container_of(work, struct tifm_adapter, media_inserter);
 	unsigned long flags;
 	tifm_media_id media_id;
 	char *card_name = "xx";
@@ -261,7 +263,7 @@
 	spin_unlock_irqrestore(&fm->lock, flags);
 	flush_workqueue(fm->wq);
 
-	tifm_7xx1_remove_media(fm);
+	tifm_7xx1_remove_media(&fm->media_remover);
 
 	pci_set_power_state(dev, PCI_D3hot);
         pci_disable_device(dev);
@@ -328,8 +330,8 @@
 	if (!fm->sockets)
 		goto err_out_free;
 
-	INIT_WORK(&fm->media_inserter, tifm_7xx1_insert_media, fm);
-	INIT_WORK(&fm->media_remover, tifm_7xx1_remove_media, fm);
+	INIT_WORK(&fm->media_inserter, tifm_7xx1_insert_media);
+	INIT_WORK(&fm->media_remover, tifm_7xx1_remove_media);
 	fm->eject = tifm_7xx1_eject;
 	pci_set_drvdata(dev, fm);
 
@@ -384,7 +386,7 @@
 
 	flush_workqueue(fm->wq);
 
-	tifm_7xx1_remove_media(fm);
+	tifm_7xx1_remove_media(&fm->media_remover);
 
 	writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
 	free_irq(dev->irq, fm);
diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c
index ee32613..d61df5c 100644
--- a/drivers/misc/tifm_core.c
+++ b/drivers/misc/tifm_core.c
@@ -219,8 +219,9 @@
 	struct tifm_driver *drv = fm_dev->drv;
 
 	if (drv) {
-		if (drv->remove) drv->remove(fm_dev);
-		fm_dev->drv = 0;
+		if (drv->remove)
+			drv->remove(fm_dev);
+		fm_dev->drv = NULL;
 	}
 
 	put_device(dev);
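
The i2o and tifm hunks above all apply the same workqueue conversion: the handler
loses its void * cookie, receives the struct work_struct * itself, and recovers its
private data with container_of(). A minimal sketch of that pattern, using a
hypothetical struct foo_dev and handler name rather than any driver touched here:

#include <linux/kernel.h>
#include <linux/workqueue.h>

/* hypothetical example struct, not taken from this series */
struct foo_dev {
	struct work_struct reset_work;
	int irq;
};

/* New-style handler: takes the work item, not a void * argument. */
static void foo_reset_work(struct work_struct *work)
{
	struct foo_dev *foo = container_of(work, struct foo_dev, reset_work);

	/* ... use foo->irq and friends here ... */
}

static void foo_init(struct foo_dev *foo)
{
	/* Old API was INIT_WORK(&foo->reset_work, handler, foo); the data
	 * argument is gone because container_of() recovers it. */
	INIT_WORK(&foo->reset_work, foo_reset_work);
}

Scheduling is unchanged: schedule_work(&foo->reset_work) or
queue_work(wq, &foo->reset_work) as before.
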
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c
index 9d19002..6f2a282 100644
--- a/drivers/mmc/mmc.c
+++ b/drivers/mmc/mmc.c
@@ -1419,18 +1419,16 @@
  */
 void mmc_detect_change(struct mmc_host *host, unsigned long delay)
 {
-	if (delay)
-		mmc_schedule_delayed_work(&host->detect, delay);
-	else
-		mmc_schedule_work(&host->detect);
+	mmc_schedule_delayed_work(&host->detect, delay);
 }
 
 EXPORT_SYMBOL(mmc_detect_change);
 
 
-static void mmc_rescan(void *data)
+static void mmc_rescan(struct work_struct *work)
 {
-	struct mmc_host *host = data;
+	struct mmc_host *host =
+		container_of(work, struct mmc_host, detect.work);
 	struct list_head *l, *n;
 	unsigned char power_mode;
 
@@ -1513,7 +1511,7 @@
 		spin_lock_init(&host->lock);
 		init_waitqueue_head(&host->wq);
 		INIT_LIST_HEAD(&host->cards);
-		INIT_WORK(&host->detect, mmc_rescan, host);
+		INIT_DELAYED_WORK(&host->detect, mmc_rescan);
 
 		/*
 		 * By default, hosts do not support SGIO or large requests.
@@ -1611,7 +1609,7 @@
  */
 int mmc_resume_host(struct mmc_host *host)
 {
-	mmc_rescan(host);
+	mmc_rescan(&host->detect.work);
 
 	return 0;
 }
diff --git a/drivers/mmc/mmc.h b/drivers/mmc/mmc.h
index cd5e0ab..149affe 100644
--- a/drivers/mmc/mmc.h
+++ b/drivers/mmc/mmc.h
@@ -20,6 +20,6 @@
 void mmc_free_host_sysfs(struct mmc_host *host);
 
 int mmc_schedule_work(struct work_struct *work);
-int mmc_schedule_delayed_work(struct work_struct *work, unsigned long delay);
+int mmc_schedule_delayed_work(struct delayed_work *work, unsigned long delay);
 void mmc_flush_scheduled_work(void);
 #endif
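
Where the handler is rearmed with a delay -- host->detect above, and the abort and
watchdog handlers in the tifm_sd and 8139too hunks below -- the embedded field
becomes a struct delayed_work and container_of() goes through its .work member.
A sketch under the same hypothetical naming:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

/* hypothetical example struct, not taken from this series */
struct foo_host {
	struct delayed_work detect;	/* was struct work_struct */
};

static void foo_detect_work(struct work_struct *work)
{
	/* The work_struct is embedded in the delayed_work, hence ".work". */
	struct foo_host *host =
		container_of(work, struct foo_host, detect.work);

	/* ... rescan, then optionally rearm one second later ... */
	schedule_delayed_work(&host->detect, HZ);
}

static void foo_host_init(struct foo_host *host)
{
	INIT_DELAYED_WORK(&host->detect, foo_detect_work);
	schedule_delayed_work(&host->detect, 0);	/* delay 0: run soon */
}
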
diff --git a/drivers/mmc/mmc_sysfs.c b/drivers/mmc/mmc_sysfs.c
index ac53296..e334acd 100644
--- a/drivers/mmc/mmc_sysfs.c
+++ b/drivers/mmc/mmc_sysfs.c
@@ -321,17 +321,9 @@
 static struct workqueue_struct *workqueue;
 
 /*
- * Internal function. Schedule work in the MMC work queue.
- */
-int mmc_schedule_work(struct work_struct *work)
-{
-	return queue_work(workqueue, work);
-}
-
-/*
  * Internal function. Schedule delayed work in the MMC work queue.
  */
-int mmc_schedule_delayed_work(struct work_struct *work, unsigned long delay)
+int mmc_schedule_delayed_work(struct delayed_work *work, unsigned long delay)
 {
 	return queue_delayed_work(workqueue, work, delay);
 }
diff --git a/drivers/mmc/tifm_sd.c b/drivers/mmc/tifm_sd.c
index 0fdc55b..e846499 100644
--- a/drivers/mmc/tifm_sd.c
+++ b/drivers/mmc/tifm_sd.c
@@ -99,7 +99,7 @@
 
 	struct mmc_request    *req;
 	struct work_struct    cmd_handler;
-	struct work_struct    abort_handler;
+	struct delayed_work   abort_handler;
 	wait_queue_head_t     can_eject;
 
 	size_t                written_blocks;
@@ -496,9 +496,9 @@
 	mmc_request_done(mmc, mrq);
 }
 
-static void tifm_sd_end_cmd(void *data)
+static void tifm_sd_end_cmd(struct work_struct *work)
 {
-	struct tifm_sd *host = data;
+	struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler);
 	struct tifm_dev *sock = host->dev;
 	struct mmc_host *mmc = tifm_get_drvdata(sock);
 	struct mmc_request *mrq;
@@ -608,9 +608,9 @@
 	mmc_request_done(mmc, mrq);
 }
 
-static void tifm_sd_end_cmd_nodma(void *data)
+static void tifm_sd_end_cmd_nodma(struct work_struct *work)
 {
-	struct tifm_sd *host = (struct tifm_sd*)data;
+	struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler);
 	struct tifm_dev *sock = host->dev;
 	struct mmc_host *mmc = tifm_get_drvdata(sock);
 	struct mmc_request *mrq;
@@ -661,11 +661,14 @@
 	mmc_request_done(mmc, mrq);
 }
 
-static void tifm_sd_abort(void *data)
+static void tifm_sd_abort(struct work_struct *work)
 {
+	struct tifm_sd *host =
+		container_of(work, struct tifm_sd, abort_handler.work);
+
 	printk(KERN_ERR DRIVER_NAME
 		": card failed to respond for a long period of time");
-	tifm_eject(((struct tifm_sd*)data)->dev);
+	tifm_eject(host->dev);
 }
 
 static void tifm_sd_ios(struct mmc_host *mmc, struct mmc_ios *ios)
@@ -762,9 +765,9 @@
 	.get_ro  = tifm_sd_ro
 };
 
-static void tifm_sd_register_host(void *data)
+static void tifm_sd_register_host(struct work_struct *work)
 {
-	struct tifm_sd *host = (struct tifm_sd*)data;
+	struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler);
 	struct tifm_dev *sock = host->dev;
 	struct mmc_host *mmc = tifm_get_drvdata(sock);
 	unsigned long flags;
@@ -772,8 +775,7 @@
 	spin_lock_irqsave(&sock->lock, flags);
 	host->flags |= HOST_REG;
 	PREPARE_WORK(&host->cmd_handler,
-			no_dma ? tifm_sd_end_cmd_nodma : tifm_sd_end_cmd,
-			data);
+			no_dma ? tifm_sd_end_cmd_nodma : tifm_sd_end_cmd);
 	spin_unlock_irqrestore(&sock->lock, flags);
 	dev_dbg(&sock->dev, "adding host\n");
 	mmc_add_host(mmc);
@@ -799,8 +801,8 @@
 	host->dev = sock;
 	host->clk_div = 61;
 	init_waitqueue_head(&host->can_eject);
-	INIT_WORK(&host->cmd_handler, tifm_sd_register_host, host);
-	INIT_WORK(&host->abort_handler, tifm_sd_abort, host);
+	INIT_WORK(&host->cmd_handler, tifm_sd_register_host);
+	INIT_DELAYED_WORK(&host->abort_handler, tifm_sd_abort);
 
 	tifm_set_drvdata(sock, mmc);
 	sock->signal_irq = tifm_sd_signal_irq;
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index ef4a731..334e078f 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -451,7 +451,7 @@
 		return -ENODEV;
 	}
 
-	flash = kzalloc(sizeof *flash, SLAB_KERNEL);
+	flash = kzalloc(sizeof *flash, GFP_KERNEL);
 	if (!flash)
 		return -ENOMEM;
 
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index d02ed51..931028f 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -594,7 +594,7 @@
 	u32 rx_config;
 	struct rtl_extra_stats xstats;
 
-	struct work_struct thread;
+	struct delayed_work thread;
 
 	struct mii_if_info mii;
 	unsigned int regs_len;
@@ -636,8 +636,8 @@
 static void rtl8139_set_rx_mode (struct net_device *dev);
 static void __set_rx_mode (struct net_device *dev);
 static void rtl8139_hw_start (struct net_device *dev);
-static void rtl8139_thread (void *_data);
-static void rtl8139_tx_timeout_task(void *_data);
+static void rtl8139_thread (struct work_struct *work);
+static void rtl8139_tx_timeout_task(struct work_struct *work);
 static const struct ethtool_ops rtl8139_ethtool_ops;
 
 /* write MMIO register, with flush */
@@ -1010,7 +1010,7 @@
 		(debug < 0 ? RTL8139_DEF_MSG_ENABLE : ((1 << debug) - 1));
 	spin_lock_init (&tp->lock);
 	spin_lock_init (&tp->rx_lock);
-	INIT_WORK(&tp->thread, rtl8139_thread, dev);
+	INIT_DELAYED_WORK(&tp->thread, rtl8139_thread);
 	tp->mii.dev = dev;
 	tp->mii.mdio_read = mdio_read;
 	tp->mii.mdio_write = mdio_write;
@@ -1596,15 +1596,16 @@
 		 RTL_R8 (Config1));
 }
 
-static void rtl8139_thread (void *_data)
+static void rtl8139_thread (struct work_struct *work)
 {
-	struct net_device *dev = _data;
-	struct rtl8139_private *tp = netdev_priv(dev);
+	struct rtl8139_private *tp =
+		container_of(work, struct rtl8139_private, thread.work);
+	struct net_device *dev = tp->mii.dev;
 	unsigned long thr_delay = next_tick;
 
 	if (tp->watchdog_fired) {
 		tp->watchdog_fired = 0;
-		rtl8139_tx_timeout_task(_data);
+		rtl8139_tx_timeout_task(work);
 	} else if (rtnl_trylock()) {
 		rtl8139_thread_iter (dev, tp, tp->mmio_addr);
 		rtnl_unlock ();
@@ -1646,10 +1647,11 @@
 	/* XXX account for unsent Tx packets in tp->stats.tx_dropped */
 }
 
-static void rtl8139_tx_timeout_task (void *_data)
+static void rtl8139_tx_timeout_task (struct work_struct *work)
 {
-	struct net_device *dev = _data;
-	struct rtl8139_private *tp = netdev_priv(dev);
+	struct rtl8139_private *tp =
+		container_of(work, struct rtl8139_private, thread.work);
+	struct net_device *dev = tp->mii.dev;
 	void __iomem *ioaddr = tp->mmio_addr;
 	int i;
 	u8 tmp8;
@@ -1695,7 +1697,7 @@
 	struct rtl8139_private *tp = netdev_priv(dev);
 
 	if (!tp->have_thread) {
-		INIT_WORK(&tp->thread, rtl8139_tx_timeout_task, dev);
+		INIT_DELAYED_WORK(&tp->thread, rtl8139_tx_timeout_task);
 		schedule_delayed_work(&tp->thread, next_tick);
 	} else
 		tp->watchdog_fired = 1;
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index fc2f1d1..5bacb75 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -4411,9 +4411,9 @@
 }
 
 static void
-bnx2_reset_task(void *data)
+bnx2_reset_task(struct work_struct *work)
 {
-	struct bnx2 *bp = data;
+	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
 
 	if (!netif_running(bp->dev))
 		return;
@@ -5702,7 +5702,7 @@
 	bp->pdev = pdev;
 
 	spin_lock_init(&bp->phy_lock);
-	INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
+	INIT_WORK(&bp->reset_task, bnx2_reset_task);
 
 	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
 	mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index fd2cc13..c812648 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -4066,9 +4066,9 @@
 	return 0;
 }
 
-static void cas_reset_task(void *data)
+static void cas_reset_task(struct work_struct *work)
 {
-	struct cas *cp = (struct cas *) data;
+	struct cas *cp = container_of(work, struct cas, reset_task);
 #if 0
 	int pending = atomic_read(&cp->reset_task_pending);
 #else
@@ -5006,7 +5006,7 @@
 	atomic_set(&cp->reset_task_pending_spare, 0);
 	atomic_set(&cp->reset_task_pending_mtu, 0);
 #endif
-	INIT_WORK(&cp->reset_task, cas_reset_task, cp);
+	INIT_WORK(&cp->reset_task, cas_reset_task);
 
 	/* Default link parameters */
 	if (link_mode >= 0 && link_mode <= 6)
diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h
index b265941..74758d2 100644
--- a/drivers/net/chelsio/common.h
+++ b/drivers/net/chelsio/common.h
@@ -279,7 +279,7 @@
 	struct petp   *tp;
 
 	struct port_info port[MAX_NPORTS];
-	struct work_struct stats_update_task;
+	struct delayed_work stats_update_task;
 	struct timer_list stats_update_timer;
 
 	spinlock_t tpi_lock;
diff --git a/drivers/net/chelsio/cphy.h b/drivers/net/chelsio/cphy.h
index 60901f2..cf91434 100644
--- a/drivers/net/chelsio/cphy.h
+++ b/drivers/net/chelsio/cphy.h
@@ -91,7 +91,7 @@
 	int state;	/* Link status state machine */
 	adapter_t *adapter;                  /* associated adapter */
 
-	struct work_struct phy_update;
+	struct delayed_work phy_update;
 
 	u16 bmsr;
 	int count;
diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c
index 53bec67..de48ead 100644
--- a/drivers/net/chelsio/cxgb2.c
+++ b/drivers/net/chelsio/cxgb2.c
@@ -953,10 +953,11 @@
  * Periodic accumulation of MAC statistics.  This is used only if the MAC
  * does not have any other way to prevent stats counter overflow.
  */
-static void mac_stats_task(void *data)
+static void mac_stats_task(struct work_struct *work)
 {
 	int i;
-	struct adapter *adapter = data;
+	struct adapter *adapter =
+		container_of(work, struct adapter, stats_update_task.work);
 
 	for_each_port(adapter, i) {
 		struct port_info *p = &adapter->port[i];
@@ -977,9 +978,10 @@
 /*
  * Processes elmer0 external interrupts in process context.
  */
-static void ext_intr_task(void *data)
+static void ext_intr_task(struct work_struct *work)
 {
-	struct adapter *adapter = data;
+	struct adapter *adapter =
+		container_of(work, struct adapter, ext_intr_handler_task);
 
 	t1_elmer0_ext_intr_handler(adapter);
 
@@ -1113,9 +1115,9 @@
 			spin_lock_init(&adapter->mac_lock);
 
 			INIT_WORK(&adapter->ext_intr_handler_task,
-				  ext_intr_task, adapter);
-			INIT_WORK(&adapter->stats_update_task, mac_stats_task,
-				  adapter);
+				  ext_intr_task);
+			INIT_DELAYED_WORK(&adapter->stats_update_task,
+					  mac_stats_task);
 
 			pci_set_drvdata(pdev, netdev);
 		}
diff --git a/drivers/net/chelsio/my3126.c b/drivers/net/chelsio/my3126.c
index 0b90014..c7731b6 100644
--- a/drivers/net/chelsio/my3126.c
+++ b/drivers/net/chelsio/my3126.c
@@ -93,9 +93,11 @@
 	return cphy_cause_link_change;
 }
 
-static void my3216_poll(void *arg)
+static void my3216_poll(struct work_struct *work)
 {
-	my3126_interrupt_handler(arg);
+	struct cphy *cphy = container_of(work, struct cphy, phy_update.work);
+
+	my3126_interrupt_handler(cphy);
 }
 
 static int my3126_set_loopback(struct cphy *cphy, int on)
@@ -171,7 +173,7 @@
 	if (cphy)
 		cphy_init(cphy, adapter, phy_addr, &my3126_ops, mdio_ops);
 
-	INIT_WORK(&cphy->phy_update, my3216_poll, cphy);
+	INIT_DELAYED_WORK(&cphy->phy_update, my3216_poll);
 	cphy->bmsr = 0;
 
 	return (cphy);
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 3a8df47..03bf164 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -2102,9 +2102,10 @@
 	schedule_work(&nic->tx_timeout_task);
 }
 
-static void e100_tx_timeout_task(struct net_device *netdev)
+static void e100_tx_timeout_task(struct work_struct *work)
 {
-	struct nic *nic = netdev_priv(netdev);
+	struct nic *nic = container_of(work, struct nic, tx_timeout_task);
+	struct net_device *netdev = nic->netdev;
 
 	DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
 		readb(&nic->csr->scb.status));
@@ -2637,8 +2638,7 @@
 	nic->blink_timer.function = e100_blink_led;
 	nic->blink_timer.data = (unsigned long)nic;
 
-	INIT_WORK(&nic->tx_timeout_task,
-		(void (*)(void *))e100_tx_timeout_task, netdev);
+	INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
 
 	if((err = e100_alloc(nic))) {
 		DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 32dde0a..73f3a85 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -190,7 +190,7 @@
 static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
 static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
 static void e1000_tx_timeout(struct net_device *dev);
-static void e1000_reset_task(struct net_device *dev);
+static void e1000_reset_task(struct work_struct *work);
 static void e1000_smartspeed(struct e1000_adapter *adapter);
 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
                                        struct sk_buff *skb);
@@ -914,8 +914,7 @@
 	adapter->phy_info_timer.function = &e1000_update_phy_info;
 	adapter->phy_info_timer.data = (unsigned long) adapter;
 
-	INIT_WORK(&adapter->reset_task,
-		(void (*)(void *))e1000_reset_task, netdev);
+	INIT_WORK(&adapter->reset_task, e1000_reset_task);
 
 	e1000_check_options(adapter);
 
@@ -3306,9 +3305,10 @@
 }
 
 static void
-e1000_reset_task(struct net_device *netdev)
+e1000_reset_task(struct work_struct *work)
 {
-	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_adapter *adapter =
+		container_of(work, struct e1000_adapter, reset_task);
 
 	e1000_reinit_locked(adapter);
 }
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 6ad6961..83fa32f 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -2224,11 +2224,12 @@
 	return ret;
 }
 
-static void ehea_reset_port(void *data)
+static void ehea_reset_port(struct work_struct *work)
 {
 	int ret;
-	struct net_device *dev = data;
-	struct ehea_port *port = netdev_priv(dev);
+	struct ehea_port *port =
+		container_of(work, struct ehea_port, reset_task);
+	struct net_device *dev = port->netdev;
 
 	port->resets++;
 	down(&port->port_lock);
@@ -2379,7 +2380,7 @@
 	dev->tx_timeout = &ehea_tx_watchdog;
 	dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
 
-	INIT_WORK(&port->reset_task, ehea_reset_port, dev);
+	INIT_WORK(&port->reset_task, ehea_reset_port);
 
 	ehea_set_ethtool_ops(dev);
 
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 1ed9ccc..3c33d6f 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -168,8 +168,9 @@
 	int magic;
 
         struct pardevice *pdev;
+	struct net_device *dev;
 	unsigned int work_running;
-	struct work_struct run_work;
+	struct delayed_work run_work;
 	unsigned int modem;
 	unsigned int bitrate;
 	unsigned char stat;
@@ -659,16 +660,18 @@
 #define GETTICK(x)
 #endif /* __i386__ */
 
-static void epp_bh(struct net_device *dev)
+static void epp_bh(struct work_struct *work)
 {
+	struct net_device *dev;
 	struct baycom_state *bc;
 	struct parport *pp;
 	unsigned char stat;
 	unsigned char tmp[2];
 	unsigned int time1 = 0, time2 = 0, time3 = 0;
 	int cnt, cnt2;
-	
-	bc = netdev_priv(dev);
+
+	bc = container_of(work, struct baycom_state, run_work.work);
+	dev = bc->dev;
 	if (!bc->work_running)
 		return;
 	baycom_int_freq(bc);
@@ -889,7 +892,7 @@
                 return -EBUSY;
         }
         dev->irq = /*pp->irq*/ 0;
-	INIT_WORK(&bc->run_work, (void *)(void *)epp_bh, dev);
+	INIT_DELAYED_WORK(&bc->run_work, epp_bh);
 	bc->work_running = 1;
 	bc->modem = EPP_CONVENTIONAL;
 	if (eppconfig(bc))
@@ -1213,6 +1216,7 @@
 	/*
 	 * initialize part of the baycom_state struct
 	 */
+	bc->dev = dev;
 	bc->magic = BAYCOM_MAGIC;
 	bc->cfg.fclk = 19666600;
 	bc->cfg.bps = 9600;
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c
index 0f8b9af..e6e721a 100644
--- a/drivers/net/hamradio/dmascc.c
+++ b/drivers/net/hamradio/dmascc.c
@@ -252,7 +252,7 @@
 static irqreturn_t scc_isr(int irq, void *dev_id);
 static void rx_isr(struct scc_priv *priv);
 static void special_condition(struct scc_priv *priv, int rc);
-static void rx_bh(void *arg);
+static void rx_bh(struct work_struct *);
 static void tx_isr(struct scc_priv *priv);
 static void es_isr(struct scc_priv *priv);
 static void tm_isr(struct scc_priv *priv);
@@ -579,7 +579,7 @@
 		priv->param.clocks = TCTRxCP | RCRTxCP;
 		priv->param.persist = 256;
 		priv->param.dma = -1;
-		INIT_WORK(&priv->rx_work, rx_bh, priv);
+		INIT_WORK(&priv->rx_work, rx_bh);
 		dev->priv = priv;
 		sprintf(dev->name, "dmascc%i", 2 * n + i);
 		dev->base_addr = card_base;
@@ -1272,9 +1272,9 @@
 }
 
 
-static void rx_bh(void *arg)
+static void rx_bh(struct work_struct *ugli_api)
 {
-	struct scc_priv *priv = arg;
+	struct scc_priv *priv = container_of(ugli_api, struct scc_priv, rx_work);
 	int i = priv->rx_tail;
 	int cb;
 	unsigned long flags;
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index b32c52e..f0c61f3 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -560,9 +560,9 @@
 	return ret;
 }
 
-static void mcs_speed_work(void *arg)
+static void mcs_speed_work(struct work_struct *work)
 {
-	struct mcs_cb *mcs = arg;
+	struct mcs_cb *mcs = container_of(work, struct mcs_cb, work);
 	struct net_device *netdev = mcs->netdev;
 
 	mcs_speed_change(mcs);
@@ -927,7 +927,7 @@
 	irda_qos_bits_to_value(&mcs->qos);
 
 	/* Speed change work initialisation*/
-	INIT_WORK(&mcs->work, mcs_speed_work, mcs);
+	INIT_WORK(&mcs->work, mcs_speed_work);
 
 	/* Override the network functions we need to use */
 	ndev->hard_start_xmit = mcs_hard_xmit;
diff --git a/drivers/net/irda/sir-dev.h b/drivers/net/irda/sir-dev.h
index 9fa294a..2a57bc6 100644
--- a/drivers/net/irda/sir-dev.h
+++ b/drivers/net/irda/sir-dev.h
@@ -22,7 +22,7 @@
 
 struct sir_fsm {
 	struct semaphore	sem;
-	struct work_struct      work;
+	struct delayed_work	work;
 	unsigned		state, substate;
 	int			param;
 	int			result;
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
index 3b5854d..17b0c3a 100644
--- a/drivers/net/irda/sir_dev.c
+++ b/drivers/net/irda/sir_dev.c
@@ -100,9 +100,9 @@
  * Both must be unlocked/restarted on completion - but only on final exit.
  */
 
-static void sirdev_config_fsm(void *data)
+static void sirdev_config_fsm(struct work_struct *work)
 {
-	struct sir_dev *dev = data;
+	struct sir_dev *dev = container_of(work, struct sir_dev, fsm.work.work);
 	struct sir_fsm *fsm = &dev->fsm;
 	int next_state;
 	int ret = -1;
@@ -309,8 +309,8 @@
 	fsm->param = param;
 	fsm->result = 0;
 
-	INIT_WORK(&fsm->work, sirdev_config_fsm, dev);
-	queue_work(irda_sir_wq, &fsm->work);
+	INIT_DELAYED_WORK(&fsm->work, sirdev_config_fsm);
+	queue_delayed_work(irda_sir_wq, &fsm->work, 0);
 	return 0;
 }
 
diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c
index 3b4c478..c14a746 100644
--- a/drivers/net/irda/stir4200.c
+++ b/drivers/net/irda/stir4200.c
@@ -50,6 +50,7 @@
 #include <linux/usb.h>
 #include <linux/crc32.h>
 #include <linux/kthread.h>
+#include <linux/freezer.h>
 #include <net/irda/irda.h>
 #include <net/irda/irlap.h>
 #include <net/irda/irda_device.h>
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index 2284e2c..d6f4f18 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -166,7 +166,7 @@
 
 struct veth_lpar_connection {
 	HvLpIndex remote_lp;
-	struct work_struct statemachine_wq;
+	struct delayed_work statemachine_wq;
 	struct veth_msg *msgs;
 	int num_events;
 	struct veth_cap_data local_caps;
@@ -456,7 +456,7 @@
 
 static inline void veth_kick_statemachine(struct veth_lpar_connection *cnx)
 {
-	schedule_work(&cnx->statemachine_wq);
+	schedule_delayed_work(&cnx->statemachine_wq, 0);
 }
 
 static void veth_take_cap(struct veth_lpar_connection *cnx,
@@ -638,9 +638,11 @@
 }
 
 /* FIXME: The gotos here are a bit dubious */
-static void veth_statemachine(void *p)
+static void veth_statemachine(struct work_struct *work)
 {
-	struct veth_lpar_connection *cnx = (struct veth_lpar_connection *)p;
+	struct veth_lpar_connection *cnx =
+		container_of(work, struct veth_lpar_connection,
+			     statemachine_wq.work);
 	int rlp = cnx->remote_lp;
 	int rc;
 
@@ -827,7 +829,7 @@
 
 	cnx->remote_lp = rlp;
 	spin_lock_init(&cnx->lock);
-	INIT_WORK(&cnx->statemachine_wq, veth_statemachine, cnx);
+	INIT_DELAYED_WORK(&cnx->statemachine_wq, veth_statemachine);
 
 	init_timer(&cnx->ack_timer);
 	cnx->ack_timer.function = veth_timed_ack;
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 7b12721..e628126 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -106,7 +106,7 @@
 static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter);
 void ixgb_set_ethtool_ops(struct net_device *netdev);
 static void ixgb_tx_timeout(struct net_device *dev);
-static void ixgb_tx_timeout_task(struct net_device *dev);
+static void ixgb_tx_timeout_task(struct work_struct *work);
 static void ixgb_vlan_rx_register(struct net_device *netdev,
 				  struct vlan_group *grp);
 static void ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
@@ -489,8 +489,7 @@
 	adapter->watchdog_timer.function = &ixgb_watchdog;
 	adapter->watchdog_timer.data = (unsigned long)adapter;
 
-	INIT_WORK(&adapter->tx_timeout_task,
-		  (void (*)(void *))ixgb_tx_timeout_task, netdev);
+	INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task);
 
 	strcpy(netdev->name, "eth%d");
 	if((err = register_netdev(netdev)))
@@ -1493,9 +1492,10 @@
 }
 
 static void
-ixgb_tx_timeout_task(struct net_device *netdev)
+ixgb_tx_timeout_task(struct work_struct *work)
 {
-	struct ixgb_adapter *adapter = netdev_priv(netdev);
+	struct ixgb_adapter *adapter =
+		container_of(work, struct ixgb_adapter, tx_timeout_task);
 
 	adapter->tx_timeout_count++;
 	ixgb_down(adapter, TRUE);
diff --git a/drivers/net/lasi_82596.c b/drivers/net/lasi_82596.c
index f4d815b..ea392f2 100644
--- a/drivers/net/lasi_82596.c
+++ b/drivers/net/lasi_82596.c
@@ -119,14 +119,14 @@
 #define DEB(x,y)	if (i596_debug & (x)) { y; }
 
 
-#define  CHECK_WBACK(addr,len) \
-	do { dma_cache_sync((void *)addr, len, DMA_TO_DEVICE); } while (0)
+#define  CHECK_WBACK(priv, addr,len) \
+	do { dma_cache_sync((priv)->dev, (void *)addr, len, DMA_TO_DEVICE); } while (0)
 
-#define  CHECK_INV(addr,len) \
-	do { dma_cache_sync((void *)addr, len, DMA_FROM_DEVICE); } while(0)
+#define  CHECK_INV(priv, addr,len) \
+	do { dma_cache_sync((priv)->dev, (void *)addr, len, DMA_FROM_DEVICE); } while(0)
 
-#define  CHECK_WBACK_INV(addr,len) \
-	do { dma_cache_sync((void *)addr, len, DMA_BIDIRECTIONAL); } while (0)
+#define  CHECK_WBACK_INV(priv, addr,len) \
+	do { dma_cache_sync((priv)->dev, (void *)addr, len, DMA_BIDIRECTIONAL); } while (0)
 
 
 #define PA_I82596_RESET		0	/* Offsets relative to LASI-LAN-Addr.*/
@@ -449,10 +449,10 @@
 
 static inline int wait_istat(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
 {
-	CHECK_INV(&(lp->iscp), sizeof(struct i596_iscp));
+	CHECK_INV(lp, &(lp->iscp), sizeof(struct i596_iscp));
 	while (--delcnt && lp->iscp.stat) {
 		udelay(10);
-		CHECK_INV(&(lp->iscp), sizeof(struct i596_iscp));
+		CHECK_INV(lp, &(lp->iscp), sizeof(struct i596_iscp));
 	}
 	if (!delcnt) {
 		printk("%s: %s, iscp.stat %04x, didn't clear\n",
@@ -466,10 +466,10 @@
 
 static inline int wait_cmd(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
 {
-	CHECK_INV(&(lp->scb), sizeof(struct i596_scb));
+	CHECK_INV(lp, &(lp->scb), sizeof(struct i596_scb));
 	while (--delcnt && lp->scb.command) {
 		udelay(10);
-		CHECK_INV(&(lp->scb), sizeof(struct i596_scb));
+		CHECK_INV(lp, &(lp->scb), sizeof(struct i596_scb));
 	}
 	if (!delcnt) {
 		printk("%s: %s, status %4.4x, cmd %4.4x.\n",
@@ -522,7 +522,7 @@
 			rbd, rbd->count, rbd->b_next, rbd->b_data, rbd->size);
 		rbd = rbd->v_next;
 	} while (rbd != lp->rbd_head);
-	CHECK_INV(lp, sizeof(struct i596_private));
+	CHECK_INV(lp, lp, sizeof(struct i596_private));
 }
 
 
@@ -592,7 +592,7 @@
 	rfd->b_next = WSWAPrfd(virt_to_dma(lp,lp->rfds));
 	rfd->cmd = CMD_EOL|CMD_FLEX;
 
-	CHECK_WBACK_INV(lp, sizeof(struct i596_private));
+	CHECK_WBACK_INV(lp, lp, sizeof(struct i596_private));
 }
 
 static inline void remove_rx_bufs(struct net_device *dev)
@@ -629,7 +629,7 @@
 	lp->rbd_head = lp->rbds;
 	lp->rfds[0].rbd = WSWAPrbd(virt_to_dma(lp,lp->rbds));
 
-	CHECK_WBACK_INV(lp, sizeof(struct i596_private));
+	CHECK_WBACK_INV(lp, lp, sizeof(struct i596_private));
 }
 
 
@@ -663,8 +663,8 @@
 
 	DEB(DEB_INIT, printk("%s: starting i82596.\n", dev->name));
 
-	CHECK_WBACK(&(lp->scp), sizeof(struct i596_scp));
-	CHECK_WBACK(&(lp->iscp), sizeof(struct i596_iscp));
+	CHECK_WBACK(lp, &(lp->scp), sizeof(struct i596_scp));
+	CHECK_WBACK(lp, &(lp->iscp), sizeof(struct i596_iscp));
 
 	MPU_PORT(dev, PORT_ALTSCP, virt_to_dma(lp,&lp->scp));
 
@@ -678,25 +678,25 @@
 	rebuild_rx_bufs(dev);
 
 	lp->scb.command = 0;
-	CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
+	CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));
 
 	enable_irq(dev->irq);	/* enable IRQs from LAN */
 
 	DEB(DEB_INIT, printk("%s: queuing CmdConfigure\n", dev->name));
 	memcpy(lp->cf_cmd.i596_config, init_setup, 14);
 	lp->cf_cmd.cmd.command = CmdConfigure;
-	CHECK_WBACK(&(lp->cf_cmd), sizeof(struct cf_cmd));
+	CHECK_WBACK(lp, &(lp->cf_cmd), sizeof(struct cf_cmd));
 	i596_add_cmd(dev, &lp->cf_cmd.cmd);
 
 	DEB(DEB_INIT, printk("%s: queuing CmdSASetup\n", dev->name));
 	memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, 6);
 	lp->sa_cmd.cmd.command = CmdSASetup;
-	CHECK_WBACK(&(lp->sa_cmd), sizeof(struct sa_cmd));
+	CHECK_WBACK(lp, &(lp->sa_cmd), sizeof(struct sa_cmd));
 	i596_add_cmd(dev, &lp->sa_cmd.cmd);
 
 	DEB(DEB_INIT, printk("%s: queuing CmdTDR\n", dev->name));
 	lp->tdr_cmd.cmd.command = CmdTDR;
-	CHECK_WBACK(&(lp->tdr_cmd), sizeof(struct tdr_cmd));
+	CHECK_WBACK(lp, &(lp->tdr_cmd), sizeof(struct tdr_cmd));
 	i596_add_cmd(dev, &lp->tdr_cmd.cmd);
 
 	spin_lock_irqsave (&lp->lock, flags);
@@ -708,7 +708,7 @@
 	DEB(DEB_INIT, printk("%s: Issuing RX_START\n", dev->name));
 	lp->scb.command = RX_START;
 	lp->scb.rfd = WSWAPrfd(virt_to_dma(lp,lp->rfds));
-	CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
+	CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));
 
 	CA(dev);
 
@@ -740,13 +740,13 @@
 
 	rfd = lp->rfd_head;		/* Ref next frame to check */
 
-	CHECK_INV(rfd, sizeof(struct i596_rfd));
+	CHECK_INV(lp, rfd, sizeof(struct i596_rfd));
 	while ((rfd->stat) & STAT_C) {	/* Loop while complete frames */
 		if (rfd->rbd == I596_NULL)
 			rbd = NULL;
 		else if (rfd->rbd == lp->rbd_head->b_addr) {
 			rbd = lp->rbd_head;
-			CHECK_INV(rbd, sizeof(struct i596_rbd));
+			CHECK_INV(lp, rbd, sizeof(struct i596_rbd));
 		}
 		else {
 			printk("%s: rbd chain broken!\n", dev->name);
@@ -790,7 +790,7 @@
 				dma_addr = dma_map_single(lp->dev, newskb->data, PKT_BUF_SZ, DMA_FROM_DEVICE);
 				rbd->v_data = newskb->data;
 				rbd->b_data = WSWAPchar(dma_addr);
-				CHECK_WBACK_INV(rbd, sizeof(struct i596_rbd));
+				CHECK_WBACK_INV(lp, rbd, sizeof(struct i596_rbd));
 			}
 			else
 				skb = dev_alloc_skb(pkt_len + 2);
@@ -842,7 +842,7 @@
 		if (rbd != NULL && (rbd->count & 0x4000)) {
 			rbd->count = 0;
 			lp->rbd_head = rbd->v_next;
-			CHECK_WBACK_INV(rbd, sizeof(struct i596_rbd));
+			CHECK_WBACK_INV(lp, rbd, sizeof(struct i596_rbd));
 		}
 
 		/* Tidy the frame descriptor, marking it as end of list */
@@ -860,10 +860,10 @@
 
 		lp->scb.rfd = rfd->b_next;
 		lp->rfd_head = rfd->v_next;
-		CHECK_WBACK_INV(rfd->v_prev, sizeof(struct i596_rfd));
-		CHECK_WBACK_INV(rfd, sizeof(struct i596_rfd));
+		CHECK_WBACK_INV(lp, rfd->v_prev, sizeof(struct i596_rfd));
+		CHECK_WBACK_INV(lp, rfd, sizeof(struct i596_rfd));
 		rfd = lp->rfd_head;
-		CHECK_INV(rfd, sizeof(struct i596_rfd));
+		CHECK_INV(lp, rfd, sizeof(struct i596_rfd));
 	}
 
 	DEB(DEB_RXFRAME, printk("frames %d\n", frames));
@@ -902,12 +902,12 @@
 			ptr->v_next = NULL;
 			ptr->b_next = I596_NULL;
 		}
-		CHECK_WBACK_INV(ptr, sizeof(struct i596_cmd));
+		CHECK_WBACK_INV(lp, ptr, sizeof(struct i596_cmd));
 	}
 
 	wait_cmd(dev, lp, 100, "i596_cleanup_cmd timed out");
 	lp->scb.cmd = I596_NULL;
-	CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
+	CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));
 }
 
 
@@ -925,7 +925,7 @@
 
 	/* FIXME: this command might cause an lpmc */
 	lp->scb.command = CUC_ABORT | RX_ABORT;
-	CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
+	CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));
 	CA(dev);
 
 	/* wait for shutdown */
@@ -951,20 +951,20 @@
 	cmd->command |= (CMD_EOL | CMD_INTR);
 	cmd->v_next = NULL;
 	cmd->b_next = I596_NULL;
-	CHECK_WBACK(cmd, sizeof(struct i596_cmd));
+	CHECK_WBACK(lp, cmd, sizeof(struct i596_cmd));
 
 	spin_lock_irqsave (&lp->lock, flags);
 
 	if (lp->cmd_head != NULL) {
 		lp->cmd_tail->v_next = cmd;
 		lp->cmd_tail->b_next = WSWAPcmd(virt_to_dma(lp,&cmd->status));
-		CHECK_WBACK(lp->cmd_tail, sizeof(struct i596_cmd));
+		CHECK_WBACK(lp, lp->cmd_tail, sizeof(struct i596_cmd));
 	} else {
 		lp->cmd_head = cmd;
 		wait_cmd(dev, lp, 100, "i596_add_cmd timed out");
 		lp->scb.cmd = WSWAPcmd(virt_to_dma(lp,&cmd->status));
 		lp->scb.command = CUC_START;
-		CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
+		CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));
 		CA(dev);
 	}
 	lp->cmd_tail = cmd;
@@ -998,12 +998,12 @@
 	data = virt_to_dma(lp,tint);
 
 	tint[1] = -1;
-	CHECK_WBACK(tint,PAGE_SIZE);
+	CHECK_WBACK(lp, tint, PAGE_SIZE);
 
 	MPU_PORT(dev, 1, data);
 
 	for(data = 1000000; data; data--) {
-		CHECK_INV(tint,PAGE_SIZE);
+		CHECK_INV(lp, tint, PAGE_SIZE);
 		if(tint[1] != -1)
 			break;
 
@@ -1061,7 +1061,7 @@
 		/* Issue a channel attention signal */
 		DEB(DEB_ERRORS, printk("Kicking board.\n"));
 		lp->scb.command = CUC_START | RX_START;
-		CHECK_WBACK_INV(&(lp->scb), sizeof(struct i596_scb));
+		CHECK_WBACK_INV(lp, &(lp->scb), sizeof(struct i596_scb));
 		CA (dev);
 		lp->last_restart = lp->stats.tx_packets;
 	}
@@ -1118,8 +1118,8 @@
 		tbd->data = WSWAPchar(tx_cmd->dma_addr);
 
 		DEB(DEB_TXADDR,print_eth(skb->data, "tx-queued"));
-		CHECK_WBACK_INV(tx_cmd, sizeof(struct tx_cmd));
-		CHECK_WBACK_INV(tbd, sizeof(struct i596_tbd));
+		CHECK_WBACK_INV(lp, tx_cmd, sizeof(struct tx_cmd));
+		CHECK_WBACK_INV(lp, tbd, sizeof(struct i596_tbd));
 		i596_add_cmd(dev, &tx_cmd->cmd);
 
 		lp->stats.tx_packets++;
@@ -1228,7 +1228,7 @@
 	lp->dma_addr = dma_addr;
 	lp->dev = gen_dev;
 
-	CHECK_WBACK_INV(dev->mem_start, sizeof(struct i596_private));
+	CHECK_WBACK_INV(lp, dev->mem_start, sizeof(struct i596_private));
 
 	i = register_netdev(dev);
 	if (i) {
@@ -1295,7 +1295,7 @@
 			DEB(DEB_INTS, printk("%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700));
 
 		while (lp->cmd_head != NULL) {
-			CHECK_INV(lp->cmd_head, sizeof(struct i596_cmd));
+			CHECK_INV(lp, lp->cmd_head, sizeof(struct i596_cmd));
 			if (!(lp->cmd_head->status & STAT_C))
 				break;
 
@@ -1358,7 +1358,7 @@
 			}
 			ptr->v_next = NULL;
 		        ptr->b_next = I596_NULL;
-			CHECK_WBACK(ptr, sizeof(struct i596_cmd));
+			CHECK_WBACK(lp, ptr, sizeof(struct i596_cmd));
 			lp->last_cmd = jiffies;
 		}
 
@@ -1372,13 +1372,13 @@
 
 			ptr->command &= 0x1fff;
 			ptr = ptr->v_next;
-			CHECK_WBACK_INV(prev, sizeof(struct i596_cmd));
+			CHECK_WBACK_INV(lp, prev, sizeof(struct i596_cmd));
 		}
 
 		if ((lp->cmd_head != NULL))
 			ack_cmd |= CUC_START;
 		lp->scb.cmd = WSWAPcmd(virt_to_dma(lp,&lp->cmd_head->status));
-		CHECK_WBACK_INV(&lp->scb, sizeof(struct i596_scb));
+		CHECK_WBACK_INV(lp, &lp->scb, sizeof(struct i596_scb));
 	}
 	if ((status & 0x1000) || (status & 0x4000)) {
 		if ((status & 0x4000))
@@ -1397,7 +1397,7 @@
 	}
 	wait_cmd(dev, lp, 100, "i596 interrupt, timeout");
 	lp->scb.command = ack_cmd;
-	CHECK_WBACK(&lp->scb, sizeof(struct i596_scb));
+	CHECK_WBACK(lp, &lp->scb, sizeof(struct i596_scb));
 
 	/* DANGER: I suspect that some kind of interrupt
 	 acknowledgement aside from acking the 82596 might be needed
@@ -1426,7 +1426,7 @@
 
 	wait_cmd(dev, lp, 100, "close1 timed out");
 	lp->scb.command = CUC_ABORT | RX_ABORT;
-	CHECK_WBACK(&lp->scb, sizeof(struct i596_scb));
+	CHECK_WBACK(lp, &lp->scb, sizeof(struct i596_scb));
 
 	CA(dev);
 
@@ -1486,7 +1486,7 @@
 			       dev->name);
 		else {
 			lp->cf_cmd.cmd.command = CmdConfigure;
-			CHECK_WBACK_INV(&lp->cf_cmd, sizeof(struct cf_cmd));
+			CHECK_WBACK_INV(lp, &lp->cf_cmd, sizeof(struct cf_cmd));
 			i596_add_cmd(dev, &lp->cf_cmd.cmd);
 		}
 	}
@@ -1514,7 +1514,7 @@
 				DEB(DEB_MULTI, printk("%s: Adding address %02x:%02x:%02x:%02x:%02x:%02x\n",
 						dev->name, cp[0],cp[1],cp[2],cp[3],cp[4],cp[5]));
 		}
-		CHECK_WBACK_INV(&lp->mc_cmd, sizeof(struct mc_cmd));
+		CHECK_WBACK_INV(lp, &lp->mc_cmd, sizeof(struct mc_cmd));
 		i596_add_cmd(dev, &cmd->cmd);
 	}
 }
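
The CHECK_WBACK/CHECK_INV macro changes above follow dma_cache_sync(), which now
takes the struct device performing the DMA as its first argument. A minimal sketch
of a caller, with a hypothetical foo_priv standing in for the driver's private data:

#include <linux/dma-mapping.h>

/* hypothetical example struct, not taken from this series */
struct foo_priv {
	struct device *dev;	/* device the descriptors are mapped for */
	void *desc;		/* descriptor memory shared with the device */
};

static void foo_push_desc(struct foo_priv *priv, size_t len)
{
	/* Write back the CPU's view before the device reads the descriptor. */
	dma_cache_sync(priv->dev, priv->desc, len, DMA_TO_DEVICE);
}

static void foo_pull_desc(struct foo_priv *priv, size_t len)
{
	/* Invalidate before the CPU reads data the device has written. */
	dma_cache_sync(priv->dev, priv->desc, len, DMA_FROM_DEVICE);
}
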
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 9997081..d9f48bb 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -277,9 +277,11 @@
  *
  * Actual routine to reset the adapter when a timeout on Tx has occurred
  */
-static void mv643xx_eth_tx_timeout_task(struct net_device *dev)
+static void mv643xx_eth_tx_timeout_task(struct work_struct *ugly)
 {
-	struct mv643xx_private *mp = netdev_priv(dev);
+	struct mv643xx_private *mp = container_of(ugly, struct mv643xx_private,
+						  tx_timeout_task);
+	struct net_device *dev = mp->mii.dev; /* yuck */
 
 	if (!netif_running(dev))
 		return;
@@ -1360,8 +1362,7 @@
 #endif
 
 	/* Configure the timeout task */
-	INIT_WORK(&mp->tx_timeout_task,
-			(void (*)(void *))mv643xx_eth_tx_timeout_task, dev);
+	INIT_WORK(&mp->tx_timeout_task, mv643xx_eth_tx_timeout_task);
 
 	spin_lock_init(&mp->lock);
 
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 36350e6..38df428 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -2615,9 +2615,10 @@
  * This watchdog is used to check whether the board has suffered
  * from a parity error and needs to be recovered.
  */
-static void myri10ge_watchdog(void *arg)
+static void myri10ge_watchdog(struct work_struct *work)
 {
-	struct myri10ge_priv *mgp = arg;
+	struct myri10ge_priv *mgp =
+		container_of(work, struct myri10ge_priv, watchdog_work);
 	u32 reboot;
 	int status;
 	u16 cmd, vendor;
@@ -2887,7 +2888,7 @@
 		    (unsigned long)mgp);
 
 	SET_ETHTOOL_OPS(netdev, &myri10ge_ethtool_ops);
-	INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog, mgp);
+	INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog);
 	status = register_netdev(netdev);
 	if (status != 0) {
 		dev_err(&pdev->dev, "register_netdev failed: %d\n", status);
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index d925053..9c588af 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -714,6 +714,7 @@
 	spinlock_t lock;
 	struct work_struct watchdog_task;
 	struct work_struct tx_timeout_task;
+	struct net_device *netdev;
 	struct timer_list watchdog_timer;
 
 	u32 curr_window;
@@ -921,7 +922,7 @@
 		    struct netxen_port *port);
 int netxen_nic_rx_has_work(struct netxen_adapter *adapter);
 int netxen_nic_tx_has_work(struct netxen_adapter *adapter);
-void netxen_watchdog_task(unsigned long v);
+void netxen_watchdog_task(struct work_struct *work);
 void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx,
 			    u32 ringid);
 void netxen_process_cmd_ring(unsigned long data);
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 0dca029..eae1823 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -710,12 +710,13 @@
 	return rv;
 }
 
-void netxen_watchdog_task(unsigned long v)
+void netxen_watchdog_task(struct work_struct *work)
 {
 	int port_num;
 	struct netxen_port *port;
 	struct net_device *netdev;
-	struct netxen_adapter *adapter = (struct netxen_adapter *)v;
+	struct netxen_adapter *adapter =
+		container_of(work, struct netxen_adapter, watchdog_task);
 
 	if (netxen_nic_check_temp(adapter))
 		return;
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 1cb662d..df0bb36 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -64,7 +64,7 @@
 static int netxen_nic_close(struct net_device *netdev);
 static int netxen_nic_xmit_frame(struct sk_buff *, struct net_device *);
 static void netxen_tx_timeout(struct net_device *netdev);
-static void netxen_tx_timeout_task(struct net_device *netdev);
+static void netxen_tx_timeout_task(struct work_struct *work);
 static void netxen_watchdog(unsigned long);
 static int netxen_handle_int(struct netxen_adapter *, struct net_device *);
 static int netxen_nic_ioctl(struct net_device *netdev,
@@ -274,8 +274,7 @@
 	adapter->ahw.xg_linkup = 0;
 	adapter->watchdog_timer.function = &netxen_watchdog;
 	adapter->watchdog_timer.data = (unsigned long)adapter;
-	INIT_WORK(&adapter->watchdog_task,
-		  (void (*)(void *))netxen_watchdog_task, adapter);
+	INIT_WORK(&adapter->watchdog_task, netxen_watchdog_task);
 	adapter->ahw.pdev = pdev;
 	adapter->proc_cmd_buf_counter = 0;
 	pci_read_config_byte(pdev, PCI_REVISION_ID, &adapter->ahw.revision_id);
@@ -379,8 +378,8 @@
 								  dev_addr);
 			}
 		}
-		INIT_WORK(&adapter->tx_timeout_task,
-			  (void (*)(void *))netxen_tx_timeout_task, netdev);
+		adapter->netdev = netdev;
+		INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task);
 		netif_carrier_off(netdev);
 		netif_stop_queue(netdev);
 
@@ -938,18 +937,20 @@
 	schedule_work(&adapter->tx_timeout_task);
 }
 
-static void netxen_tx_timeout_task(struct net_device *netdev)
+static void netxen_tx_timeout_task(struct work_struct *work)
 {
-	struct netxen_port *port = (struct netxen_port *)netdev_priv(netdev);
+	struct netxen_adapter *adapter =
+		container_of(work, struct netxen_adapter, tx_timeout_task);
+	struct net_device *netdev = adapter->netdev;
 	unsigned long flags;
 
 	printk(KERN_ERR "%s %s: transmit timeout, resetting.\n",
 	       netxen_nic_driver_name, netdev->name);
 
-	spin_lock_irqsave(&port->adapter->lock, flags);
+	spin_lock_irqsave(&adapter->lock, flags);
 	netxen_nic_close(netdev);
 	netxen_nic_open(netdev);
-	spin_unlock_irqrestore(&port->adapter->lock, flags);
+	spin_unlock_irqrestore(&adapter->lock, flags);
 	netdev->trans_start = jiffies;
 	netif_wake_queue(netdev);
 }
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index b0127c7..312e0e3 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -427,6 +427,7 @@
 	u8			__iomem *base;
 
 	struct pci_dev		*pci_dev;
+	struct net_device	*ndev;
 
 #ifdef NS83820_VLAN_ACCEL_SUPPORT
 	struct vlan_group	*vlgrp;
@@ -631,10 +632,10 @@
 }
 
 /* REFILL */
-static inline void queue_refill(void *_dev)
+static inline void queue_refill(struct work_struct *work)
 {
-	struct net_device *ndev = _dev;
-	struct ns83820 *dev = PRIV(ndev);
+	struct ns83820 *dev = container_of(work, struct ns83820, tq_refill);
+	struct net_device *ndev = dev->ndev;
 
 	rx_refill(ndev, GFP_KERNEL);
 	if (dev->rx_info.up)
@@ -1841,6 +1842,7 @@
 
 	ndev = alloc_etherdev(sizeof(struct ns83820));
 	dev = PRIV(ndev);
+	dev->ndev = ndev;
 	err = -ENOMEM;
 	if (!dev)
 		goto out;
@@ -1853,7 +1855,7 @@
 	SET_MODULE_OWNER(ndev);
 	SET_NETDEV_DEV(ndev, &pci_dev->dev);
 
-	INIT_WORK(&dev->tq_refill, queue_refill, ndev);
+	INIT_WORK(&dev->tq_refill, queue_refill);
 	tasklet_init(&dev->rx_tasklet, rx_action, (unsigned long)ndev);
 
 	err = pci_enable_device(pci_dev);
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index 69813406..8478dca 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -332,6 +332,7 @@
  */
 
 typedef struct local_info_t {
+	struct net_device	*dev;
 	struct pcmcia_device	*p_dev;
     dev_node_t node;
     struct net_device_stats stats;
@@ -353,7 +354,7 @@
  */
 static int do_start_xmit(struct sk_buff *skb, struct net_device *dev);
 static void do_tx_timeout(struct net_device *dev);
-static void xirc2ps_tx_timeout_task(void *data);
+static void xirc2ps_tx_timeout_task(struct work_struct *work);
 static struct net_device_stats *do_get_stats(struct net_device *dev);
 static void set_addresses(struct net_device *dev);
 static void set_multicast_list(struct net_device *dev);
@@ -567,6 +568,7 @@
     if (!dev)
 	    return -ENOMEM;
     local = netdev_priv(dev);
+    local->dev = dev;
     local->p_dev = link;
     link->priv = dev;
 
@@ -591,7 +593,7 @@
 #ifdef HAVE_TX_TIMEOUT
     dev->tx_timeout = do_tx_timeout;
     dev->watchdog_timeo = TX_TIMEOUT;
-    INIT_WORK(&local->tx_timeout_task, xirc2ps_tx_timeout_task, dev);
+    INIT_WORK(&local->tx_timeout_task, xirc2ps_tx_timeout_task);
 #endif
 
     return xirc2ps_config(link);
@@ -1324,9 +1326,11 @@
 /*====================================================================*/
 
 static void
-xirc2ps_tx_timeout_task(void *data)
+xirc2ps_tx_timeout_task(struct work_struct *work)
 {
-    struct net_device *dev = data;
+	local_info_t *local =
+		container_of(work, local_info_t, tx_timeout_task);
+	struct net_device *dev = local->dev;
     /* reset the card */
     do_reset(dev,1);
     dev->trans_start = jiffies;
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 88237bd..4044bb1 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -397,7 +397,7 @@
 EXPORT_SYMBOL(phy_start_aneg);
 
 
-static void phy_change(void *data);
+static void phy_change(struct work_struct *work);
 static void phy_timer(unsigned long data);
 
 /* phy_start_machine:
@@ -555,7 +555,7 @@
 {
 	int err = 0;
 
-	INIT_WORK(&phydev->phy_queue, phy_change, phydev);
+	INIT_WORK(&phydev->phy_queue, phy_change);
 
 	if (request_irq(phydev->irq, phy_interrupt,
 				IRQF_SHARED,
@@ -598,10 +598,11 @@
 
 
 /* Scheduled by the phy_interrupt/timer to handle PHY changes */
-static void phy_change(void *data)
+static void phy_change(struct work_struct *work)
 {
 	int err;
-	struct phy_device *phydev = data;
+	struct phy_device *phydev =
+		container_of(work, struct phy_device, phy_queue);
 
 	err = phy_disable_interrupts(phydev);
 
diff --git a/drivers/net/plip.c b/drivers/net/plip.c
index 71afb27..6bb085f 100644
--- a/drivers/net/plip.c
+++ b/drivers/net/plip.c
@@ -138,9 +138,9 @@
 #define PLIP_NIBBLE_WAIT        3000
 
 /* Bottom halves */
-static void plip_kick_bh(struct net_device *dev);
-static void plip_bh(struct net_device *dev);
-static void plip_timer_bh(struct net_device *dev);
+static void plip_kick_bh(struct work_struct *work);
+static void plip_bh(struct work_struct *work);
+static void plip_timer_bh(struct work_struct *work);
 
 /* Interrupt handler */
 static void plip_interrupt(int irq, void *dev_id);
@@ -207,9 +207,10 @@
 
 struct net_local {
 	struct net_device_stats enet_stats;
+	struct net_device *dev;
 	struct work_struct immediate;
-	struct work_struct deferred;
-	struct work_struct timer;
+	struct delayed_work deferred;
+	struct delayed_work timer;
 	struct plip_local snd_data;
 	struct plip_local rcv_data;
 	struct pardevice *pardev;
@@ -306,11 +307,11 @@
 	nl->nibble	= PLIP_NIBBLE_WAIT;
 
 	/* Initialize task queue structures */
-	INIT_WORK(&nl->immediate, (void (*)(void *))plip_bh, dev);
-	INIT_WORK(&nl->deferred, (void (*)(void *))plip_kick_bh, dev);
+	INIT_WORK(&nl->immediate, plip_bh);
+	INIT_DELAYED_WORK(&nl->deferred, plip_kick_bh);
 
 	if (dev->irq == -1)
-		INIT_WORK(&nl->timer, (void (*)(void *))plip_timer_bh, dev);
+		INIT_DELAYED_WORK(&nl->timer, plip_timer_bh);
 
 	spin_lock_init(&nl->lock);
 }
@@ -319,9 +320,10 @@
    This routine is kicked by do_timer().
    Request `plip_bh' to be invoked. */
 static void
-plip_kick_bh(struct net_device *dev)
+plip_kick_bh(struct work_struct *work)
 {
-	struct net_local *nl = netdev_priv(dev);
+	struct net_local *nl =
+		container_of(work, struct net_local, deferred.work);
 
 	if (nl->is_deferred)
 		schedule_work(&nl->immediate);
@@ -362,9 +364,9 @@
 
 /* Bottom half handler of PLIP. */
 static void
-plip_bh(struct net_device *dev)
+plip_bh(struct work_struct *work)
 {
-	struct net_local *nl = netdev_priv(dev);
+	struct net_local *nl = container_of(work, struct net_local, immediate);
 	struct plip_local *snd = &nl->snd_data;
 	struct plip_local *rcv = &nl->rcv_data;
 	plip_func f;
@@ -372,20 +374,21 @@
 
 	nl->is_deferred = 0;
 	f = connection_state_table[nl->connection];
-	if ((r = (*f)(dev, nl, snd, rcv)) != OK
-	    && (r = plip_bh_timeout_error(dev, nl, snd, rcv, r)) != OK) {
+	if ((r = (*f)(nl->dev, nl, snd, rcv)) != OK
+	    && (r = plip_bh_timeout_error(nl->dev, nl, snd, rcv, r)) != OK) {
 		nl->is_deferred = 1;
 		schedule_delayed_work(&nl->deferred, 1);
 	}
 }
 
 static void
-plip_timer_bh(struct net_device *dev)
+plip_timer_bh(struct work_struct *work)
 {
-	struct net_local *nl = netdev_priv(dev);
+	struct net_local *nl =
+		container_of(work, struct net_local, timer.work);
 
 	if (!(atomic_read (&nl->kill_timer))) {
-		plip_interrupt (-1, dev);
+		plip_interrupt (-1, nl->dev);
 
 		schedule_delayed_work(&nl->timer, 1);
 	}
@@ -1284,6 +1287,7 @@
 		}
 
 		nl = netdev_priv(dev);
+		nl->dev = dev;
 		nl->pardev = parport_register_device(port, name, plip_preempt,
 						 plip_wakeup, plip_interrupt,
 						 0, dev);
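
plip's deferred and timer bottom halves become struct delayed_work above because they are re-armed with schedule_delayed_work(); the immediate work stays a plain work_struct. A delayed_work embeds a work_struct as its .work member, so container_of() names that member. Sketch only, with hypothetical bar_* names:

#include <linux/workqueue.h>

struct bar_local {
	struct delayed_work deferred;		/* was struct work_struct */
};

static void bar_kick_bh(struct work_struct *work)
{
	/* the handler still receives the inner work_struct, so the
	 * container_of() path is "deferred.work" */
	struct bar_local *nl =
		container_of(work, struct bar_local, deferred.work);

	/* plip re-arms conditionally; unconditional here for brevity */
	schedule_delayed_work(&nl->deferred, 1);
}

static void bar_setup(struct bar_local *nl)
{
	INIT_DELAYED_WORK(&nl->deferred, bar_kick_bh);
}
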
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index ec640f6..d79d141 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -2008,7 +2008,7 @@
 			       "%s: Another function issued a reset to the "
 			       "chip. ISR value = %x.\n", ndev->name, value);
 		}
-		queue_work(qdev->workqueue, &qdev->reset_work);
+		queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
 		spin_unlock(&qdev->adapter_lock);
 	} else if (value & ISP_IMR_DISABLE_CMPL_INT) {
 		ql_disable_interrupts(qdev);
@@ -3182,11 +3182,13 @@
 	/*
 	 * Wake up the worker to process this event.
 	 */
-	queue_work(qdev->workqueue, &qdev->tx_timeout_work);
+	queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0);
 }
 
-static void ql_reset_work(struct ql3_adapter *qdev)
+static void ql_reset_work(struct work_struct *work)
 {
+	struct ql3_adapter *qdev =
+		container_of(work, struct ql3_adapter, reset_work.work);
 	struct net_device *ndev = qdev->ndev;
 	u32 value;
 	struct ql_tx_buf_cb *tx_cb;
@@ -3278,9 +3280,12 @@
 	}
 }
 
-static void ql_tx_timeout_work(struct ql3_adapter *qdev)
+static void ql_tx_timeout_work(struct work_struct *work)
 {
-	ql_cycle_adapter(qdev,QL_DO_RESET);
+	struct ql3_adapter *qdev =
+		container_of(work, struct ql3_adapter, tx_timeout_work.work);
+
+	ql_cycle_adapter(qdev, QL_DO_RESET);
 }
 
 static void ql_get_board_info(struct ql3_adapter *qdev)
@@ -3459,9 +3464,8 @@
 	netif_stop_queue(ndev);
 
 	qdev->workqueue = create_singlethread_workqueue(ndev->name);
-	INIT_WORK(&qdev->reset_work, (void (*)(void *))ql_reset_work, qdev);
-	INIT_WORK(&qdev->tx_timeout_work,
-		  (void (*)(void *))ql_tx_timeout_work, qdev);
+	INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work);
+	INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);
 
 	init_timer(&qdev->adapter_timer);
 	qdev->adapter_timer.function = ql3xxx_timer;
diff --git a/drivers/net/qla3xxx.h b/drivers/net/qla3xxx.h
index 65da2c0..ea94de7 100644
--- a/drivers/net/qla3xxx.h
+++ b/drivers/net/qla3xxx.h
@@ -1186,8 +1186,8 @@
 	u32 numPorts;
 	struct net_device_stats stats;
 	struct workqueue_struct *workqueue;
-	struct work_struct reset_work;
-	struct work_struct tx_timeout_work;
+	struct delayed_work reset_work;
+	struct delayed_work tx_timeout_work;
 	u32 max_frame_size;
 };
 
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 45d3ca4..85a392f 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -424,6 +424,7 @@
 struct rtl8169_private {
 	void __iomem *mmio_addr;	/* memory map physical address */
 	struct pci_dev *pci_dev;	/* Index of PCI device */
+	struct net_device *dev;
 	struct net_device_stats stats;	/* statistics of net device */
 	spinlock_t lock;		/* spin lock flag */
 	u32 msg_enable;
@@ -455,7 +456,7 @@
 	void (*phy_reset_enable)(void __iomem *);
 	unsigned int (*phy_reset_pending)(void __iomem *);
 	unsigned int (*link_ok)(void __iomem *);
-	struct work_struct task;
+	struct delayed_work task;
 	unsigned wol_enabled : 1;
 };
 
@@ -1510,6 +1511,7 @@
 	SET_MODULE_OWNER(dev);
 	SET_NETDEV_DEV(dev, &pdev->dev);
 	tp = netdev_priv(dev);
+	tp->dev = dev;
 	tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
 
 	/* enable device (incl. PCI PM wakeup and hotplug setup) */
@@ -1782,7 +1784,7 @@
 	if (retval < 0)
 		goto err_free_rx;
 
-	INIT_WORK(&tp->task, NULL, dev);
+	INIT_DELAYED_WORK(&tp->task, NULL);
 
 	rtl8169_hw_start(dev);
 
@@ -2105,11 +2107,11 @@
 	tp->cur_tx = tp->dirty_tx = 0;
 }
 
-static void rtl8169_schedule_work(struct net_device *dev, void (*task)(void *))
+static void rtl8169_schedule_work(struct net_device *dev, work_func_t task)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
 
-	PREPARE_WORK(&tp->task, task, dev);
+	PREPARE_DELAYED_WORK(&tp->task, task);
 	schedule_delayed_work(&tp->task, 4);
 }
 
@@ -2128,9 +2130,11 @@
 	netif_poll_enable(dev);
 }
 
-static void rtl8169_reinit_task(void *_data)
+static void rtl8169_reinit_task(struct work_struct *work)
 {
-	struct net_device *dev = _data;
+	struct rtl8169_private *tp =
+		container_of(work, struct rtl8169_private, task.work);
+	struct net_device *dev = tp->dev;
 	int ret;
 
 	if (netif_running(dev)) {
@@ -2153,10 +2157,11 @@
 	}
 }
 
-static void rtl8169_reset_task(void *_data)
+static void rtl8169_reset_task(struct work_struct *work)
 {
-	struct net_device *dev = _data;
-	struct rtl8169_private *tp = netdev_priv(dev);
+	struct rtl8169_private *tp =
+		container_of(work, struct rtl8169_private, task.work);
+	struct net_device *dev = tp->dev;
 
 	if (!netif_running(dev))
 		return;
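
rtl8169_schedule_work() above keeps a single delayed_work but re-points it at different handlers (reinit vs. reset) before scheduling, so its parameter becomes a work_func_t, i.e. void (*)(struct work_struct *). Roughly, with an illustrative qux_private type:

#include <linux/workqueue.h>

struct qux_private {
	struct delayed_work task;
};

static void qux_schedule_work(struct qux_private *tp, work_func_t task)
{
	/* swap the callback on the already-initialised work item,
	 * then schedule it a few jiffies out */
	PREPARE_DELAYED_WORK(&tp->task, task);
	schedule_delayed_work(&tp->task, 4);
}

Each handler then uses container_of(work, struct qux_private, task.work) exactly as rtl8169_reinit_task() and rtl8169_reset_task() do.
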
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 33569ec..250cdbe 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -5872,9 +5872,9 @@
  * Description: Sets the link status for the adapter
  */
 
-static void s2io_set_link(unsigned long data)
+static void s2io_set_link(struct work_struct *work)
 {
-	nic_t *nic = (nic_t *) data;
+	nic_t *nic = container_of(work, nic_t, set_link_task);
 	struct net_device *dev = nic->dev;
 	XENA_dev_config_t __iomem *bar0 = nic->bar0;
 	register u64 val64;
@@ -6379,10 +6379,10 @@
  * spin lock.
  */
 
-static void s2io_restart_nic(unsigned long data)
+static void s2io_restart_nic(struct work_struct *work)
 {
-	struct net_device *dev = (struct net_device *) data;
-	nic_t *sp = dev->priv;
+	nic_t *sp = container_of(work, nic_t, rst_timer_task);
+	struct net_device *dev = sp->dev;
 
 	s2io_card_down(sp);
 	if (s2io_card_up(sp)) {
@@ -6992,10 +6992,8 @@
 
 	dev->tx_timeout = &s2io_tx_watchdog;
 	dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
-	INIT_WORK(&sp->rst_timer_task,
-		  (void (*)(void *)) s2io_restart_nic, dev);
-	INIT_WORK(&sp->set_link_task,
-		  (void (*)(void *)) s2io_set_link, sp);
+	INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
+	INIT_WORK(&sp->set_link_task, s2io_set_link);
 
 	pci_save_state(sp->pdev);
 
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 12b719f..3b0bafd 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -1000,7 +1000,7 @@
 static irqreturn_t s2io_isr(int irq, void *dev_id);
 static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag);
 static const struct ethtool_ops netdev_ethtool_ops;
-static void s2io_set_link(unsigned long data);
+static void s2io_set_link(struct work_struct *work);
 static int s2io_set_swapper(nic_t * sp);
 static void s2io_card_down(nic_t *nic);
 static int s2io_card_up(nic_t *nic);
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index aaba458..b70ed79 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -280,6 +280,7 @@
 struct sis190_private {
 	void __iomem *mmio_addr;
 	struct pci_dev *pci_dev;
+	struct net_device *dev;
 	struct net_device_stats stats;
 	spinlock_t lock;
 	u32 rx_buf_sz;
@@ -897,10 +898,11 @@
 	netif_start_queue(dev);
 }
 
-static void sis190_phy_task(void * data)
+static void sis190_phy_task(struct work_struct *work)
 {
-	struct net_device *dev = data;
-	struct sis190_private *tp = netdev_priv(dev);
+	struct sis190_private *tp =
+		container_of(work, struct sis190_private, phy_task);
+	struct net_device *dev = tp->dev;
 	void __iomem *ioaddr = tp->mmio_addr;
 	int phy_id = tp->mii_if.phy_id;
 	u16 val;
@@ -1047,7 +1049,7 @@
 	if (rc < 0)
 		goto err_free_rx_1;
 
-	INIT_WORK(&tp->phy_task, sis190_phy_task, dev);
+	INIT_WORK(&tp->phy_task, sis190_phy_task);
 
 	sis190_request_timer(dev);
 
@@ -1436,6 +1438,7 @@
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
 	tp = netdev_priv(dev);
+	tp->dev = dev;
 	tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);
 
 	rc = pci_enable_device(pdev);
@@ -1798,7 +1801,7 @@
 
 	sis190_init_rxfilter(dev);
 
-	INIT_WORK(&tp->phy_task, sis190_phy_task, dev);
+	INIT_WORK(&tp->phy_task, sis190_phy_task);
 
 	dev->open = sis190_open;
 	dev->stop = sis190_close;
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 5513907..b60f045 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -1327,10 +1327,11 @@
  * Since internal PHY is wired to a level triggered pin, can't
  * get an interrupt when carrier is detected.
  */
-static void xm_link_timer(void *arg)
+static void xm_link_timer(struct work_struct *work)
 {
-	struct net_device *dev = arg;
-	struct skge_port *skge = netdev_priv(arg);
+	struct skge_port *skge =
+		container_of(work, struct skge_port, link_thread.work);
+	struct net_device *dev = skge->netdev;
  	struct skge_hw *hw = skge->hw;
 	int port = skge->port;
 
@@ -3072,9 +3073,9 @@
  * because accessing phy registers requires spin wait which might
  * cause excess interrupt latency.
  */
-static void skge_extirq(void *arg)
+static void skge_extirq(struct work_struct *work)
 {
-	struct skge_hw *hw = arg;
+	struct skge_hw *hw = container_of(work, struct skge_hw, phy_work);
 	int port;
 
 	mutex_lock(&hw->phy_mutex);
@@ -3456,7 +3457,7 @@
 	skge->port = port;
 
 	/* Only used for Genesis XMAC */
-	INIT_WORK(&skge->link_thread, xm_link_timer, dev);
+	INIT_DELAYED_WORK(&skge->link_thread, xm_link_timer);
 
 	if (hw->chip_id != CHIP_ID_GENESIS) {
 		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
@@ -3543,7 +3544,7 @@
 
 	hw->pdev = pdev;
 	mutex_init(&hw->phy_mutex);
-	INIT_WORK(&hw->phy_work, skge_extirq, hw);
+	INIT_WORK(&hw->phy_work, skge_extirq);
 	spin_lock_init(&hw->hw_lock);
 
 	hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index 537c0aa..23e5275 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -2456,7 +2456,7 @@
 
 	struct net_device_stats net_stats;
 
-	struct work_struct   link_thread;
+	struct delayed_work  link_thread;
 	enum pause_control   flow_control;
 	enum pause_status    flow_status;
 	u8		     rx_csum;
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 95b6478..e62a958 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -210,6 +210,7 @@
 
 	/* work queue */
 	struct work_struct phy_configure;
+	struct net_device *dev;
 	int	work_pending;
 
 	spinlock_t lock;
@@ -1114,10 +1115,11 @@
  * of autonegotiation.)  If the RPC ANEG bit is cleared, the selection
  * is controlled by the RPC SPEED and RPC DPLX bits.
  */
-static void smc_phy_configure(void *data)
+static void smc_phy_configure(struct work_struct *work)
 {
-	struct net_device *dev = data;
-	struct smc_local *lp = netdev_priv(dev);
+	struct smc_local *lp =
+		container_of(work, struct smc_local, phy_configure);
+	struct net_device *dev = lp->dev;
 	void __iomem *ioaddr = lp->base;
 	int phyaddr = lp->mii.phy_id;
 	int my_phy_caps; /* My PHY capabilities */
@@ -1592,7 +1594,7 @@
 
 	/* Configure the PHY, initialize the link state */
 	if (lp->phy_type != 0)
-		smc_phy_configure(dev);
+		smc_phy_configure(&lp->phy_configure);
 	else {
 		spin_lock_irq(&lp->lock);
 		smc_10bt_check_media(dev, 1);
@@ -1972,7 +1974,8 @@
 #endif
 
 	tasklet_init(&lp->tx_task, smc_hardware_send_pkt, (unsigned long)dev);
-	INIT_WORK(&lp->phy_configure, smc_phy_configure, dev);
+	INIT_WORK(&lp->phy_configure, smc_phy_configure);
+	lp->dev = dev;
 	lp->mii.phy_id_mask = 0x1f;
 	lp->mii.reg_num_mask = 0x1f;
 	lp->mii.force_media = 0;
@@ -2322,7 +2325,7 @@
 			smc_reset(ndev);
 			smc_enable(ndev);
 			if (lp->phy_type != 0)
-				smc_phy_configure(ndev);
+				smc_phy_configure(&lp->phy_configure);
 			netif_device_attach(ndev);
 		}
 	}
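
smc91x now calls the converted handler synchronously as smc_phy_configure(&lp->phy_configure) instead of keeping a second netdev-based entry point; that works because the handler only needs the work pointer to locate its container (ipw2100 and orinoco later in the series use the same trick for their reset paths). Illustrative quux_* sketch:

#include <linux/workqueue.h>

struct quux_local {
	struct work_struct phy_configure;
};

static void quux_phy_configure(struct work_struct *work)
{
	struct quux_local *lp =
		container_of(work, struct quux_local, phy_configure);

	/* ... program the PHY via lp ... */
	(void)lp;
}

static void quux_open(struct quux_local *lp)
{
	/* direct, synchronous call: passing the embedded work item keeps
	 * container_of() in the handler valid */
	quux_phy_configure(&lp->phy_configure);
}
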
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 13e0a43..ebb6aa3 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -1939,10 +1939,11 @@
  * called as task when tx hangs, resets interface (if interface is up)
  */
 static void
-spider_net_tx_timeout_task(void *data)
+spider_net_tx_timeout_task(struct work_struct *work)
 {
-	struct net_device *netdev = data;
-	struct spider_net_card *card = netdev_priv(netdev);
+	struct spider_net_card *card =
+		container_of(work, struct spider_net_card, tx_timeout_task);
+	struct net_device *netdev = card->netdev;
 
 	if (!(netdev->flags & IFF_UP))
 		goto out;
@@ -2116,7 +2117,7 @@
 	card = netdev_priv(netdev);
 	card->netdev = netdev;
 	card->msg_enable = SPIDER_NET_DEFAULT_MSG;
-	INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task, netdev);
+	INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task);
 	init_waitqueue_head(&card->waitq);
 	atomic_set(&card->tx_timeout_task_counter, 0);
 
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index cf44e72..785e4a5 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -2282,9 +2282,9 @@
 	}
 }
 
-static void gem_reset_task(void *data)
+static void gem_reset_task(struct work_struct *work)
 {
-	struct gem *gp = (struct gem *) data;
+	struct gem *gp = container_of(work, struct gem, reset_task);
 
 	mutex_lock(&gp->pm_mutex);
 
@@ -3044,7 +3044,7 @@
 	gp->link_timer.function = gem_link_timer;
 	gp->link_timer.data = (unsigned long) gp;
 
-	INIT_WORK(&gp->reset_task, gem_reset_task, gp);
+	INIT_WORK(&gp->reset_task, gem_reset_task);
 
 	gp->lstate = link_down;
 	gp->timer_ticks = 0;
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index c20bb99..d9123c9 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -3654,9 +3654,9 @@
 }
 #endif
 
-static void tg3_reset_task(void *_data)
+static void tg3_reset_task(struct work_struct *work)
 {
-	struct tg3 *tp = _data;
+	struct tg3 *tp = container_of(work, struct tg3, reset_task);
 	unsigned int restart_timer;
 
 	tg3_full_lock(tp, 0);
@@ -11734,7 +11734,7 @@
 #endif
 	spin_lock_init(&tp->lock);
 	spin_lock_init(&tp->indirect_lock);
-	INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
+	INIT_WORK(&tp->reset_task, tg3_reset_task);
 
 	tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
 	if (tp->regs == 0UL) {
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index e14f5a0..f85f002 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -296,6 +296,7 @@
 static int	TLan_ioctl( struct net_device *dev, struct ifreq *rq, int cmd);
 static int      TLan_probe1( struct pci_dev *pdev, long ioaddr, int irq, int rev, const struct pci_device_id *ent);
 static void	TLan_tx_timeout( struct net_device *dev);
+static void	TLan_tx_timeout_work(struct work_struct *work);
 static int 	tlan_init_one( struct pci_dev *pdev, const struct pci_device_id *ent);
 
 static u32	TLan_HandleInvalid( struct net_device *, u16 );
@@ -562,6 +563,7 @@
 	priv = netdev_priv(dev);
 
 	priv->pciDev = pdev;
+	priv->dev = dev;
 
 	/* Is this a PCI device? */
 	if (pdev) {
@@ -634,7 +636,7 @@
 
 	/* This will be used when we get an adapter error from
 	 * within our irq handler */
-	INIT_WORK(&priv->tlan_tqueue, (void *)(void*)TLan_tx_timeout, dev);
+	INIT_WORK(&priv->tlan_tqueue, TLan_tx_timeout_work);
 
 	spin_lock_init(&priv->lock);
 
@@ -1040,6 +1042,25 @@
 }
 
 
+	/***************************************************************
+	 * 	TLan_tx_timeout_work
+	 *
+	 * 	Returns: nothing
+	 *
+	 * 	Params:
+	 * 		work	work item of device which timed out
+	 *
+	 **************************************************************/
+
+static void TLan_tx_timeout_work(struct work_struct *work)
+{
+	TLanPrivateInfo	*priv =
+		container_of(work, TLanPrivateInfo, tlan_tqueue);
+
+	TLan_tx_timeout(priv->dev);
+}
+
+
 
 	/***************************************************************
 	 *	TLan_StartTx
diff --git a/drivers/net/tlan.h b/drivers/net/tlan.h
index a44e2f2..41ce0b6 100644
--- a/drivers/net/tlan.h
+++ b/drivers/net/tlan.h
@@ -170,6 +170,7 @@
 typedef struct tlan_private_tag {
 	struct net_device       *nextDevice;
 	struct pci_dev		*pciDev;
+	struct net_device       *dev;
 	void			*dmaStorage;
 	dma_addr_t		dmaStorageDMA;
 	unsigned int		dmaSize;
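
tlan takes a slightly different route: rather than rewriting TLan_tx_timeout(), it adds a thin TLan_tx_timeout_work() shim that resolves the private structure and forwards to the existing netdev-based handler. The same trick, sketched with made-up corge_* names:

#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct corge_priv {
	struct net_device *dev;		/* back-pointer, as tlan.h adds */
	struct work_struct tqueue;
};

static void corge_tx_timeout(struct net_device *dev)
{
	/* pre-existing handler, still written against the net_device */
	netif_wake_queue(dev);
}

static void corge_tx_timeout_work(struct work_struct *work)
{
	struct corge_priv *priv =
		container_of(work, struct corge_priv, tqueue);

	corge_tx_timeout(priv->dev);	/* forward to the old handler */
}

INIT_WORK(&priv->tqueue, corge_tx_timeout_work) then replaces the old cast-laden initialisation.
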
diff --git a/drivers/net/tulip/21142.c b/drivers/net/tulip/21142.c
index fa3a2bb..942b839 100644
--- a/drivers/net/tulip/21142.c
+++ b/drivers/net/tulip/21142.c
@@ -26,10 +26,11 @@
 
 /* Handle the 21143 uniquely: do autoselect with NWay, not the EEPROM list
    of available transceivers.  */
-void t21142_media_task(void *data)
+void t21142_media_task(struct work_struct *work)
 {
-	struct net_device *dev = data;
-	struct tulip_private *tp = netdev_priv(dev);
+	struct tulip_private *tp =
+		container_of(work, struct tulip_private, media_work);
+	struct net_device *dev = tp->dev;
 	void __iomem *ioaddr = tp->base_addr;
 	int csr12 = ioread32(ioaddr + CSR12);
 	int next_tick = 60*HZ;
diff --git a/drivers/net/tulip/timer.c b/drivers/net/tulip/timer.c
index 066e5d6..df326fe 100644
--- a/drivers/net/tulip/timer.c
+++ b/drivers/net/tulip/timer.c
@@ -18,10 +18,11 @@
 #include "tulip.h"
 
 
-void tulip_media_task(void *data)
+void tulip_media_task(struct work_struct *work)
 {
-	struct net_device *dev = data;
-	struct tulip_private *tp = netdev_priv(dev);
+	struct tulip_private *tp =
+		container_of(work, struct tulip_private, media_work);
+	struct net_device *dev = tp->dev;
 	void __iomem *ioaddr = tp->base_addr;
 	u32 csr12 = ioread32(ioaddr + CSR12);
 	int next_tick = 2*HZ;
diff --git a/drivers/net/tulip/tulip.h b/drivers/net/tulip/tulip.h
index ad107f4..25f25da 100644
--- a/drivers/net/tulip/tulip.h
+++ b/drivers/net/tulip/tulip.h
@@ -44,7 +44,7 @@
 	int valid_intrs;	/* CSR7 interrupt enable settings */
 	int flags;
 	void (*media_timer) (unsigned long);
-	void (*media_task) (void *);
+	work_func_t media_task;
 };
 
 
@@ -392,6 +392,7 @@
 	int csr12_shadow;
 	int pad0;		/* Used for 8-byte alignment */
 	struct work_struct media_work;
+	struct net_device *dev;
 };
 
 
@@ -406,7 +407,7 @@
 
 /* 21142.c */
 extern u16 t21142_csr14[];
-void t21142_media_task(void *data);
+void t21142_media_task(struct work_struct *work);
 void t21142_start_nway(struct net_device *dev);
 void t21142_lnk_change(struct net_device *dev, int csr5);
 
@@ -444,7 +445,7 @@
 void pnic_timer(unsigned long data);
 
 /* timer.c */
-void tulip_media_task(void *data);
+void tulip_media_task(struct work_struct *work);
 void mxic_timer(unsigned long data);
 void comet_timer(unsigned long data);
 
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 0aee618..5a35354 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -1367,6 +1367,7 @@
 	 * it is zeroed and aligned in alloc_etherdev
 	 */
 	tp = netdev_priv(dev);
+	tp->dev = dev;
 
 	tp->rx_ring = pci_alloc_consistent(pdev,
 					   sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
@@ -1389,7 +1390,7 @@
 	tp->timer.data = (unsigned long)dev;
 	tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
 
-	INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task, dev);
+	INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task);
 
 	dev->base_addr = (unsigned long)ioaddr;
 
diff --git a/drivers/net/wan/pc300_tty.c b/drivers/net/wan/pc300_tty.c
index 931cbdf..b2a23ae 100644
--- a/drivers/net/wan/pc300_tty.c
+++ b/drivers/net/wan/pc300_tty.c
@@ -125,8 +125,8 @@
 static int cpc_tty_chars_in_buffer(struct tty_struct *tty);
 static void cpc_tty_flush_buffer(struct tty_struct *tty);
 static void cpc_tty_hangup(struct tty_struct *tty);
-static void cpc_tty_rx_work(void *data);
-static void cpc_tty_tx_work(void *data);
+static void cpc_tty_rx_work(struct work_struct *work);
+static void cpc_tty_tx_work(struct work_struct *work);
 static int cpc_tty_send_to_card(pc300dev_t *dev,void *buf, int len);
 static void cpc_tty_trace(pc300dev_t *dev, char* buf, int len, char rxtx);
 static void cpc_tty_signal_off(pc300dev_t *pc300dev, unsigned char);
@@ -261,8 +261,8 @@
 	cpc_tty->tty_minor = port + CPC_TTY_MINOR_START;
 	cpc_tty->pc300dev = pc300dev; 
 
-	INIT_WORK(&cpc_tty->tty_tx_work, cpc_tty_tx_work, (void *)cpc_tty);
-	INIT_WORK(&cpc_tty->tty_rx_work, cpc_tty_rx_work, (void *)port);
+	INIT_WORK(&cpc_tty->tty_tx_work, cpc_tty_tx_work);
+	INIT_WORK(&cpc_tty->tty_rx_work, cpc_tty_rx_work);
 	
 	cpc_tty->buf_rx.first = cpc_tty->buf_rx.last = NULL;
 
@@ -659,21 +659,23 @@
  * o call the line disc. read
  * o free memory
  */
-static void cpc_tty_rx_work(void * data)
+static void cpc_tty_rx_work(struct work_struct *work)
 {
+	st_cpc_tty_area *cpc_tty;
 	unsigned long port;
 	int i, j;
-	st_cpc_tty_area *cpc_tty; 
 	volatile st_cpc_rx_buf *buf;
 	char flags=0,flg_rx=1; 
 	struct tty_ldisc *ld;
 
 	if (cpc_tty_cnt == 0) return;
-
 	
 	for (i=0; (i < 4) && flg_rx ; i++) {
 		flg_rx = 0;
-		port = (unsigned long)data;
+
+		cpc_tty = container_of(work, st_cpc_tty_area, tty_rx_work);
+		port = cpc_tty - cpc_tty_area;
+
 		for (j=0; j < CPC_TTY_NPORTS; j++) {
 			cpc_tty = &cpc_tty_area[port];
 		
@@ -882,9 +884,10 @@
  * o if need call line discipline wakeup
  * o call wake_up_interruptible
  */
-static void cpc_tty_tx_work(void *data)
+static void cpc_tty_tx_work(struct work_struct *work)
 {
-	st_cpc_tty_area *cpc_tty = (st_cpc_tty_area *) data; 
+	st_cpc_tty_area *cpc_tty =
+		container_of(work, st_cpc_tty_area, tty_tx_work);
 	struct tty_struct *tty; 
 
 	CPC_TTY_DBG("%s: cpc_tty_tx_work init\n",cpc_tty->name);
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index efcdaf1..44a2270 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -49,6 +49,7 @@
 #include <asm/uaccess.h>
 #include <net/ieee80211.h>
 #include <linux/kthread.h>
+#include <linux/freezer.h>
 
 #include "airo.h"
 
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx.h b/drivers/net/wireless/bcm43xx/bcm43xx.h
index 94dfb92..8286678 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx.h
@@ -819,7 +819,7 @@
 	struct tasklet_struct isr_tasklet;
 
 	/* Periodic tasks */
-	struct work_struct periodic_work;
+	struct delayed_work periodic_work;
 	unsigned int periodic_state;
 
 	struct work_struct restart_work;
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
index 5b3c273..2ec2e5a 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
@@ -3215,9 +3215,10 @@
 	schedule_delayed_work(&bcm->periodic_work, HZ * 15);
 }
 
-static void bcm43xx_periodic_work_handler(void *d)
+static void bcm43xx_periodic_work_handler(struct work_struct *work)
 {
-	struct bcm43xx_private *bcm = d;
+	struct bcm43xx_private *bcm =
+		container_of(work, struct bcm43xx_private, periodic_work.work);
 	struct net_device *net_dev = bcm->net_dev;
 	unsigned long flags;
 	u32 savedirqs = 0;
@@ -3279,11 +3280,11 @@
 
 void bcm43xx_periodic_tasks_setup(struct bcm43xx_private *bcm)
 {
-	struct work_struct *work = &(bcm->periodic_work);
+	struct delayed_work *work = &bcm->periodic_work;
 
 	assert(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED);
-	INIT_WORK(work, bcm43xx_periodic_work_handler, bcm);
-	schedule_work(work);
+	INIT_DELAYED_WORK(work, bcm43xx_periodic_work_handler);
+	schedule_delayed_work(work, 0);
 }
 
 static void bcm43xx_security_init(struct bcm43xx_private *bcm)
@@ -3635,7 +3636,7 @@
 	bcm43xx_periodic_tasks_setup(bcm);
 
 	/*FIXME: This should be handled by softmac instead. */
-	schedule_work(&bcm->softmac->associnfo.work);
+	schedule_delayed_work(&bcm->softmac->associnfo.work, 0);
 
 out:
 	mutex_unlock(&(bcm)->mutex);
@@ -4182,9 +4183,10 @@
 /* Hard-reset the chip. Do not call this directly.
  * Use bcm43xx_controller_restart()
  */
-static void bcm43xx_chip_reset(void *_bcm)
+static void bcm43xx_chip_reset(struct work_struct *work)
 {
-	struct bcm43xx_private *bcm = _bcm;
+	struct bcm43xx_private *bcm =
+		container_of(work, struct bcm43xx_private, restart_work);
 	struct bcm43xx_phyinfo *phy;
 	int err = -ENODEV;
 
@@ -4211,7 +4213,7 @@
 	if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED)
 		return;
 	printk(KERN_ERR PFX "Controller RESET (%s) ...\n", reason);
-	INIT_WORK(&bcm->restart_work, bcm43xx_chip_reset, bcm);
+	INIT_WORK(&bcm->restart_work, bcm43xx_chip_reset);
 	schedule_work(&bcm->restart_work);
 }
 
diff --git a/drivers/net/wireless/hostap/hostap.h b/drivers/net/wireless/hostap/hostap.h
index e663518..e89c890 100644
--- a/drivers/net/wireless/hostap/hostap.h
+++ b/drivers/net/wireless/hostap/hostap.h
@@ -35,7 +35,7 @@
 struct net_device_stats *hostap_get_stats(struct net_device *dev);
 void hostap_setup_dev(struct net_device *dev, local_info_t *local,
 		      int main_dev);
-void hostap_set_multicast_list_queue(void *data);
+void hostap_set_multicast_list_queue(struct work_struct *work);
 int hostap_set_hostapd(local_info_t *local, int val, int rtnl_locked);
 int hostap_set_hostapd_sta(local_info_t *local, int val, int rtnl_locked);
 void hostap_cleanup(local_info_t *local);
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index ba13125..08bc57a 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -49,10 +49,10 @@
 static struct sta_info* ap_get_sta(struct ap_data *ap, u8 *sta);
 static void hostap_event_expired_sta(struct net_device *dev,
 				     struct sta_info *sta);
-static void handle_add_proc_queue(void *data);
+static void handle_add_proc_queue(struct work_struct *work);
 
 #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
-static void handle_wds_oper_queue(void *data);
+static void handle_wds_oper_queue(struct work_struct *work);
 static void prism2_send_mgmt(struct net_device *dev,
 			     u16 type_subtype, char *body,
 			     int body_len, u8 *addr, u16 tx_cb_idx);
@@ -807,7 +807,7 @@
 	INIT_LIST_HEAD(&ap->sta_list);
 
 	/* Initialize task queue structure for AP management */
-	INIT_WORK(&local->ap->add_sta_proc_queue, handle_add_proc_queue, ap);
+	INIT_WORK(&local->ap->add_sta_proc_queue, handle_add_proc_queue);
 
 	ap->tx_callback_idx =
 		hostap_tx_callback_register(local, hostap_ap_tx_cb, ap);
@@ -815,7 +815,7 @@
 		printk(KERN_WARNING "%s: failed to register TX callback for "
 		       "AP\n", local->dev->name);
 #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
-	INIT_WORK(&local->ap->wds_oper_queue, handle_wds_oper_queue, local);
+	INIT_WORK(&local->ap->wds_oper_queue, handle_wds_oper_queue);
 
 	ap->tx_callback_auth =
 		hostap_tx_callback_register(local, hostap_ap_tx_cb_auth, ap);
@@ -1062,9 +1062,10 @@
 }
 
 
-static void handle_add_proc_queue(void *data)
+static void handle_add_proc_queue(struct work_struct *work)
 {
-	struct ap_data *ap = (struct ap_data *) data;
+	struct ap_data *ap = container_of(work, struct ap_data,
+					  add_sta_proc_queue);
 	struct sta_info *sta;
 	char name[20];
 	struct add_sta_proc_data *entry, *prev;
@@ -1952,9 +1953,11 @@
 
 #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
 
-static void handle_wds_oper_queue(void *data)
+static void handle_wds_oper_queue(struct work_struct *work)
 {
-	local_info_t *local = data;
+	struct ap_data *ap = container_of(work, struct ap_data,
+					  wds_oper_queue);
+	local_info_t *local = ap->local;
 	struct wds_oper_data *entry, *prev;
 
 	spin_lock_bh(&local->lock);
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index ed00ebb..c19e686 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -1645,9 +1645,9 @@
 
 /* Called only as scheduled task after noticing card timeout in interrupt
  * context */
-static void handle_reset_queue(void *data)
+static void handle_reset_queue(struct work_struct *work)
 {
-	local_info_t *local = (local_info_t *) data;
+	local_info_t *local = container_of(work, local_info_t, reset_queue);
 
 	printk(KERN_DEBUG "%s: scheduled card reset\n", local->dev->name);
 	prism2_hw_reset(local->dev);
@@ -2896,9 +2896,10 @@
 
 /* Called only as a scheduled task when communications quality values should
  * be updated. */
-static void handle_comms_qual_update(void *data)
+static void handle_comms_qual_update(struct work_struct *work)
 {
-	local_info_t *local = data;
+	local_info_t *local =
+		container_of(work, local_info_t, comms_qual_update);
 	prism2_update_comms_qual(local->dev);
 }
 
@@ -3050,9 +3051,9 @@
 }
 
 
-static void handle_set_tim_queue(void *data)
+static void handle_set_tim_queue(struct work_struct *work)
 {
-	local_info_t *local = (local_info_t *) data;
+	local_info_t *local = container_of(work, local_info_t, set_tim_queue);
 	struct set_tim_data *entry;
 	u16 val;
 
@@ -3209,15 +3210,15 @@
 	local->scan_channel_mask = 0xffff;
 
 	/* Initialize task queue structures */
-	INIT_WORK(&local->reset_queue, handle_reset_queue, local);
+	INIT_WORK(&local->reset_queue, handle_reset_queue);
 	INIT_WORK(&local->set_multicast_list_queue,
-		  hostap_set_multicast_list_queue, local->dev);
+		  hostap_set_multicast_list_queue);
 
-	INIT_WORK(&local->set_tim_queue, handle_set_tim_queue, local);
+	INIT_WORK(&local->set_tim_queue, handle_set_tim_queue);
 	INIT_LIST_HEAD(&local->set_tim_list);
 	spin_lock_init(&local->set_tim_lock);
 
-	INIT_WORK(&local->comms_qual_update, handle_comms_qual_update, local);
+	INIT_WORK(&local->comms_qual_update, handle_comms_qual_update);
 
 	/* Initialize tasklets for handling hardware IRQ related operations
 	 * outside hw IRQ handler */
diff --git a/drivers/net/wireless/hostap/hostap_info.c b/drivers/net/wireless/hostap/hostap_info.c
index 50f72d8..5fd2b1a 100644
--- a/drivers/net/wireless/hostap/hostap_info.c
+++ b/drivers/net/wireless/hostap/hostap_info.c
@@ -474,9 +474,9 @@
 
 /* Called only as scheduled task after receiving info frames (used to avoid
  * pending too much time in HW IRQ handler). */
-static void handle_info_queue(void *data)
+static void handle_info_queue(struct work_struct *work)
 {
-	local_info_t *local = (local_info_t *) data;
+	local_info_t *local = container_of(work, local_info_t, info_queue);
 
 	if (test_and_clear_bit(PRISM2_INFO_PENDING_LINKSTATUS,
 			       &local->pending_info))
@@ -493,7 +493,7 @@
 {
 	skb_queue_head_init(&local->info_list);
 #ifndef PRISM2_NO_STATION_MODES
-	INIT_WORK(&local->info_queue, handle_info_queue, local);
+	INIT_WORK(&local->info_queue, handle_info_queue);
 #endif /* PRISM2_NO_STATION_MODES */
 }
 
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index 53374fc..0796be9 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -767,14 +767,14 @@
 
 /* TODO: to be further implemented as soon as Prism2 fully supports
  *       GroupAddresses and correct documentation is available */
-void hostap_set_multicast_list_queue(void *data)
+void hostap_set_multicast_list_queue(struct work_struct *work)
 {
-	struct net_device *dev = (struct net_device *) data;
+	local_info_t *local =
+		container_of(work, local_info_t, set_multicast_list_queue);
+	struct net_device *dev = local->dev;
 	struct hostap_interface *iface;
-	local_info_t *local;
 
 	iface = netdev_priv(dev);
-	local = iface->local;
 	if (hostap_set_word(dev, HFA384X_RID_PROMISCUOUSMODE,
 			    local->is_promisc)) {
 		printk(KERN_INFO "%s: %sabling promiscuous mode failed\n",
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c
index 79607b8..1bcd352 100644
--- a/drivers/net/wireless/ipw2100.c
+++ b/drivers/net/wireless/ipw2100.c
@@ -316,7 +316,7 @@
 				     struct ipw2100_fw *fw);
 static int ipw2100_ucode_download(struct ipw2100_priv *priv,
 				  struct ipw2100_fw *fw);
-static void ipw2100_wx_event_work(struct ipw2100_priv *priv);
+static void ipw2100_wx_event_work(struct work_struct *work);
 static struct iw_statistics *ipw2100_wx_wireless_stats(struct net_device *dev);
 static struct iw_handler_def ipw2100_wx_handler_def;
 
@@ -679,7 +679,8 @@
 			queue_delayed_work(priv->workqueue, &priv->reset_work,
 					   priv->reset_backoff * HZ);
 		else
-			queue_work(priv->workqueue, &priv->reset_work);
+			queue_delayed_work(priv->workqueue, &priv->reset_work,
+					   0);
 
 		if (priv->reset_backoff < MAX_RESET_BACKOFF)
 			priv->reset_backoff++;
@@ -1873,8 +1874,10 @@
 	netif_stop_queue(priv->net_dev);
 }
 
-static void ipw2100_reset_adapter(struct ipw2100_priv *priv)
+static void ipw2100_reset_adapter(struct work_struct *work)
 {
+	struct ipw2100_priv *priv =
+		container_of(work, struct ipw2100_priv, reset_work.work);
 	unsigned long flags;
 	union iwreq_data wrqu = {
 		.ap_addr = {
@@ -2071,9 +2074,9 @@
 		return;
 
 	if (priv->status & STATUS_SECURITY_UPDATED)
-		queue_work(priv->workqueue, &priv->security_work);
+		queue_delayed_work(priv->workqueue, &priv->security_work, 0);
 
-	queue_work(priv->workqueue, &priv->wx_event_work);
+	queue_delayed_work(priv->workqueue, &priv->wx_event_work, 0);
 }
 
 static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status)
@@ -5524,8 +5527,11 @@
 	return err;
 }
 
-static void ipw2100_security_work(struct ipw2100_priv *priv)
+static void ipw2100_security_work(struct work_struct *work)
 {
+	struct ipw2100_priv *priv =
+		container_of(work, struct ipw2100_priv, security_work.work);
+
 	/* If we happen to have reconnected before we get a chance to
 	 * process this, then update the security settings--which causes
 	 * a disassociation to occur */
@@ -5748,7 +5754,7 @@
 
 	priv->reset_backoff = 0;
 	mutex_unlock(&priv->action_mutex);
-	ipw2100_reset_adapter(priv);
+	ipw2100_reset_adapter(&priv->reset_work.work);
 	return 0;
 
       done:
@@ -5910,9 +5916,10 @@
 	.get_drvinfo = ipw_ethtool_get_drvinfo,
 };
 
-static void ipw2100_hang_check(void *adapter)
+static void ipw2100_hang_check(struct work_struct *work)
 {
-	struct ipw2100_priv *priv = adapter;
+	struct ipw2100_priv *priv =
+		container_of(work, struct ipw2100_priv, hang_check.work);
 	unsigned long flags;
 	u32 rtc = 0xa5a5a5a5;
 	u32 len = sizeof(rtc);
@@ -5952,9 +5959,10 @@
 	spin_unlock_irqrestore(&priv->low_lock, flags);
 }
 
-static void ipw2100_rf_kill(void *adapter)
+static void ipw2100_rf_kill(struct work_struct *work)
 {
-	struct ipw2100_priv *priv = adapter;
+	struct ipw2100_priv *priv =
+		container_of(work, struct ipw2100_priv, rf_kill.work);
 	unsigned long flags;
 
 	spin_lock_irqsave(&priv->low_lock, flags);
@@ -6103,14 +6111,11 @@
 
 	priv->workqueue = create_workqueue(DRV_NAME);
 
-	INIT_WORK(&priv->reset_work,
-		  (void (*)(void *))ipw2100_reset_adapter, priv);
-	INIT_WORK(&priv->security_work,
-		  (void (*)(void *))ipw2100_security_work, priv);
-	INIT_WORK(&priv->wx_event_work,
-		  (void (*)(void *))ipw2100_wx_event_work, priv);
-	INIT_WORK(&priv->hang_check, ipw2100_hang_check, priv);
-	INIT_WORK(&priv->rf_kill, ipw2100_rf_kill, priv);
+	INIT_DELAYED_WORK(&priv->reset_work, ipw2100_reset_adapter);
+	INIT_DELAYED_WORK(&priv->security_work, ipw2100_security_work);
+	INIT_DELAYED_WORK(&priv->wx_event_work, ipw2100_wx_event_work);
+	INIT_DELAYED_WORK(&priv->hang_check, ipw2100_hang_check);
+	INIT_DELAYED_WORK(&priv->rf_kill, ipw2100_rf_kill);
 
 	tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
 		     ipw2100_irq_tasklet, (unsigned long)priv);
@@ -8281,8 +8286,10 @@
 	.get_wireless_stats = ipw2100_wx_wireless_stats,
 };
 
-static void ipw2100_wx_event_work(struct ipw2100_priv *priv)
+static void ipw2100_wx_event_work(struct work_struct *work)
 {
+	struct ipw2100_priv *priv =
+		container_of(work, struct ipw2100_priv, wx_event_work.work);
 	union iwreq_data wrqu;
 	int len = ETH_ALEN;
 
diff --git a/drivers/net/wireless/ipw2100.h b/drivers/net/wireless/ipw2100.h
index 55b7227..de7d384 100644
--- a/drivers/net/wireless/ipw2100.h
+++ b/drivers/net/wireless/ipw2100.h
@@ -583,11 +583,11 @@
 	struct tasklet_struct irq_tasklet;
 
 	struct workqueue_struct *workqueue;
-	struct work_struct reset_work;
-	struct work_struct security_work;
-	struct work_struct wx_event_work;
-	struct work_struct hang_check;
-	struct work_struct rf_kill;
+	struct delayed_work reset_work;
+	struct delayed_work security_work;
+	struct delayed_work wx_event_work;
+	struct delayed_work hang_check;
+	struct delayed_work rf_kill;
 
 	u32 interrupts;
 	int tx_interrupts;
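
Where a field becomes a delayed_work (ipw2100 above, qla3xxx and bcm43xx earlier), call sites that queued it immediately switch from queue_work()/schedule_work() to the delayed variants with a delay of 0, which runs the work as soon as the workqueue gets to it. Minimal sketch around a hypothetical grault_priv:

#include <linux/workqueue.h>

struct grault_priv {
	struct workqueue_struct *workqueue;
	struct delayed_work reset_work;
};

static void grault_trigger_reset(struct grault_priv *priv)
{
	/* zero delay == run as soon as possible; replaces the old
	 * queue_work() call on what used to be a plain work_struct */
	queue_delayed_work(priv->workqueue, &priv->reset_work, 0);
}
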
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index c692d01..e82e56b 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -187,9 +187,9 @@
 static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
 static void ipw_rx_queue_replenish(void *);
 static int ipw_up(struct ipw_priv *);
-static void ipw_bg_up(void *);
+static void ipw_bg_up(struct work_struct *work);
 static void ipw_down(struct ipw_priv *);
-static void ipw_bg_down(void *);
+static void ipw_bg_down(struct work_struct *work);
 static int ipw_config(struct ipw_priv *);
 static int init_supported_rates(struct ipw_priv *priv,
 				struct ipw_supported_rates *prates);
@@ -862,11 +862,12 @@
 	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
-static void ipw_bg_led_link_on(void *data)
+static void ipw_bg_led_link_on(struct work_struct *work)
 {
-	struct ipw_priv *priv = data;
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, led_link_on.work);
 	mutex_lock(&priv->mutex);
-	ipw_led_link_on(data);
+	ipw_led_link_on(priv);
 	mutex_unlock(&priv->mutex);
 }
 
@@ -906,11 +907,12 @@
 	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
-static void ipw_bg_led_link_off(void *data)
+static void ipw_bg_led_link_off(struct work_struct *work)
 {
-	struct ipw_priv *priv = data;
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, led_link_off.work);
 	mutex_lock(&priv->mutex);
-	ipw_led_link_off(data);
+	ipw_led_link_off(priv);
 	mutex_unlock(&priv->mutex);
 }
 
@@ -985,11 +987,12 @@
 	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
-static void ipw_bg_led_activity_off(void *data)
+static void ipw_bg_led_activity_off(struct work_struct *work)
 {
-	struct ipw_priv *priv = data;
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, led_act_off.work);
 	mutex_lock(&priv->mutex);
-	ipw_led_activity_off(data);
+	ipw_led_activity_off(priv);
 	mutex_unlock(&priv->mutex);
 }
 
@@ -2228,11 +2231,12 @@
 	}
 }
 
-static void ipw_bg_adapter_restart(void *data)
+static void ipw_bg_adapter_restart(struct work_struct *work)
 {
-	struct ipw_priv *priv = data;
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, adapter_restart);
 	mutex_lock(&priv->mutex);
-	ipw_adapter_restart(data);
+	ipw_adapter_restart(priv);
 	mutex_unlock(&priv->mutex);
 }
 
@@ -2249,11 +2253,12 @@
 	}
 }
 
-static void ipw_bg_scan_check(void *data)
+static void ipw_bg_scan_check(struct work_struct *work)
 {
-	struct ipw_priv *priv = data;
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, scan_check.work);
 	mutex_lock(&priv->mutex);
-	ipw_scan_check(data);
+	ipw_scan_check(priv);
 	mutex_unlock(&priv->mutex);
 }
 
@@ -3831,17 +3836,19 @@
 	return 1;
 }
 
-static void ipw_bg_disassociate(void *data)
+static void ipw_bg_disassociate(struct work_struct *work)
 {
-	struct ipw_priv *priv = data;
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, disassociate);
 	mutex_lock(&priv->mutex);
-	ipw_disassociate(data);
+	ipw_disassociate(priv);
 	mutex_unlock(&priv->mutex);
 }
 
-static void ipw_system_config(void *data)
+static void ipw_system_config(struct work_struct *work)
 {
-	struct ipw_priv *priv = data;
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, system_config);
 
 #ifdef CONFIG_IPW2200_PROMISCUOUS
 	if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
@@ -4208,11 +4215,12 @@
 			   IPW_STATS_INTERVAL);
 }
 
-static void ipw_bg_gather_stats(void *data)
+static void ipw_bg_gather_stats(struct work_struct *work)
 {
-	struct ipw_priv *priv = data;
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, gather_stats.work);
 	mutex_lock(&priv->mutex);
-	ipw_gather_stats(data);
+	ipw_gather_stats(priv);
 	mutex_unlock(&priv->mutex);
 }
 
@@ -4268,8 +4276,8 @@
 		if (!(priv->status & STATUS_ROAMING)) {
 			priv->status |= STATUS_ROAMING;
 			if (!(priv->status & STATUS_SCANNING))
-				queue_work(priv->workqueue,
-					   &priv->request_scan);
+				queue_delayed_work(priv->workqueue,
+						   &priv->request_scan, 0);
 		}
 		return;
 	}
@@ -4607,8 +4615,8 @@
 #ifdef CONFIG_IPW2200_MONITOR
 			if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
 				priv->status |= STATUS_SCAN_FORCED;
-				queue_work(priv->workqueue,
-					   &priv->request_scan);
+				queue_delayed_work(priv->workqueue,
+						   &priv->request_scan, 0);
 				break;
 			}
 			priv->status &= ~STATUS_SCAN_FORCED;
@@ -4631,8 +4639,8 @@
 					/* Don't schedule if we aborted the scan */
 					priv->status &= ~STATUS_ROAMING;
 			} else if (priv->status & STATUS_SCAN_PENDING)
-				queue_work(priv->workqueue,
-					   &priv->request_scan);
+				queue_delayed_work(priv->workqueue,
+						   &priv->request_scan, 0);
 			else if (priv->config & CFG_BACKGROUND_SCAN
 				 && priv->status & STATUS_ASSOCIATED)
 				queue_delayed_work(priv->workqueue,
@@ -5055,11 +5063,12 @@
 	ipw_rx_queue_restock(priv);
 }
 
-static void ipw_bg_rx_queue_replenish(void *data)
+static void ipw_bg_rx_queue_replenish(struct work_struct *work)
 {
-	struct ipw_priv *priv = data;
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, rx_replenish);
 	mutex_lock(&priv->mutex);
-	ipw_rx_queue_replenish(data);
+	ipw_rx_queue_replenish(priv);
 	mutex_unlock(&priv->mutex);
 }
 
@@ -5489,9 +5498,10 @@
 	return 1;
 }
 
-static void ipw_merge_adhoc_network(void *data)
+static void ipw_merge_adhoc_network(struct work_struct *work)
 {
-	struct ipw_priv *priv = data;
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, merge_networks);
 	struct ieee80211_network *network = NULL;
 	struct ipw_network_match match = {
 		.network = priv->assoc_network
@@ -5948,11 +5958,12 @@
 			   priv->assoc_request.beacon_interval);
 }
 
-static void ipw_bg_adhoc_check(void *data)
+static void ipw_bg_adhoc_check(struct work_struct *work)
 {
-	struct ipw_priv *priv = data;
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, adhoc_check.work);
 	mutex_lock(&priv->mutex);
-	ipw_adhoc_check(data);
+	ipw_adhoc_check(priv);
 	mutex_unlock(&priv->mutex);
 }
 
@@ -6299,19 +6310,26 @@
 	return err;
 }
 
-static int ipw_request_passive_scan(struct ipw_priv *priv) {
-  	return ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE);
-}
-
-static int ipw_request_scan(struct ipw_priv *priv) {
-	return ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE);
-}
-
-static void ipw_bg_abort_scan(void *data)
+static void ipw_request_passive_scan(struct work_struct *work)
 {
-	struct ipw_priv *priv = data;
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, request_passive_scan);
+  	ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE);
+}
+
+static void ipw_request_scan(struct work_struct *work)
+{
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, request_scan.work);
+	ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE);
+}
+
+static void ipw_bg_abort_scan(struct work_struct *work)
+{
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, abort_scan);
 	mutex_lock(&priv->mutex);
-	ipw_abort_scan(data);
+	ipw_abort_scan(priv);
 	mutex_unlock(&priv->mutex);
 }
 
@@ -7084,9 +7102,10 @@
 /*
 * background support to run QoS activate functionality
 */
-static void ipw_bg_qos_activate(void *data)
+static void ipw_bg_qos_activate(struct work_struct *work)
 {
-	struct ipw_priv *priv = data;
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, qos_activate);
 
 	if (priv == NULL)
 		return;
@@ -7394,11 +7413,12 @@
 	priv->status &= ~STATUS_ROAMING;
 }
 
-static void ipw_bg_roam(void *data)
+static void ipw_bg_roam(struct work_struct *work)
 {
-	struct ipw_priv *priv = data;
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, roam);
 	mutex_lock(&priv->mutex);
-	ipw_roam(data);
+	ipw_roam(priv);
 	mutex_unlock(&priv->mutex);
 }
 
@@ -7479,8 +7499,8 @@
 						   &priv->request_scan,
 						   SCAN_INTERVAL);
 			else
-				queue_work(priv->workqueue,
-					   &priv->request_scan);
+				queue_delayed_work(priv->workqueue,
+						   &priv->request_scan, 0);
 		}
 
 		return 0;
@@ -7491,11 +7511,12 @@
 	return 1;
 }
 
-static void ipw_bg_associate(void *data)
+static void ipw_bg_associate(struct work_struct *work)
 {
-	struct ipw_priv *priv = data;
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, associate);
 	mutex_lock(&priv->mutex);
-	ipw_associate(data);
+	ipw_associate(priv);
 	mutex_unlock(&priv->mutex);
 }
 
@@ -9410,7 +9431,7 @@
 
 	IPW_DEBUG_WX("Start scan\n");
 
-	queue_work(priv->workqueue, &priv->request_scan);
+	queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
 
 	return 0;
 }
@@ -10547,11 +10568,12 @@
 	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
-static void ipw_bg_rf_kill(void *data)
+static void ipw_bg_rf_kill(struct work_struct *work)
 {
-	struct ipw_priv *priv = data;
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, rf_kill.work);
 	mutex_lock(&priv->mutex);
-	ipw_rf_kill(data);
+	ipw_rf_kill(priv);
 	mutex_unlock(&priv->mutex);
 }
 
@@ -10582,11 +10604,12 @@
 		queue_delayed_work(priv->workqueue, &priv->request_scan, HZ);
 }
 
-static void ipw_bg_link_up(void *data)
+static void ipw_bg_link_up(struct work_struct *work)
 {
-	struct ipw_priv *priv = data;
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, link_up);
 	mutex_lock(&priv->mutex);
-	ipw_link_up(data);
+	ipw_link_up(priv);
 	mutex_unlock(&priv->mutex);
 }
 
@@ -10606,15 +10629,16 @@
 
 	if (!(priv->status & STATUS_EXIT_PENDING)) {
 		/* Queue up another scan... */
-		queue_work(priv->workqueue, &priv->request_scan);
+		queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
 	}
 }
 
-static void ipw_bg_link_down(void *data)
+static void ipw_bg_link_down(struct work_struct *work)
 {
-	struct ipw_priv *priv = data;
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, link_down);
 	mutex_lock(&priv->mutex);
-	ipw_link_down(data);
+	ipw_link_down(priv);
 	mutex_unlock(&priv->mutex);
 }
 
@@ -10626,38 +10650,30 @@
 	init_waitqueue_head(&priv->wait_command_queue);
 	init_waitqueue_head(&priv->wait_state);
 
-	INIT_WORK(&priv->adhoc_check, ipw_bg_adhoc_check, priv);
-	INIT_WORK(&priv->associate, ipw_bg_associate, priv);
-	INIT_WORK(&priv->disassociate, ipw_bg_disassociate, priv);
-	INIT_WORK(&priv->system_config, ipw_system_config, priv);
-	INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish, priv);
-	INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart, priv);
-	INIT_WORK(&priv->rf_kill, ipw_bg_rf_kill, priv);
-	INIT_WORK(&priv->up, (void (*)(void *))ipw_bg_up, priv);
-	INIT_WORK(&priv->down, (void (*)(void *))ipw_bg_down, priv);
-	INIT_WORK(&priv->request_scan,
-		  (void (*)(void *))ipw_request_scan, priv);
-	INIT_WORK(&priv->request_passive_scan,
-		  (void (*)(void *))ipw_request_passive_scan, priv);
-	INIT_WORK(&priv->gather_stats,
-		  (void (*)(void *))ipw_bg_gather_stats, priv);
-	INIT_WORK(&priv->abort_scan, (void (*)(void *))ipw_bg_abort_scan, priv);
-	INIT_WORK(&priv->roam, ipw_bg_roam, priv);
-	INIT_WORK(&priv->scan_check, ipw_bg_scan_check, priv);
-	INIT_WORK(&priv->link_up, (void (*)(void *))ipw_bg_link_up, priv);
-	INIT_WORK(&priv->link_down, (void (*)(void *))ipw_bg_link_down, priv);
-	INIT_WORK(&priv->led_link_on, (void (*)(void *))ipw_bg_led_link_on,
-		  priv);
-	INIT_WORK(&priv->led_link_off, (void (*)(void *))ipw_bg_led_link_off,
-		  priv);
-	INIT_WORK(&priv->led_act_off, (void (*)(void *))ipw_bg_led_activity_off,
-		  priv);
-	INIT_WORK(&priv->merge_networks,
-		  (void (*)(void *))ipw_merge_adhoc_network, priv);
+	INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check);
+	INIT_WORK(&priv->associate, ipw_bg_associate);
+	INIT_WORK(&priv->disassociate, ipw_bg_disassociate);
+	INIT_WORK(&priv->system_config, ipw_system_config);
+	INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish);
+	INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart);
+	INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill);
+	INIT_WORK(&priv->up, ipw_bg_up);
+	INIT_WORK(&priv->down, ipw_bg_down);
+	INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan);
+	INIT_WORK(&priv->request_passive_scan, ipw_request_passive_scan);
+	INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats);
+	INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan);
+	INIT_WORK(&priv->roam, ipw_bg_roam);
+	INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check);
+	INIT_WORK(&priv->link_up, ipw_bg_link_up);
+	INIT_WORK(&priv->link_down, ipw_bg_link_down);
+	INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on);
+	INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off);
+	INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off);
+	INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network);
 
 #ifdef CONFIG_IPW2200_QOS
-	INIT_WORK(&priv->qos_activate, (void (*)(void *))ipw_bg_qos_activate,
-		  priv);
+	INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
 #endif				/* CONFIG_IPW2200_QOS */
 
 	tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
@@ -11190,7 +11206,8 @@
 
 			/* If configure to try and auto-associate, kick
 			 * off a scan. */
-			queue_work(priv->workqueue, &priv->request_scan);
+			queue_delayed_work(priv->workqueue,
+					   &priv->request_scan, 0);
 
 			return 0;
 		}
@@ -11211,11 +11228,12 @@
 	return -EIO;
 }
 
-static void ipw_bg_up(void *data)
+static void ipw_bg_up(struct work_struct *work)
 {
-	struct ipw_priv *priv = data;
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, up);
 	mutex_lock(&priv->mutex);
-	ipw_up(data);
+	ipw_up(priv);
 	mutex_unlock(&priv->mutex);
 }
 
@@ -11282,11 +11300,12 @@
 	ipw_led_radio_off(priv);
 }
 
-static void ipw_bg_down(void *data)
+static void ipw_bg_down(struct work_struct *work)
 {
-	struct ipw_priv *priv = data;
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, down);
 	mutex_lock(&priv->mutex);
-	ipw_down(data);
+	ipw_down(priv);
 	mutex_unlock(&priv->mutex);
 }
 
diff --git a/drivers/net/wireless/ipw2200.h b/drivers/net/wireless/ipw2200.h
index dad5eed..626a240 100644
--- a/drivers/net/wireless/ipw2200.h
+++ b/drivers/net/wireless/ipw2200.h
@@ -1290,21 +1290,21 @@
 
 	struct workqueue_struct *workqueue;
 
-	struct work_struct adhoc_check;
+	struct delayed_work adhoc_check;
 	struct work_struct associate;
 	struct work_struct disassociate;
 	struct work_struct system_config;
 	struct work_struct rx_replenish;
-	struct work_struct request_scan;
+	struct delayed_work request_scan;
   	struct work_struct request_passive_scan;
 	struct work_struct adapter_restart;
-	struct work_struct rf_kill;
+	struct delayed_work rf_kill;
 	struct work_struct up;
 	struct work_struct down;
-	struct work_struct gather_stats;
+	struct delayed_work gather_stats;
 	struct work_struct abort_scan;
 	struct work_struct roam;
-	struct work_struct scan_check;
+	struct delayed_work scan_check;
 	struct work_struct link_up;
 	struct work_struct link_down;
 
@@ -1319,9 +1319,9 @@
 	u32 led_ofdm_on;
 	u32 led_ofdm_off;
 
-	struct work_struct led_link_on;
-	struct work_struct led_link_off;
-	struct work_struct led_act_off;
+	struct delayed_work led_link_on;
+	struct delayed_work led_link_off;
+	struct delayed_work led_act_off;
 	struct work_struct merge_networks;
 
 	struct ipw_cmd_log *cmdlog;
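
The ipw2200.h hunk above illustrates the rule of thumb applied across the series: a field appears to become struct delayed_work only when some caller schedules it with a nonzero delay (adhoc_check, request_scan, rf_kill, gather_stats, scan_check and the LED items); work that is only ever queued for immediate execution stays a plain work_struct. For the handlers, the two forms differ only in the container_of() member path, roughly:

#include <linux/workqueue.h>

/* garply_priv is a made-up container mirroring the header above */
struct garply_priv {
	struct work_struct associate;		/* queued immediately only */
	struct delayed_work request_scan;	/* sometimes queued with a delay */
};

static void garply_bg_associate(struct work_struct *work)
{
	struct garply_priv *priv =
		container_of(work, struct garply_priv, associate);
	(void)priv;
}

static void garply_bg_request_scan(struct work_struct *work)
{
	struct garply_priv *priv =
		container_of(work, struct garply_priv, request_scan.work);
	(void)priv;
}
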
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index 336caba..936c888 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -980,9 +980,11 @@
 }
 
 /* Search scan results for requested BSSID, join it if found */
-static void orinoco_join_ap(struct net_device *dev)
+static void orinoco_join_ap(struct work_struct *work)
 {
-	struct orinoco_private *priv = netdev_priv(dev);
+	struct orinoco_private *priv =
+		container_of(work, struct orinoco_private, join_work);
+	struct net_device *dev = priv->ndev;
 	struct hermes *hw = &priv->hw;
 	int err;
 	unsigned long flags;
@@ -1055,9 +1057,11 @@
 }
 
 /* Send new BSSID to userspace */
-static void orinoco_send_wevents(struct net_device *dev)
+static void orinoco_send_wevents(struct work_struct *work)
 {
-	struct orinoco_private *priv = netdev_priv(dev);
+	struct orinoco_private *priv =
+		container_of(work, struct orinoco_private, wevent_work);
+	struct net_device *dev = priv->ndev;
 	struct hermes *hw = &priv->hw;
 	union iwreq_data wrqu;
 	int err;
@@ -1864,9 +1868,11 @@
 
 /* This must be called from user context, without locks held - use
  * schedule_work() */
-static void orinoco_reset(struct net_device *dev)
+static void orinoco_reset(struct work_struct *work)
 {
-	struct orinoco_private *priv = netdev_priv(dev);
+	struct orinoco_private *priv =
+		container_of(work, struct orinoco_private, reset_work);
+	struct net_device *dev = priv->ndev;
 	struct hermes *hw = &priv->hw;
 	int err;
 	unsigned long flags;
@@ -2434,9 +2440,9 @@
 	priv->hw_unavailable = 1; /* orinoco_init() must clear this
 				   * before anything else touches the
 				   * hardware */
-	INIT_WORK(&priv->reset_work, (void (*)(void *))orinoco_reset, dev);
-	INIT_WORK(&priv->join_work, (void (*)(void *))orinoco_join_ap, dev);
-	INIT_WORK(&priv->wevent_work, (void (*)(void *))orinoco_send_wevents, dev);
+	INIT_WORK(&priv->reset_work, orinoco_reset);
+	INIT_WORK(&priv->join_work, orinoco_join_ap);
+	INIT_WORK(&priv->wevent_work, orinoco_send_wevents);
 
 	netif_carrier_off(dev);
 	priv->last_linkstatus = 0xffff;
@@ -3608,7 +3614,7 @@
 		printk(KERN_DEBUG "%s: Forcing reset!\n", dev->name);
 
 		/* Firmware reset */
-		orinoco_reset(dev);
+		orinoco_reset(&priv->reset_work);
 	} else {
 		printk(KERN_DEBUG "%s: Force scheduling reset!\n", dev->name);
 
@@ -4154,7 +4160,7 @@
 		return 0;
 
 	if (priv->broken_disableport) {
-		orinoco_reset(dev);
+		orinoco_reset(&priv->reset_work);
 		return 0;
 	}
 
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 4a20e45..a87eb51 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -157,8 +157,9 @@
  * schedule_work(), thus we can as well use sleeping semaphore
  * locking */
 void
-prism54_update_stats(islpci_private *priv)
+prism54_update_stats(struct work_struct *work)
 {
+	islpci_private *priv = container_of(work, islpci_private, stats_work);
 	char *data;
 	int j;
 	struct obj_bss bss, *bss2;
@@ -2493,9 +2494,10 @@
  * interrupt context, no locks held.
  */
 void
-prism54_process_trap(void *data)
+prism54_process_trap(struct work_struct *work)
 {
-	struct islpci_mgmtframe *frame = data;
+	struct islpci_mgmtframe *frame =
+		container_of(work, struct islpci_mgmtframe, ws);
 	struct net_device *ndev = frame->ndev;
 	enum oid_num_t n = mgt_oidtonum(frame->header->oid);
 
diff --git a/drivers/net/wireless/prism54/isl_ioctl.h b/drivers/net/wireless/prism54/isl_ioctl.h
index e8183d3..bcfbfb9 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.h
+++ b/drivers/net/wireless/prism54/isl_ioctl.h
@@ -31,12 +31,12 @@
 void prism54_mib_init(islpci_private *);
 
 struct iw_statistics *prism54_get_wireless_stats(struct net_device *);
-void prism54_update_stats(islpci_private *);
+void prism54_update_stats(struct work_struct *);
 
 void prism54_acl_init(struct islpci_acl *);
 void prism54_acl_clean(struct islpci_acl *);
 
-void prism54_process_trap(void *);
+void prism54_process_trap(struct work_struct *);
 
 void prism54_wpa_bss_ie_init(islpci_private *priv);
 void prism54_wpa_bss_ie_clean(islpci_private *priv);
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index 1e0603c..f057fd9 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -860,11 +860,10 @@
 	priv->state_off = 1;
 
 	/* initialize workqueue's */
-	INIT_WORK(&priv->stats_work,
-		  (void (*)(void *)) prism54_update_stats, priv);
+	INIT_WORK(&priv->stats_work, prism54_update_stats);
 	priv->stats_timestamp = 0;
 
-	INIT_WORK(&priv->reset_task, islpci_do_reset_and_wake, priv);
+	INIT_WORK(&priv->reset_task, islpci_do_reset_and_wake);
 	priv->reset_task_pending = 0;
 
 	/* allocate various memory areas */
diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c
index 676d838..b112291 100644
--- a/drivers/net/wireless/prism54/islpci_eth.c
+++ b/drivers/net/wireless/prism54/islpci_eth.c
@@ -480,9 +480,9 @@
 }
 
 void
-islpci_do_reset_and_wake(void *data)
+islpci_do_reset_and_wake(struct work_struct *work)
 {
-	islpci_private *priv = data;
+	islpci_private *priv = container_of(work, islpci_private, reset_task);
 
 	islpci_reset(priv, 1);
 	priv->reset_task_pending = 0;
diff --git a/drivers/net/wireless/prism54/islpci_eth.h b/drivers/net/wireless/prism54/islpci_eth.h
index 2678945..5bf820d 100644
--- a/drivers/net/wireless/prism54/islpci_eth.h
+++ b/drivers/net/wireless/prism54/islpci_eth.h
@@ -67,6 +67,6 @@
 int islpci_eth_transmit(struct sk_buff *, struct net_device *);
 int islpci_eth_receive(islpci_private *);
 void islpci_eth_tx_timeout(struct net_device *);
-void islpci_do_reset_and_wake(void *data);
+void islpci_do_reset_and_wake(struct work_struct *);
 
 #endif				/* _ISL_GEN_H */
diff --git a/drivers/net/wireless/prism54/islpci_mgt.c b/drivers/net/wireless/prism54/islpci_mgt.c
index 036a875..2246f79 100644
--- a/drivers/net/wireless/prism54/islpci_mgt.c
+++ b/drivers/net/wireless/prism54/islpci_mgt.c
@@ -386,7 +386,7 @@
 
 			/* Create work to handle trap out of interrupt
 			 * context. */
-			INIT_WORK(&frame->ws, prism54_process_trap, frame);
+			INIT_WORK(&frame->ws, prism54_process_trap);
 			schedule_work(&frame->ws);
 
 		} else {
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 2696f95..f1573a9 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -32,8 +32,8 @@
 
 static void ieee_init(struct ieee80211_device *ieee);
 static void softmac_init(struct ieee80211softmac_device *sm);
-static void set_rts_cts_work(void *d);
-static void set_basic_rates_work(void *d);
+static void set_rts_cts_work(struct work_struct *work);
+static void set_basic_rates_work(struct work_struct *work);
 
 static void housekeeping_init(struct zd_mac *mac);
 static void housekeeping_enable(struct zd_mac *mac);
@@ -48,8 +48,8 @@
 	memset(mac, 0, sizeof(*mac));
 	spin_lock_init(&mac->lock);
 	mac->netdev = netdev;
-	INIT_WORK(&mac->set_rts_cts_work, set_rts_cts_work, mac);
-	INIT_WORK(&mac->set_basic_rates_work, set_basic_rates_work, mac);
+	INIT_DELAYED_WORK(&mac->set_rts_cts_work, set_rts_cts_work);
+	INIT_DELAYED_WORK(&mac->set_basic_rates_work, set_basic_rates_work);
 
 	ieee_init(ieee);
 	softmac_init(ieee80211_priv(netdev));
@@ -366,9 +366,10 @@
 	spin_unlock_irqrestore(&mac->lock, flags);
 }
 
-static void set_rts_cts_work(void *d)
+static void set_rts_cts_work(struct work_struct *work)
 {
-	struct zd_mac *mac = d;
+	struct zd_mac *mac =
+		container_of(work, struct zd_mac, set_rts_cts_work.work);
 	unsigned long flags;
 	u8 rts_rate;
 	unsigned int short_preamble;
@@ -387,9 +388,10 @@
 	try_enable_tx(mac);
 }
 
-static void set_basic_rates_work(void *d)
+static void set_basic_rates_work(struct work_struct *work)
 {
-	struct zd_mac *mac = d;
+	struct zd_mac *mac =
+		container_of(work, struct zd_mac, set_basic_rates_work.work);
 	unsigned long flags;
 	u16 basic_rates;
 
@@ -467,12 +469,13 @@
 	if (need_set_rts_cts && !mac->updating_rts_rate) {
 		mac->updating_rts_rate = 1;
 		netif_stop_queue(mac->netdev);
-		queue_work(zd_workqueue, &mac->set_rts_cts_work);
+		queue_delayed_work(zd_workqueue, &mac->set_rts_cts_work, 0);
 	}
 	if (need_set_rates && !mac->updating_basic_rates) {
 		mac->updating_basic_rates = 1;
 		netif_stop_queue(mac->netdev);
-		queue_work(zd_workqueue, &mac->set_basic_rates_work);
+		queue_delayed_work(zd_workqueue, &mac->set_basic_rates_work,
+				   0);
 	}
 	spin_unlock_irqrestore(&mac->lock, flags);
 }
@@ -1182,9 +1185,10 @@
 
 #define LINK_LED_WORK_DELAY HZ
 
-static void link_led_handler(void *p)
+static void link_led_handler(struct work_struct *work)
 {
-	struct zd_mac *mac = p;
+	struct zd_mac *mac =
+		container_of(work, struct zd_mac, housekeeping.link_led_work.work);
 	struct zd_chip *chip = &mac->chip;
 	struct ieee80211softmac_device *sm = ieee80211_priv(mac->netdev);
 	int is_associated;
@@ -1205,7 +1209,7 @@
 
 static void housekeeping_init(struct zd_mac *mac)
 {
-	INIT_WORK(&mac->housekeeping.link_led_work, link_led_handler, mac);
+	INIT_DELAYED_WORK(&mac->housekeeping.link_led_work, link_led_handler);
 }
 
 static void housekeeping_enable(struct zd_mac *mac)
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h
index 5dcfb25..d4e8b87 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.h
+++ b/drivers/net/wireless/zd1211rw/zd_mac.h
@@ -119,7 +119,7 @@
 #define ZD_RX_ERROR			0x80
 
 struct housekeeping {
-	struct work_struct link_led_work;
+	struct delayed_work link_led_work;
 };
 
 #define ZD_MAC_STATS_BUFFER_SIZE 16
@@ -133,8 +133,8 @@
 	struct iw_statistics iw_stats;
 
 	struct housekeeping housekeeping;
-	struct work_struct set_rts_cts_work;
-	struct work_struct set_basic_rates_work;
+	struct delayed_work set_rts_cts_work;
+	struct delayed_work set_basic_rates_work;
 
 	unsigned int stats_count;
 	u8 qual_buffer[ZD_MAC_STATS_BUFFER_SIZE];
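
The orinoco and zd1211rw conversions above also show the second half of the pattern: since the handler no longer receives an arbitrary data pointer, any secondary object it needs (here a net_device) must be reachable from the structure that embeds the work item, typically via a back-pointer saved at setup time. A sketch under that assumption, with hypothetical names (wifi_priv, wifi_reset, wifi_setup):

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct wifi_priv {
	struct net_device *ndev;	/* back-pointer saved at init */
	struct work_struct reset_work;
};

static void wifi_reset(struct work_struct *work)
{
	struct wifi_priv *priv =
		container_of(work, struct wifi_priv, reset_work);
	struct net_device *dev = priv->ndev;

	netif_stop_queue(dev);
	/* ... reprogram the hardware ... */
	netif_wake_queue(dev);
}

/* assumes dev was allocated with alloc_netdev(sizeof(struct wifi_priv), ...) */
static void wifi_setup(struct net_device *dev)
{
	struct wifi_priv *priv = netdev_priv(dev);

	priv->ndev = dev;
	INIT_WORK(&priv->reset_work, wifi_reset);
}
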
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index fc4bc9b..a83c3db 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -29,7 +29,7 @@
 
 struct oprofile_cpu_buffer cpu_buffer[NR_CPUS] __cacheline_aligned;
 
-static void wq_sync_buffer(void *);
+static void wq_sync_buffer(struct work_struct *work);
 
 #define DEFAULT_TIMER_EXPIRE (HZ / 10)
 static int work_enabled;
@@ -65,7 +65,7 @@
 		b->sample_received = 0;
 		b->sample_lost_overflow = 0;
 		b->cpu = i;
-		INIT_WORK(&b->work, wq_sync_buffer, b);
+		INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
 	}
 	return 0;
 
@@ -282,9 +282,10 @@
  * By using schedule_delayed_work_on and then schedule_delayed_work
  * we guarantee this will stay on the correct cpu
  */
-static void wq_sync_buffer(void * data)
+static void wq_sync_buffer(struct work_struct *work)
 {
-	struct oprofile_cpu_buffer * b = data;
+	struct oprofile_cpu_buffer * b =
+		container_of(work, struct oprofile_cpu_buffer, work.work);
 	if (b->cpu != smp_processor_id()) {
 		printk("WQ on CPU%d, prefer CPU%d\n",
 		       smp_processor_id(), b->cpu);
diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h
index 09abb80..49900d9 100644
--- a/drivers/oprofile/cpu_buffer.h
+++ b/drivers/oprofile/cpu_buffer.h
@@ -43,7 +43,7 @@
 	unsigned long sample_lost_overflow;
 	unsigned long backtrace_aborted;
 	int cpu;
-	struct work_struct work;
+	struct delayed_work work;
 } ____cacheline_aligned;
 
 extern struct oprofile_cpu_buffer cpu_buffer[];
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 39c9664..b61c17b 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -1975,7 +1975,7 @@
 /* --- IRQ detection -------------------------------------- */
 
 /* Only if supports ECP mode */
-static int __devinit programmable_irq_support(struct parport *pb)
+static int programmable_irq_support(struct parport *pb)
 {
 	int irq, intrLine;
 	unsigned char oecr = inb (ECONTROL (pb));
@@ -1992,7 +1992,7 @@
 	return irq;
 }
 
-static int __devinit irq_probe_ECP(struct parport *pb)
+static int irq_probe_ECP(struct parport *pb)
 {
 	int i;
 	unsigned long irqs;
@@ -2020,7 +2020,7 @@
  * This detection seems that only works in National Semiconductors
  * This doesn't work in SMC, LGS, and Winbond 
  */
-static int __devinit irq_probe_EPP(struct parport *pb)
+static int irq_probe_EPP(struct parport *pb)
 {
 #ifndef ADVANCED_DETECT
 	return PARPORT_IRQ_NONE;
@@ -2059,7 +2059,7 @@
 #endif /* Advanced detection */
 }
 
-static int __devinit irq_probe_SPP(struct parport *pb)
+static int irq_probe_SPP(struct parport *pb)
 {
 	/* Don't even try to do this. */
 	return PARPORT_IRQ_NONE;
@@ -2747,6 +2747,7 @@
 	titan_1284p2,
 	avlab_1p,
 	avlab_2p,
+	oxsemi_952,
 	oxsemi_954,
 	oxsemi_840,
 	aks_0100,
@@ -2822,6 +2823,7 @@
 	/* avlab_2p		*/	{ 2, { { 0, 1}, { 2, 3 },} },
 	/* The Oxford Semi cards are unusual: 954 doesn't support ECP,
 	 * and 840 locks up if you write 1 to bit 2! */
+	/* oxsemi_952 */		{ 1, { { 0, 1 }, } },
 	/* oxsemi_954 */		{ 1, { { 0, -1 }, } },
 	/* oxsemi_840 */		{ 1, { { 0, -1 }, } },
 	/* aks_0100 */                  { 1, { { 0, -1 }, } },
@@ -2895,6 +2897,8 @@
 	/* PCI_VENDOR_ID_AVLAB/Intek21 has another bunch of cards ...*/
 	{ 0x14db, 0x2120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_1p}, /* AFAVLAB_TK9902 */
 	{ 0x14db, 0x2121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_2p},
+	{ PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI952PP,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_952 },
 	{ PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI954PP,
 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_954 },
 	{ PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_12PCI840,
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index ea2087c..5075769 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -70,7 +70,7 @@
 	struct hotplug_slot *hotplug_slot;
 	struct list_head	slot_list;
 	char name[SLOT_NAME_SIZE];
-	struct work_struct work;	/* work for button event */
+	struct delayed_work work;	/* work for button event */
 	struct mutex lock;
 };
 
@@ -187,7 +187,7 @@
 extern int	shpchp_unconfigure_device(struct slot *p_slot);
 extern void	shpchp_remove_ctrl_files(struct controller *ctrl);
 extern void	cleanup_slots(struct controller *ctrl);
-extern void	queue_pushbutton_work(void *data);
+extern void	queue_pushbutton_work(struct work_struct *work);
 
 
 #ifdef CONFIG_ACPI
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index 235c18a..4eac85b 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -159,7 +159,7 @@
 			goto error_info;
 
 		slot->number = sun;
-		INIT_WORK(&slot->work, queue_pushbutton_work, slot);
+		INIT_DELAYED_WORK(&slot->work, queue_pushbutton_work);
 
 		/* register this slot with the hotplug pci core */
 		hotplug_slot->private = slot;
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index c39901d..158ac78 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -36,7 +36,7 @@
 #include "../pci.h"
 #include "shpchp.h"
 
-static void interrupt_event_handler(void *data);
+static void interrupt_event_handler(struct work_struct *work);
 static int shpchp_enable_slot(struct slot *p_slot);
 static int shpchp_disable_slot(struct slot *p_slot);
 
@@ -50,7 +50,7 @@
 
 	info->event_type = event_type;
 	info->p_slot = p_slot;
-	INIT_WORK(&info->work, interrupt_event_handler, info);
+	INIT_WORK(&info->work, interrupt_event_handler);
 
 	schedule_work(&info->work);
 
@@ -408,9 +408,10 @@
  * Handles all pending events and exits.
  *
  */
-static void shpchp_pushbutton_thread(void *data)
+static void shpchp_pushbutton_thread(struct work_struct *work)
 {
-	struct pushbutton_work_info *info = data;
+	struct pushbutton_work_info *info =
+		container_of(work, struct pushbutton_work_info, work);
 	struct slot *p_slot = info->p_slot;
 
 	mutex_lock(&p_slot->lock);
@@ -436,9 +437,9 @@
 	kfree(info);
 }
 
-void queue_pushbutton_work(void *data)
+void queue_pushbutton_work(struct work_struct *work)
 {
-	struct slot *p_slot = data;
+	struct slot *p_slot = container_of(work, struct slot, work.work);
 	struct pushbutton_work_info *info;
 
 	info = kmalloc(sizeof(*info), GFP_KERNEL);
@@ -447,7 +448,7 @@
 		return;
 	}
 	info->p_slot = p_slot;
-	INIT_WORK(&info->work, shpchp_pushbutton_thread, info);
+	INIT_WORK(&info->work, shpchp_pushbutton_thread);
 
 	mutex_lock(&p_slot->lock);
 	switch (p_slot->state) {
@@ -541,9 +542,9 @@
 	}
 }
 
-static void interrupt_event_handler(void *data)
+static void interrupt_event_handler(struct work_struct *work)
 {
-	struct event_info *info = data;
+	struct event_info *info = container_of(work, struct event_info, work);
 	struct slot *p_slot = info->p_slot;
 
 	mutex_lock(&p_slot->lock);
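
The shpchp pushbutton and interrupt events above use one-shot work items that are allocated per event and freed by their own handler once they have run. A hedged sketch of that idiom, with hypothetical names (bar_event, bar_event_handler, bar_queue_event):

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct bar_event {
	struct work_struct work;
	int type;
};

static void bar_event_handler(struct work_struct *work)
{
	struct bar_event *ev = container_of(work, struct bar_event, work);

	/* ... handle ev->type ... */
	kfree(ev);		/* a one-shot item owns its own memory */
}

static int bar_queue_event(int type)
{
	struct bar_event *ev = kmalloc(sizeof(*ev), GFP_KERNEL);

	if (!ev)
		return -ENOMEM;
	ev->type = type;
	INIT_WORK(&ev->work, bar_event_handler);
	schedule_work(&ev->work);
	return 0;
}
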
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 9fc9a34..9168401 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -26,7 +26,7 @@
 
 static DEFINE_SPINLOCK(msi_lock);
 static struct msi_desc* msi_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = NULL };
-static kmem_cache_t* msi_cachep;
+static struct kmem_cache* msi_cachep;
 
 static int pci_msi_enable = 1;
 
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 04c43ef..55866b6 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -160,7 +160,7 @@
 	rpc->e_lock = SPIN_LOCK_UNLOCKED;
 
 	rpc->rpd = dev;
-	INIT_WORK(&rpc->dpc_handler, aer_isr, (void *)dev);
+	INIT_WORK(&rpc->dpc_handler, aer_isr);
 	rpc->prod_idx = rpc->cons_idx = 0;
 	mutex_init(&rpc->rpc_mutex);
 	init_waitqueue_head(&rpc->wait_release);
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index daf0cad..3c0a58f 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -118,7 +118,7 @@
 extern void aer_enable_rootport(struct aer_rpc *rpc);
 extern void aer_delete_rootport(struct aer_rpc *rpc);
 extern int aer_init(struct pcie_device *dev);
-extern void aer_isr(void *context);
+extern void aer_isr(struct work_struct *work);
 extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
 extern int aer_osc_setup(struct pci_dev *dev);
 
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 1c7e660..08e1303 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -690,14 +690,14 @@
 
 /**
  * aer_isr - consume errors detected by root port
- * @context: pointer to a private data of pcie device
+ * @work: definition of this work item
  *
  * Invoked, as DPC, when root port records new detected error
  **/
-void aer_isr(void *context)
+void aer_isr(struct work_struct *work)
 {
-	struct pcie_device *p_device = (struct pcie_device *) context;
-	struct aer_rpc *rpc = get_service_data(p_device);
+	struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler);
+	struct pcie_device *p_device = rpc->rpd;
 	struct aer_err_source *e_src;
 
 	mutex_lock(&rpc->rpc_mutex);
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 0eeac60..6a3c1e7 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -873,6 +873,7 @@
 	dev->dev.release = pci_release_dev;
 	pci_dev_get(dev);
 
+	set_dev_node(&dev->dev, pcibus_to_node(bus));
 	dev->dev.dma_mask = &dev->dma_mask;
 	dev->dev.coherent_dma_mask = 0xffffffffull;
 
diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
index f9cd831..606a467 100644
--- a/drivers/pcmcia/cs.c
+++ b/drivers/pcmcia/cs.c
@@ -29,6 +29,7 @@
 #include <linux/pci.h>
 #include <linux/device.h>
 #include <linux/kthread.h>
+#include <linux/freezer.h>
 #include <asm/system.h>
 #include <asm/irq.h>
 
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 45df12e..7355eb4 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -675,9 +675,10 @@
 }
 
 
-static void pcmcia_delayed_add_device(void *data)
+static void pcmcia_delayed_add_device(struct work_struct *work)
 {
-	struct pcmcia_socket *s = data;
+	struct pcmcia_socket *s =
+		container_of(work, struct pcmcia_socket, device_add);
 	ds_dbg(1, "adding additional device to %d\n", s->sock);
 	pcmcia_device_add(s, s->pcmcia_state.mfc_pfc);
 	s->pcmcia_state.device_add_pending = 0;
@@ -1349,7 +1350,7 @@
 	init_waitqueue_head(&socket->queue);
 #endif
 	INIT_LIST_HEAD(&socket->devices_list);
-	INIT_WORK(&socket->device_add, pcmcia_delayed_add_device, socket);
+	INIT_WORK(&socket->device_add, pcmcia_delayed_add_device);
 	memset(&socket->pcmcia_state, 0, sizeof(u8));
 	socket->device_count = 0;
 
diff --git a/drivers/pnp/card.c b/drivers/pnp/card.c
index 227600c..91c047a 100644
--- a/drivers/pnp/card.c
+++ b/drivers/pnp/card.c
@@ -164,9 +164,17 @@
 
 static int pnp_interface_attach_card(struct pnp_card *card)
 {
-	device_create_file(&card->dev,&dev_attr_name);
-	device_create_file(&card->dev,&dev_attr_card_id);
+	int rc = device_create_file(&card->dev,&dev_attr_name);
+	if (rc) return rc;
+
+	rc = device_create_file(&card->dev,&dev_attr_card_id);
+	if (rc) goto err_name;
+
 	return 0;
+
+err_name:
+	device_remove_file(&card->dev,&dev_attr_name);
+	return rc;
 }
 
 /**
@@ -306,16 +314,20 @@
 	down_write(&dev->dev.bus->subsys.rwsem);
 	dev->card_link = clink;
 	dev->dev.driver = &drv->link.driver;
-	if (pnp_bus_type.probe(&dev->dev)) {
-		dev->dev.driver = NULL;
-		dev->card_link = NULL;
-		up_write(&dev->dev.bus->subsys.rwsem);
-		return NULL;
-	}
-	device_bind_driver(&dev->dev);
+	if (pnp_bus_type.probe(&dev->dev))
+		goto err_out;
+	if (device_bind_driver(&dev->dev))
+		goto err_out;
+
 	up_write(&dev->dev.bus->subsys.rwsem);
 
 	return dev;
+
+err_out:
+	dev->dev.driver = NULL;
+	dev->card_link = NULL;
+	up_write(&dev->dev.bus->subsys.rwsem);
+	return NULL;
 }
 
 /**
diff --git a/drivers/pnp/interface.c b/drivers/pnp/interface.c
index 9d8b415..ac9fcd4 100644
--- a/drivers/pnp/interface.c
+++ b/drivers/pnp/interface.c
@@ -461,8 +461,19 @@
 
 int pnp_interface_attach_device(struct pnp_dev *dev)
 {
-	device_create_file(&dev->dev,&dev_attr_options);
-	device_create_file(&dev->dev,&dev_attr_resources);
-	device_create_file(&dev->dev,&dev_attr_id);
+	int rc = device_create_file(&dev->dev,&dev_attr_options);
+	if (rc) goto err;
+	rc = device_create_file(&dev->dev,&dev_attr_resources);
+	if (rc) goto err_opt;
+	rc = device_create_file(&dev->dev,&dev_attr_id);
+	if (rc) goto err_res;
+
 	return 0;
+
+err_res:
+	device_remove_file(&dev->dev,&dev_attr_resources);
+err_opt:
+	device_remove_file(&dev->dev,&dev_attr_options);
+err:
+	return rc;
 }
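
The PnP hunks above (and several of the RTC ones further down) stop ignoring the return value of device_create_file() and unwind any attributes that were already created when a later call fails. A minimal sketch of that unwind pattern; the attributes and their show routines are hypothetical:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/stat.h>

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "foo\n");
}
static DEVICE_ATTR(foo, S_IRUGO, foo_show, NULL);

static ssize_t bar_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "bar\n");
}
static DEVICE_ATTR(bar, S_IRUGO, bar_show, NULL);

static int example_attach_attrs(struct device *dev)
{
	int rc = device_create_file(dev, &dev_attr_foo);
	if (rc)
		return rc;

	rc = device_create_file(dev, &dev_attr_bar);
	if (rc)
		goto err_foo;

	return 0;

err_foo:
	/* undo the attribute that did succeed before reporting the error */
	device_remove_file(dev, &dev_attr_foo);
	return rc;
}
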
diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
index 80066ad..33adeba 100644
--- a/drivers/pnp/pnpbios/core.c
+++ b/drivers/pnp/pnpbios/core.c
@@ -61,6 +61,7 @@
 #include <linux/dmi.h>
 #include <linux/delay.h>
 #include <linux/acpi.h>
+#include <linux/freezer.h>
 
 #include <asm/page.h>
 #include <asm/desc.h>
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index fc766a7..2a63ab2 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -154,15 +154,23 @@
 	  will be called rtc-ds1672.
 
 config RTC_DRV_DS1742
-	tristate "Dallas DS1742"
+	tristate "Dallas DS1742/1743"
 	depends on RTC_CLASS
 	help
 	  If you say yes here you get support for the
-	  Dallas DS1742 timekeeping chip.
+	  Dallas DS1742/1743 timekeeping chip.
 
 	  This driver can also be built as a module. If so, the module
 	  will be called rtc-ds1742.
 
+config RTC_DRV_OMAP
+	tristate "TI OMAP1"
+	depends on RTC_CLASS && ( \
+		ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730 )
+	help
+	  Say "yes" here to support the real time clock on TI OMAP1 chips.
+	  This driver can also be built as a module called rtc-omap.
+
 config RTC_DRV_PCF8563
 	tristate "Philips PCF8563/Epson RTC8564"
 	depends on RTC_CLASS && I2C
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 3ba5ff6..bd4c45d 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -21,6 +21,7 @@
 obj-$(CONFIG_RTC_DRV_DS1307)	+= rtc-ds1307.o
 obj-$(CONFIG_RTC_DRV_DS1672)	+= rtc-ds1672.o
 obj-$(CONFIG_RTC_DRV_DS1742)	+= rtc-ds1742.o
+obj-$(CONFIG_RTC_DRV_OMAP)	+= rtc-omap.o
 obj-$(CONFIG_RTC_DRV_PCF8563)	+= rtc-pcf8563.o
 obj-$(CONFIG_RTC_DRV_PCF8583)	+= rtc-pcf8583.o
 obj-$(CONFIG_RTC_DRV_RS5C372)	+= rtc-rs5c372.o
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 814b9e1..828b329 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -53,9 +53,10 @@
  * Routine to poll RTC seconds field for change as often as possible,
  * after first RTC_UIE use timer to reduce polling
  */
-static void rtc_uie_task(void *data)
+static void rtc_uie_task(struct work_struct *work)
 {
-	struct rtc_device *rtc = data;
+	struct rtc_device *rtc =
+		container_of(work, struct rtc_device, uie_task);
 	struct rtc_time tm;
 	int num = 0;
 	int err;
@@ -411,7 +412,7 @@
 	spin_lock_init(&rtc->irq_lock);
 	init_waitqueue_head(&rtc->irq_queue);
 #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
-	INIT_WORK(&rtc->uie_task, rtc_uie_task, rtc);
+	INIT_WORK(&rtc->uie_task, rtc_uie_task);
 	setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc);
 #endif
 
diff --git a/drivers/rtc/rtc-ds1672.c b/drivers/rtc/rtc-ds1672.c
index 67e816a..dfef163 100644
--- a/drivers/rtc/rtc-ds1672.c
+++ b/drivers/rtc/rtc-ds1672.c
@@ -237,17 +237,22 @@
 	/* read control register */
 	err = ds1672_get_control(client, &control);
 	if (err)
-		goto exit_detach;
+		goto exit_devreg;
 
 	if (control & DS1672_REG_CONTROL_EOSC)
 		dev_warn(&client->dev, "Oscillator not enabled. "
 					"Set time to enable.\n");
 
 	/* Register sysfs hooks */
-	device_create_file(&client->dev, &dev_attr_control);
+	err = device_create_file(&client->dev, &dev_attr_control);
+	if (err)
+		goto exit_devreg;
 
 	return 0;
 
+exit_devreg:
+	rtc_device_unregister(rtc);
+
 exit_detach:
 	i2c_detach_client(client);
 
diff --git a/drivers/rtc/rtc-ds1742.c b/drivers/rtc/rtc-ds1742.c
index 6273a3d..17633bf 100644
--- a/drivers/rtc/rtc-ds1742.c
+++ b/drivers/rtc/rtc-ds1742.c
@@ -6,6 +6,10 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
+ *
+ * Copyright (C) 2006 Torsten Ertbjerg Rasmussen <tr@newtec.dk>
+ *  - nvram size determined from resource
+ *  - this ds1742 driver now supports ds1743.
  */
 
 #include <linux/bcd.h>
@@ -17,20 +21,19 @@
 #include <linux/platform_device.h>
 #include <linux/io.h>
 
-#define DRV_VERSION "0.2"
+#define DRV_VERSION "0.3"
 
-#define RTC_REG_SIZE		0x800
-#define RTC_OFFSET		0x7f8
+#define RTC_SIZE		8
 
-#define RTC_CONTROL		(RTC_OFFSET + 0)
-#define RTC_CENTURY		(RTC_OFFSET + 0)
-#define RTC_SECONDS		(RTC_OFFSET + 1)
-#define RTC_MINUTES		(RTC_OFFSET + 2)
-#define RTC_HOURS		(RTC_OFFSET + 3)
-#define RTC_DAY			(RTC_OFFSET + 4)
-#define RTC_DATE		(RTC_OFFSET + 5)
-#define RTC_MONTH		(RTC_OFFSET + 6)
-#define RTC_YEAR		(RTC_OFFSET + 7)
+#define RTC_CONTROL		0
+#define RTC_CENTURY		0
+#define RTC_SECONDS		1
+#define RTC_MINUTES		2
+#define RTC_HOURS		3
+#define RTC_DAY			4
+#define RTC_DATE		5
+#define RTC_MONTH		6
+#define RTC_YEAR		7
 
 #define RTC_CENTURY_MASK	0x3f
 #define RTC_SECONDS_MASK	0x7f
@@ -48,7 +51,10 @@
 
 struct rtc_plat_data {
 	struct rtc_device *rtc;
-	void __iomem *ioaddr;
+	void __iomem *ioaddr_nvram;
+	void __iomem *ioaddr_rtc;
+	size_t size_nvram;
+	size_t size;
 	unsigned long baseaddr;
 	unsigned long last_jiffies;
 };
@@ -57,7 +63,7 @@
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
-	void __iomem *ioaddr = pdata->ioaddr;
+	void __iomem *ioaddr = pdata->ioaddr_rtc;
 	u8 century;
 
 	century = BIN2BCD((tm->tm_year + 1900) / 100);
@@ -82,7 +88,7 @@
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
-	void __iomem *ioaddr = pdata->ioaddr;
+	void __iomem *ioaddr = pdata->ioaddr_rtc;
 	unsigned int year, month, day, hour, minute, second, week;
 	unsigned int century;
 
@@ -127,10 +133,10 @@
 	struct platform_device *pdev =
 		to_platform_device(container_of(kobj, struct device, kobj));
 	struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
-	void __iomem *ioaddr = pdata->ioaddr;
+	void __iomem *ioaddr = pdata->ioaddr_nvram;
 	ssize_t count;
 
-	for (count = 0; size > 0 && pos < RTC_OFFSET; count++, size--)
+	for (count = 0; size > 0 && pos < pdata->size_nvram; count++, size--)
 		*buf++ = readb(ioaddr + pos++);
 	return count;
 }
@@ -141,10 +147,10 @@
 	struct platform_device *pdev =
 		to_platform_device(container_of(kobj, struct device, kobj));
 	struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
-	void __iomem *ioaddr = pdata->ioaddr;
+	void __iomem *ioaddr = pdata->ioaddr_nvram;
 	ssize_t count;
 
-	for (count = 0; size > 0 && pos < RTC_OFFSET; count++, size--)
+	for (count = 0; size > 0 && pos < pdata->size_nvram; count++, size--)
 		writeb(*buf++, ioaddr + pos++);
 	return count;
 }
@@ -155,7 +161,6 @@
 		.mode = S_IRUGO | S_IWUGO,
 		.owner = THIS_MODULE,
 	},
-	.size = RTC_OFFSET,
 	.read = ds1742_nvram_read,
 	.write = ds1742_nvram_write,
 };
@@ -175,19 +180,23 @@
 	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
 	if (!pdata)
 		return -ENOMEM;
-	if (!request_mem_region(res->start, RTC_REG_SIZE, pdev->name)) {
+	pdata->size = res->end - res->start + 1;
+	if (!request_mem_region(res->start, pdata->size, pdev->name)) {
 		ret = -EBUSY;
 		goto out;
 	}
 	pdata->baseaddr = res->start;
-	ioaddr = ioremap(pdata->baseaddr, RTC_REG_SIZE);
+	ioaddr = ioremap(pdata->baseaddr, pdata->size);
 	if (!ioaddr) {
 		ret = -ENOMEM;
 		goto out;
 	}
-	pdata->ioaddr = ioaddr;
+	pdata->ioaddr_nvram = ioaddr;
+	pdata->size_nvram = pdata->size - RTC_SIZE;
+	pdata->ioaddr_rtc = ioaddr + pdata->size_nvram;
 
 	/* turn RTC on if it was not on */
+	ioaddr = pdata->ioaddr_rtc;
 	sec = readb(ioaddr + RTC_SECONDS);
 	if (sec & RTC_STOP) {
 		sec &= RTC_SECONDS_MASK;
@@ -208,6 +217,8 @@
 	pdata->rtc = rtc;
 	pdata->last_jiffies = jiffies;
 	platform_set_drvdata(pdev, pdata);
+	ds1742_nvram_attr.size = max(ds1742_nvram_attr.size,
+				     pdata->size_nvram);
 	ret = sysfs_create_bin_file(&pdev->dev.kobj, &ds1742_nvram_attr);
 	if (ret)
 		goto out;
@@ -215,10 +226,10 @@
  out:
 	if (pdata->rtc)
 		rtc_device_unregister(pdata->rtc);
-	if (ioaddr)
-		iounmap(ioaddr);
+	if (pdata->ioaddr_nvram)
+		iounmap(pdata->ioaddr_nvram);
 	if (pdata->baseaddr)
-		release_mem_region(pdata->baseaddr, RTC_REG_SIZE);
+		release_mem_region(pdata->baseaddr, pdata->size);
 	kfree(pdata);
 	return ret;
 }
@@ -229,8 +240,8 @@
 
 	sysfs_remove_bin_file(&pdev->dev.kobj, &ds1742_nvram_attr);
 	rtc_device_unregister(pdata->rtc);
-	iounmap(pdata->ioaddr);
-	release_mem_region(pdata->baseaddr, RTC_REG_SIZE);
+	iounmap(pdata->ioaddr_nvram);
+	release_mem_region(pdata->baseaddr, pdata->size);
 	kfree(pdata);
 	return 0;
 }
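
The ds1742 changes above size the mapping from the platform resource rather than a hard-coded register window, placing the eight RTC registers at the top of the region and treating everything below them as NVRAM. A sketch of that layout calculation under the same assumption; names other than the request_mem_region()/ioremap() calls are hypothetical:

#include <linux/io.h>
#include <linux/ioport.h>

#define CHIP_RTC_REGS	8	/* RTC registers occupy the last 8 bytes */

struct chip_map {
	void __iomem *nvram;
	void __iomem *rtc;
	size_t nvram_size;
};

static int chip_map_resource(struct resource *res, struct chip_map *map)
{
	size_t size = res->end - res->start + 1;
	void __iomem *base;

	if (!request_mem_region(res->start, size, "chip"))
		return -EBUSY;

	base = ioremap(res->start, size);
	if (!base) {
		release_mem_region(res->start, size);
		return -ENOMEM;
	}

	map->nvram = base;
	map->nvram_size = size - CHIP_RTC_REGS;
	map->rtc = base + map->nvram_size;	/* registers follow the NVRAM */
	return 0;
}
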
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
new file mode 100644
index 0000000..eac5fb1
--- /dev/null
+++ b/drivers/rtc/rtc-omap.c
@@ -0,0 +1,572 @@
+/*
+ * TI OMAP1 Real Time Clock interface for Linux
+ *
+ * Copyright (C) 2003 MontaVista Software, Inc.
+ * Author: George G. Davis <gdavis@mvista.com> or <source@mvista.com>
+ *
+ * Copyright (C) 2006 David Brownell (new RTC framework)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/rtc.h>
+#include <linux/bcd.h>
+#include <linux/platform_device.h>
+
+#include <asm/io.h>
+#include <asm/mach/time.h>
+
+
+/* The OMAP1 RTC is a year/month/day/hours/minutes/seconds BCD clock
+ * with century-range alarm matching, driven by the 32kHz clock.
+ *
+ * The main user-visible ways it differs from PC RTCs are by omitting
+ * "don't care" alarm fields and sub-second periodic IRQs, and having
+ * an autoadjust mechanism to calibrate to the true oscillator rate.
+ *
+ * Board-specific wiring options include using split power mode with
+ * RTC_OFF_NOFF used as the reset signal (so the RTC won't be reset),
+ * and wiring RTC_WAKE_INT (so the RTC alarm can wake the system from
+ * low power modes).  See the BOARD-SPECIFIC CUSTOMIZATION comment.
+ */
+
+#define OMAP_RTC_BASE			0xfffb4800
+
+/* RTC registers */
+#define OMAP_RTC_SECONDS_REG		0x00
+#define OMAP_RTC_MINUTES_REG		0x04
+#define OMAP_RTC_HOURS_REG		0x08
+#define OMAP_RTC_DAYS_REG		0x0C
+#define OMAP_RTC_MONTHS_REG		0x10
+#define OMAP_RTC_YEARS_REG		0x14
+#define OMAP_RTC_WEEKS_REG		0x18
+
+#define OMAP_RTC_ALARM_SECONDS_REG	0x20
+#define OMAP_RTC_ALARM_MINUTES_REG	0x24
+#define OMAP_RTC_ALARM_HOURS_REG	0x28
+#define OMAP_RTC_ALARM_DAYS_REG		0x2c
+#define OMAP_RTC_ALARM_MONTHS_REG	0x30
+#define OMAP_RTC_ALARM_YEARS_REG	0x34
+
+#define OMAP_RTC_CTRL_REG		0x40
+#define OMAP_RTC_STATUS_REG		0x44
+#define OMAP_RTC_INTERRUPTS_REG		0x48
+
+#define OMAP_RTC_COMP_LSB_REG		0x4c
+#define OMAP_RTC_COMP_MSB_REG		0x50
+#define OMAP_RTC_OSC_REG		0x54
+
+/* OMAP_RTC_CTRL_REG bit fields: */
+#define OMAP_RTC_CTRL_SPLIT		(1<<7)
+#define OMAP_RTC_CTRL_DISABLE		(1<<6)
+#define OMAP_RTC_CTRL_SET_32_COUNTER	(1<<5)
+#define OMAP_RTC_CTRL_TEST		(1<<4)
+#define OMAP_RTC_CTRL_MODE_12_24	(1<<3)
+#define OMAP_RTC_CTRL_AUTO_COMP		(1<<2)
+#define OMAP_RTC_CTRL_ROUND_30S		(1<<1)
+#define OMAP_RTC_CTRL_STOP		(1<<0)
+
+/* OMAP_RTC_STATUS_REG bit fields: */
+#define OMAP_RTC_STATUS_POWER_UP        (1<<7)
+#define OMAP_RTC_STATUS_ALARM           (1<<6)
+#define OMAP_RTC_STATUS_1D_EVENT        (1<<5)
+#define OMAP_RTC_STATUS_1H_EVENT        (1<<4)
+#define OMAP_RTC_STATUS_1M_EVENT        (1<<3)
+#define OMAP_RTC_STATUS_1S_EVENT        (1<<2)
+#define OMAP_RTC_STATUS_RUN             (1<<1)
+#define OMAP_RTC_STATUS_BUSY            (1<<0)
+
+/* OMAP_RTC_INTERRUPTS_REG bit fields: */
+#define OMAP_RTC_INTERRUPTS_IT_ALARM    (1<<3)
+#define OMAP_RTC_INTERRUPTS_IT_TIMER    (1<<2)
+
+
+#define rtc_read(addr)		omap_readb(OMAP_RTC_BASE + (addr))
+#define rtc_write(val, addr)	omap_writeb(val, OMAP_RTC_BASE + (addr))
+
+
+/* platform_bus isn't hotpluggable, so for static linkage it'd be safe
+ * to get rid of probe() and remove() code ... too bad the driver struct
+ * remembers probe(), that's about 25% of the runtime footprint!!
+ */
+#ifndef	MODULE
+#undef	__devexit
+#undef	__devexit_p
+#define	__devexit	__exit
+#define	__devexit_p	__exit_p
+#endif
+
+
+/* we rely on the rtc framework to handle locking (rtc->ops_lock),
+ * so the only other requirement is that register accesses which
+ * require BUSY to be clear are made with IRQs locally disabled
+ */
+static void rtc_wait_not_busy(void)
+{
+	int	count = 0;
+	u8	status;
+
+	/* BUSY may stay active for 1/32768 second (~30 usec) */
+	for (count = 0; count < 50; count++) {
+		status = rtc_read(OMAP_RTC_STATUS_REG);
+		if ((status & (u8)OMAP_RTC_STATUS_BUSY) == 0)
+			break;
+		udelay(1);
+	}
+	/* now we have ~15 usec to read/write various registers */
+}
+
+static irqreturn_t rtc_irq(int irq, void *class_dev)
+{
+	unsigned long		events = 0;
+	u8			irq_data;
+
+	irq_data = rtc_read(OMAP_RTC_STATUS_REG);
+
+	/* alarm irq? */
+	if (irq_data & OMAP_RTC_STATUS_ALARM) {
+		rtc_write(OMAP_RTC_STATUS_ALARM, OMAP_RTC_STATUS_REG);
+		events |= RTC_IRQF | RTC_AF;
+	}
+
+	/* 1/sec periodic/update irq? */
+	if (irq_data & OMAP_RTC_STATUS_1S_EVENT)
+		events |= RTC_IRQF | RTC_UF;
+
+	rtc_update_irq(class_dev, 1, events);
+
+	return IRQ_HANDLED;
+}
+
+#ifdef	CONFIG_RTC_INTF_DEV
+
+static int
+omap_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+{
+	u8 reg;
+
+	switch (cmd) {
+	case RTC_AIE_OFF:
+	case RTC_AIE_ON:
+	case RTC_UIE_OFF:
+	case RTC_UIE_ON:
+		break;
+	default:
+		return -ENOIOCTLCMD;
+	}
+
+	local_irq_disable();
+	rtc_wait_not_busy();
+	reg = rtc_read(OMAP_RTC_INTERRUPTS_REG);
+	switch (cmd) {
+	/* AIE = Alarm Interrupt Enable */
+	case RTC_AIE_OFF:
+		reg &= ~OMAP_RTC_INTERRUPTS_IT_ALARM;
+		break;
+	case RTC_AIE_ON:
+		reg |= OMAP_RTC_INTERRUPTS_IT_ALARM;
+		break;
+	/* UIE = Update Interrupt Enable (1/second) */
+	case RTC_UIE_OFF:
+		reg &= ~OMAP_RTC_INTERRUPTS_IT_TIMER;
+		break;
+	case RTC_UIE_ON:
+		reg |= OMAP_RTC_INTERRUPTS_IT_TIMER;
+		break;
+	}
+	rtc_wait_not_busy();
+	rtc_write(reg, OMAP_RTC_INTERRUPTS_REG);
+	local_irq_enable();
+
+	return 0;
+}
+
+#else
+#define	omap_rtc_ioctl	NULL
+#endif
+
+/* this hardware doesn't support "don't care" alarm fields */
+static int tm2bcd(struct rtc_time *tm)
+{
+	if (rtc_valid_tm(tm) != 0)
+		return -EINVAL;
+
+	tm->tm_sec = BIN2BCD(tm->tm_sec);
+	tm->tm_min = BIN2BCD(tm->tm_min);
+	tm->tm_hour = BIN2BCD(tm->tm_hour);
+	tm->tm_mday = BIN2BCD(tm->tm_mday);
+
+	tm->tm_mon = BIN2BCD(tm->tm_mon + 1);
+
+	/* epoch == 1900 */
+	if (tm->tm_year < 100 || tm->tm_year > 199)
+		return -EINVAL;
+	tm->tm_year = BIN2BCD(tm->tm_year - 100);
+
+	return 0;
+}
+
+static void bcd2tm(struct rtc_time *tm)
+{
+	tm->tm_sec = BCD2BIN(tm->tm_sec);
+	tm->tm_min = BCD2BIN(tm->tm_min);
+	tm->tm_hour = BCD2BIN(tm->tm_hour);
+	tm->tm_mday = BCD2BIN(tm->tm_mday);
+	tm->tm_mon = BCD2BIN(tm->tm_mon) - 1;
+	/* epoch == 1900 */
+	tm->tm_year = BCD2BIN(tm->tm_year) + 100;
+}
+
+
+static int omap_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+	/* we don't report wday/yday/isdst ... */
+	local_irq_disable();
+	rtc_wait_not_busy();
+
+	tm->tm_sec = rtc_read(OMAP_RTC_SECONDS_REG);
+	tm->tm_min = rtc_read(OMAP_RTC_MINUTES_REG);
+	tm->tm_hour = rtc_read(OMAP_RTC_HOURS_REG);
+	tm->tm_mday = rtc_read(OMAP_RTC_DAYS_REG);
+	tm->tm_mon = rtc_read(OMAP_RTC_MONTHS_REG);
+	tm->tm_year = rtc_read(OMAP_RTC_YEARS_REG);
+
+	local_irq_enable();
+
+	bcd2tm(tm);
+	return 0;
+}
+
+static int omap_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+	if (tm2bcd(tm) < 0)
+		return -EINVAL;
+	local_irq_disable();
+	rtc_wait_not_busy();
+
+	rtc_write(tm->tm_year, OMAP_RTC_YEARS_REG);
+	rtc_write(tm->tm_mon, OMAP_RTC_MONTHS_REG);
+	rtc_write(tm->tm_mday, OMAP_RTC_DAYS_REG);
+	rtc_write(tm->tm_hour, OMAP_RTC_HOURS_REG);
+	rtc_write(tm->tm_min, OMAP_RTC_MINUTES_REG);
+	rtc_write(tm->tm_sec, OMAP_RTC_SECONDS_REG);
+
+	local_irq_enable();
+
+	return 0;
+}
+
+static int omap_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
+{
+	local_irq_disable();
+	rtc_wait_not_busy();
+
+	alm->time.tm_sec = rtc_read(OMAP_RTC_ALARM_SECONDS_REG);
+	alm->time.tm_min = rtc_read(OMAP_RTC_ALARM_MINUTES_REG);
+	alm->time.tm_hour = rtc_read(OMAP_RTC_ALARM_HOURS_REG);
+	alm->time.tm_mday = rtc_read(OMAP_RTC_ALARM_DAYS_REG);
+	alm->time.tm_mon = rtc_read(OMAP_RTC_ALARM_MONTHS_REG);
+	alm->time.tm_year = rtc_read(OMAP_RTC_ALARM_YEARS_REG);
+
+	local_irq_enable();
+
+	bcd2tm(&alm->time);
+	alm->pending = !!(rtc_read(OMAP_RTC_INTERRUPTS_REG)
+			& OMAP_RTC_INTERRUPTS_IT_ALARM);
+	alm->enabled = alm->pending && device_may_wakeup(dev);
+
+	return 0;
+}
+
+static int omap_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
+{
+	u8 reg;
+
+	/* Much userspace code uses RTC_ALM_SET, thus "don't care" for
+	 * day/month/year specifies alarms up to 24 hours in the future.
+	 * So we need to handle that ... but let's ignore the "don't care"
+	 * values for hours/minutes/seconds.
+	 */
+	if (alm->time.tm_mday <= 0
+			&& alm->time.tm_mon < 0
+			&& alm->time.tm_year < 0) {
+		struct rtc_time tm;
+		unsigned long now, then;
+
+		omap_rtc_read_time(dev, &tm);
+		rtc_tm_to_time(&tm, &now);
+
+		alm->time.tm_mday = tm.tm_mday;
+		alm->time.tm_mon = tm.tm_mon;
+		alm->time.tm_year = tm.tm_year;
+		rtc_tm_to_time(&alm->time, &then);
+
+		/* sometimes the alarm wraps into tomorrow */
+		if (then < now) {
+			rtc_time_to_tm(now + 24 * 60 * 60, &tm);
+			alm->time.tm_mday = tm.tm_mday;
+			alm->time.tm_mon = tm.tm_mon;
+			alm->time.tm_year = tm.tm_year;
+		}
+	}
+
+	if (tm2bcd(&alm->time) < 0)
+		return -EINVAL;
+
+	local_irq_disable();
+	rtc_wait_not_busy();
+
+	rtc_write(alm->time.tm_year, OMAP_RTC_ALARM_YEARS_REG);
+	rtc_write(alm->time.tm_mon, OMAP_RTC_ALARM_MONTHS_REG);
+	rtc_write(alm->time.tm_mday, OMAP_RTC_ALARM_DAYS_REG);
+	rtc_write(alm->time.tm_hour, OMAP_RTC_ALARM_HOURS_REG);
+	rtc_write(alm->time.tm_min, OMAP_RTC_ALARM_MINUTES_REG);
+	rtc_write(alm->time.tm_sec, OMAP_RTC_ALARM_SECONDS_REG);
+
+	reg = rtc_read(OMAP_RTC_INTERRUPTS_REG);
+	if (alm->enabled)
+		reg |= OMAP_RTC_INTERRUPTS_IT_ALARM;
+	else
+		reg &= ~OMAP_RTC_INTERRUPTS_IT_ALARM;
+	rtc_write(reg, OMAP_RTC_INTERRUPTS_REG);
+
+	local_irq_enable();
+
+	return 0;
+}
+
+static struct rtc_class_ops omap_rtc_ops = {
+	.ioctl		= omap_rtc_ioctl,
+	.read_time	= omap_rtc_read_time,
+	.set_time	= omap_rtc_set_time,
+	.read_alarm	= omap_rtc_read_alarm,
+	.set_alarm	= omap_rtc_set_alarm,
+};
+
+static int omap_rtc_alarm;
+static int omap_rtc_timer;
+
+static int __devinit omap_rtc_probe(struct platform_device *pdev)
+{
+	struct resource		*res, *mem;
+	struct rtc_device	*rtc;
+	u8			reg, new_ctrl;
+
+	omap_rtc_timer = platform_get_irq(pdev, 0);
+	if (omap_rtc_timer <= 0) {
+		pr_debug("%s: no update irq?\n", pdev->name);
+		return -ENOENT;
+	}
+
+	omap_rtc_alarm = platform_get_irq(pdev, 1);
+	if (omap_rtc_alarm <= 0) {
+		pr_debug("%s: no alarm irq?\n", pdev->name);
+		return -ENOENT;
+	}
+
+	/* NOTE:  using static mapping for RTC registers */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res && res->start != OMAP_RTC_BASE) {
+		pr_debug("%s: RTC registers at %08x, expected %08x\n",
+			pdev->name, (unsigned) res->start, OMAP_RTC_BASE);
+		return -ENOENT;
+	}
+
+	if (res)
+		mem = request_mem_region(res->start,
+				res->end - res->start + 1,
+				pdev->name);
+	else
+		mem = NULL;
+	if (!mem) {
+		pr_debug("%s: RTC registers at %08x are not free\n",
+			pdev->name, OMAP_RTC_BASE);
+		return -EBUSY;
+	}
+
+	rtc = rtc_device_register(pdev->name, &pdev->dev,
+			&omap_rtc_ops, THIS_MODULE);
+	if (IS_ERR(rtc)) {
+		pr_debug("%s: can't register RTC device, err %ld\n",
+			pdev->name, PTR_ERR(rtc));
+		goto fail;
+	}
+	platform_set_drvdata(pdev, rtc);
+	class_set_devdata(&rtc->class_dev, mem);
+
+	/* clear pending irqs, and set 1/second periodic,
+	 * which we'll use instead of update irqs
+	 */
+	rtc_write(0, OMAP_RTC_INTERRUPTS_REG);
+
+	/* clear old status */
+	reg = rtc_read(OMAP_RTC_STATUS_REG);
+	if (reg & (u8) OMAP_RTC_STATUS_POWER_UP) {
+		pr_info("%s: RTC power up reset detected\n",
+			pdev->name);
+		rtc_write(OMAP_RTC_STATUS_POWER_UP, OMAP_RTC_STATUS_REG);
+	}
+	if (reg & (u8) OMAP_RTC_STATUS_ALARM)
+		rtc_write(OMAP_RTC_STATUS_ALARM, OMAP_RTC_STATUS_REG);
+
+	/* handle periodic and alarm irqs */
+	if (request_irq(omap_rtc_timer, rtc_irq, SA_INTERRUPT,
+			rtc->class_dev.class_id, &rtc->class_dev)) {
+		pr_debug("%s: RTC timer interrupt IRQ%d already claimed\n",
+			pdev->name, omap_rtc_timer);
+		goto fail0;
+	}
+	if (request_irq(omap_rtc_alarm, rtc_irq, SA_INTERRUPT,
+			rtc->class_dev.class_id, &rtc->class_dev)) {
+		pr_debug("%s: RTC alarm interrupt IRQ%d already claimed\n",
+			pdev->name, omap_rtc_alarm);
+		goto fail1;
+	}
+
+	/* On boards with split power, RTC_ON_NOFF won't reset the RTC */
+	reg = rtc_read(OMAP_RTC_CTRL_REG);
+	if (reg & (u8) OMAP_RTC_CTRL_STOP)
+		pr_info("%s: already running\n", pdev->name);
+
+	/* force to 24 hour mode */
+	new_ctrl = reg & ~(OMAP_RTC_CTRL_SPLIT|OMAP_RTC_CTRL_AUTO_COMP);
+	new_ctrl |= OMAP_RTC_CTRL_STOP;
+
+	/* BOARD-SPECIFIC CUSTOMIZATION CAN GO HERE:
+	 *
+	 *  - Boards wired so that RTC_WAKE_INT does something, and muxed
+	 *    right (W13_1610_RTC_WAKE_INT is the default after chip reset),
+	 *    should initialize the device wakeup flag appropriately.
+	 *
+	 *  - Boards wired so RTC_ON_nOFF is used as the reset signal,
+	 *    rather than nPWRON_RESET, should forcibly enable split
+	 *    power mode.  (Some chip errata report that RTC_CTRL_SPLIT
+	 *    is write-only, and always reads as zero...)
+	 */
+	device_init_wakeup(&pdev->dev, 0);
+
+	if (new_ctrl & (u8) OMAP_RTC_CTRL_SPLIT)
+		pr_info("%s: split power mode\n", pdev->name);
+
+	if (reg != new_ctrl)
+		rtc_write(new_ctrl, OMAP_RTC_CTRL_REG);
+
+	return 0;
+
+fail1:
+	free_irq(omap_rtc_timer, NULL);
+fail0:
+	rtc_device_unregister(rtc);
+fail:
+	release_resource(mem);
+	return -EIO;
+}
+
+static int __devexit omap_rtc_remove(struct platform_device *pdev)
+{
+	struct rtc_device	*rtc = platform_get_drvdata(pdev);
+
+	device_init_wakeup(&pdev->dev, 0);
+
+	/* leave rtc running, but disable irqs */
+	rtc_write(0, OMAP_RTC_INTERRUPTS_REG);
+
+	free_irq(omap_rtc_timer, rtc);
+	free_irq(omap_rtc_alarm, rtc);
+
+	release_resource(class_get_devdata(&rtc->class_dev));
+	rtc_device_unregister(rtc);
+	return 0;
+}
+
+#ifdef CONFIG_PM
+
+static struct timespec rtc_delta;
+static u8 irqstat;
+
+static int omap_rtc_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct rtc_time rtc_tm;
+	struct timespec time;
+
+	time.tv_nsec = 0;
+	omap_rtc_read_time(NULL, &rtc_tm);
+	rtc_tm_to_time(&rtc_tm, &time.tv_sec);
+
+	save_time_delta(&rtc_delta, &time);
+	irqstat = rtc_read(OMAP_RTC_INTERRUPTS_REG);
+
+	/* FIXME the RTC alarm is not currently acting as a wakeup event
+	 * source, and in fact this enable() call is just saving a flag
+	 * that's never used...
+	 */
+	if (device_may_wakeup(&pdev->dev))
+		enable_irq_wake(omap_rtc_alarm);
+	else
+		rtc_write(0, OMAP_RTC_INTERRUPTS_REG);
+
+	return 0;
+}
+
+static int omap_rtc_resume(struct platform_device *pdev)
+{
+	struct rtc_time rtc_tm;
+	struct timespec time;
+
+	time.tv_nsec = 0;
+	omap_rtc_read_time(NULL, &rtc_tm);
+	rtc_tm_to_time(&rtc_tm, &time.tv_sec);
+
+	restore_time_delta(&rtc_delta, &time);
+	if (device_may_wakeup(&pdev->dev))
+		disable_irq_wake(omap_rtc_alarm);
+	else
+		rtc_write(irqstat, OMAP_RTC_INTERRUPTS_REG);
+	return 0;
+}
+
+#else
+#define omap_rtc_suspend NULL
+#define omap_rtc_resume  NULL
+#endif
+
+static void omap_rtc_shutdown(struct platform_device *pdev)
+{
+	rtc_write(0, OMAP_RTC_INTERRUPTS_REG);
+}
+
+MODULE_ALIAS("omap_rtc");
+static struct platform_driver omap_rtc_driver = {
+	.probe		= omap_rtc_probe,
+	.remove		= __devexit_p(omap_rtc_remove),
+	.suspend	= omap_rtc_suspend,
+	.resume		= omap_rtc_resume,
+	.shutdown	= omap_rtc_shutdown,
+	.driver		= {
+		.name	= "omap_rtc",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init rtc_init(void)
+{
+	return platform_driver_register(&omap_rtc_driver);
+}
+module_init(rtc_init);
+
+static void __exit rtc_exit(void)
+{
+	platform_driver_unregister(&omap_rtc_driver);
+}
+module_exit(rtc_exit);
+
+MODULE_AUTHOR("George G. Davis (and others)");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c
index a44fe4e..e2c7698 100644
--- a/drivers/rtc/rtc-rs5c372.c
+++ b/drivers/rtc/rtc-rs5c372.c
@@ -13,7 +13,7 @@
 #include <linux/rtc.h>
 #include <linux/bcd.h>
 
-#define DRV_VERSION "0.2"
+#define DRV_VERSION "0.3"
 
 /* Addresses to scan */
 static unsigned short normal_i2c[] = { /* 0x32,*/ I2C_CLIENT_END };
@@ -39,6 +39,14 @@
 static int rs5c372_detach(struct i2c_client *client);
 static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind);
 
+struct rs5c372 {
+	u8 reg_addr;
+	u8 regs[17];
+	struct i2c_msg msg[1];
+	struct i2c_client client;
+	struct rtc_device *rtc;
+};
+
 static struct i2c_driver rs5c372_driver = {
 	.driver		= {
 		.name	= "rs5c372",
@@ -49,18 +57,16 @@
 
 static int rs5c372_get_datetime(struct i2c_client *client, struct rtc_time *tm)
 {
-	unsigned char buf[7] = { RS5C372_REG_BASE };
 
-	/* this implements the 1st reading method, according
-	 * to the datasheet. buf[0] is initialized with
-	 * address ptr and transmission format register.
+	struct rs5c372 *rs5c372 = i2c_get_clientdata(client);
+	u8 *buf = &(rs5c372->regs[1]);
+
+	/* this implements the 3rd reading method, according
+	 * to the datasheet. rs5c372 defaults to internal
+	 * address 0xF, so 0x0 is in regs[1]
 	 */
-	struct i2c_msg msgs[] = {
-		{ client->addr, 0, 1, buf },
-		{ client->addr, I2C_M_RD, 7, buf },
-	};
 
-	if ((i2c_transfer(client->adapter, msgs, 2)) != 2) {
+	if ((i2c_transfer(client->adapter, rs5c372->msg, 1)) != 1) {
 		dev_err(&client->dev, "%s: read error\n", __FUNCTION__);
 		return -EIO;
 	}
@@ -114,23 +120,14 @@
 
 static int rs5c372_get_trim(struct i2c_client *client, int *osc, int *trim)
 {
-	unsigned char buf = RS5C372_REG_TRIM;
-
-	struct i2c_msg msgs[] = {
-		{ client->addr, 0, 1, &buf },
-		{ client->addr, I2C_M_RD, 1, &buf },
-	};
-
-	if ((i2c_transfer(client->adapter, msgs, 2)) != 2) {
-		dev_err(&client->dev, "%s: read error\n", __FUNCTION__);
-		return -EIO;
-	}
+	struct rs5c372 *rs5c372 = i2c_get_clientdata(client);
+	u8 tmp = rs5c372->regs[RS5C372_REG_TRIM + 1];
 
 	if (osc)
-		*osc = (buf & RS5C372_TRIM_XSL) ? 32000 : 32768;
+		*osc = (tmp & RS5C372_TRIM_XSL) ? 32000 : 32768;
 
 	if (trim) {
-		*trim = buf & RS5C372_TRIM_MASK;
+		*trim = tmp & RS5C372_TRIM_MASK;
 		dev_dbg(&client->dev, "%s: raw trim=%x\n", __FUNCTION__, *trim);
 	}
 
@@ -201,7 +198,7 @@
 {
 	int err = 0;
 	struct i2c_client *client;
-	struct rtc_device *rtc;
+	struct rs5c372 *rs5c372;
 
 	dev_dbg(&adapter->dev, "%s\n", __FUNCTION__);
 
@@ -210,10 +207,11 @@
 		goto exit;
 	}
 
-	if (!(client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL))) {
+	if (!(rs5c372 = kzalloc(sizeof(struct rs5c372), GFP_KERNEL))) {
 		err = -ENOMEM;
 		goto exit;
 	}
+	client = &rs5c372->client;
 
 	/* I2C client */
 	client->addr = address;
@@ -222,32 +220,47 @@
 
 	strlcpy(client->name, rs5c372_driver.driver.name, I2C_NAME_SIZE);
 
+	i2c_set_clientdata(client, rs5c372);
+
+	rs5c372->msg[0].addr = address;
+	rs5c372->msg[0].flags = I2C_M_RD;
+	rs5c372->msg[0].len = sizeof(rs5c372->regs);
+	rs5c372->msg[0].buf = rs5c372->regs;
+
 	/* Inform the i2c layer */
 	if ((err = i2c_attach_client(client)))
 		goto exit_kfree;
 
 	dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n");
 
-	rtc = rtc_device_register(rs5c372_driver.driver.name, &client->dev,
-				&rs5c372_rtc_ops, THIS_MODULE);
+	rs5c372->rtc = rtc_device_register(rs5c372_driver.driver.name,
+				&client->dev, &rs5c372_rtc_ops, THIS_MODULE);
 
-	if (IS_ERR(rtc)) {
-		err = PTR_ERR(rtc);
+	if (IS_ERR(rs5c372->rtc)) {
+		err = PTR_ERR(rs5c372->rtc);
 		goto exit_detach;
 	}
 
-	i2c_set_clientdata(client, rtc);
-
-	device_create_file(&client->dev, &dev_attr_trim);
-	device_create_file(&client->dev, &dev_attr_osc);
+	err = device_create_file(&client->dev, &dev_attr_trim);
+	if (err)
+		goto exit_devreg;
+	err = device_create_file(&client->dev, &dev_attr_osc);
+	if (err)
+		goto exit_trim;
 
 	return 0;
 
+exit_trim:
+	device_remove_file(&client->dev, &dev_attr_trim);
+
+exit_devreg:
+	rtc_device_unregister(rs5c372->rtc);
+
 exit_detach:
 	i2c_detach_client(client);
 
 exit_kfree:
-	kfree(client);
+	kfree(rs5c372);
 
 exit:
 	return err;
@@ -256,16 +269,15 @@
 static int rs5c372_detach(struct i2c_client *client)
 {
 	int err;
-	struct rtc_device *rtc = i2c_get_clientdata(client);
+	struct rs5c372 *rs5c372 = i2c_get_clientdata(client);
 
-	if (rtc)
-		rtc_device_unregister(rtc);
+	if (rs5c372->rtc)
+		rtc_device_unregister(rs5c372->rtc);
 
 	if ((err = i2c_detach_client(client)))
 		return err;
 
-	kfree(client);
-
+	kfree(rs5c372);
 	return 0;
 }
 
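The rs5c372 rework above replaces per-field write/read transfers with a single I2C_M_RD message that fills a register shadow, which later helpers then parse without touching the bus again. A hedged sketch of that single-message read; struct and field names are hypothetical:

#include <linux/i2c.h>

struct shadow {
	u8 regs[17];
	struct i2c_msg msg;
};

static int shadow_refresh(struct i2c_client *client, struct shadow *s)
{
	s->msg.addr = client->addr;
	s->msg.flags = I2C_M_RD;	/* one read, no preceding address write */
	s->msg.len = sizeof(s->regs);
	s->msg.buf = s->regs;

	/* i2c_transfer() returns the number of messages transferred */
	if (i2c_transfer(client->adapter, &s->msg, 1) != 1)
		return -EIO;
	return 0;
}
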
diff --git a/drivers/rtc/rtc-test.c b/drivers/rtc/rtc-test.c
index 6ef9c62..f50a1b8 100644
--- a/drivers/rtc/rtc-test.c
+++ b/drivers/rtc/rtc-test.c
@@ -123,11 +123,18 @@
 		err = PTR_ERR(rtc);
 		return err;
 	}
-	device_create_file(&plat_dev->dev, &dev_attr_irq);
+
+	err = device_create_file(&plat_dev->dev, &dev_attr_irq);
+	if (err)
+		goto err;
 
 	platform_set_drvdata(plat_dev, rtc);
 
 	return 0;
+
+err:
+	rtc_device_unregister(rtc);
+	return err;
 }
 
 static int __devexit test_remove(struct platform_device *plat_dev)
diff --git a/drivers/rtc/rtc-x1205.c b/drivers/rtc/rtc-x1205.c
index 522c697..9a67487 100644
--- a/drivers/rtc/rtc-x1205.c
+++ b/drivers/rtc/rtc-x1205.c
@@ -562,11 +562,19 @@
 	else
 		dev_err(&client->dev, "couldn't read status\n");
 
-	device_create_file(&client->dev, &dev_attr_atrim);
-	device_create_file(&client->dev, &dev_attr_dtrim);
+	err = device_create_file(&client->dev, &dev_attr_atrim);
+	if (err) goto exit_devreg;
+	err = device_create_file(&client->dev, &dev_attr_dtrim);
+	if (err) goto exit_atrim;
 
 	return 0;
 
+exit_atrim:
+	device_remove_file(&client->dev, &dev_attr_atrim);
+
+exit_devreg:
+	rtc_device_unregister(rtc);
+
 exit_detach:
 	i2c_detach_client(client);
 
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index a2cef57..2af2d9b 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -54,7 +54,7 @@
 static void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
 static int dasd_flush_ccw_queue(struct dasd_device *, int);
 static void dasd_tasklet(struct dasd_device *);
-static void do_kick_device(void *data);
+static void do_kick_device(struct work_struct *);
 
 /*
  * SECTION: Operations on the device structure.
@@ -100,7 +100,7 @@
 		     (unsigned long) device);
 	INIT_LIST_HEAD(&device->ccw_queue);
 	init_timer(&device->timer);
-	INIT_WORK(&device->kick_work, do_kick_device, device);
+	INIT_WORK(&device->kick_work, do_kick_device);
 	device->state = DASD_STATE_NEW;
 	device->target = DASD_STATE_NEW;
 
@@ -407,11 +407,9 @@
  * event daemon.
  */
 static void
-do_kick_device(void *data)
+do_kick_device(struct work_struct *work)
 {
-	struct dasd_device *device;
-
-	device = (struct dasd_device *) data;
+	struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
 	dasd_change_state(device);
 	dasd_schedule_bh(device);
 	dasd_put_device(device);
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 17fdd8c..cf28ccc 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -25,7 +25,7 @@
 
 #include "dasd_int.h"
 
-kmem_cache_t *dasd_page_cache;
+struct kmem_cache *dasd_page_cache;
 EXPORT_SYMBOL_GPL(dasd_page_cache);
 
 /*
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 5ecea3e..fdaa471 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1215,7 +1215,7 @@
 		dst = page_address(bv->bv_page) + bv->bv_offset;
 		if (dasd_page_cache) {
 			char *copy = kmem_cache_alloc(dasd_page_cache,
-						      SLAB_DMA | __GFP_NOWARN);
+						      GFP_DMA | __GFP_NOWARN);
 			if (copy && rq_data_dir(req) == WRITE)
 				memcpy(copy + bv->bv_offset, dst, bv->bv_len);
 			if (copy)
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 80926c5..b857fd5 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -308,7 +308,7 @@
 		dst = page_address(bv->bv_page) + bv->bv_offset;
 		if (dasd_page_cache) {
 			char *copy = kmem_cache_alloc(dasd_page_cache,
-						      SLAB_DMA | __GFP_NOWARN);
+						      GFP_DMA | __GFP_NOWARN);
 			if (copy && rq_data_dir(req) == WRITE)
 				memcpy(copy + bv->bv_offset, dst, bv->bv_len);
 			if (copy)
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 9f52004..dc5dd50 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -474,7 +474,7 @@
 extern unsigned int dasd_profile_level;
 extern struct block_device_operations dasd_device_operations;
 
-extern kmem_cache_t *dasd_page_cache;
+extern struct kmem_cache *dasd_page_cache;
 
 struct dasd_ccw_req *
 dasd_kmalloc_request(char *, int, int, struct dasd_device *);
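
The dasd hunks above track two tree-wide renames: the cache type is now spelled struct kmem_cache instead of kmem_cache_t, and the SLAB_* allocation flags give way to their GFP_* equivalents (SLAB_DMA to GFP_DMA). A sketch against the 2.6.19-era slab interface, where kmem_cache_create() still took constructor and destructor arguments; cache name and object size are hypothetical:

#include <linux/slab.h>

static struct kmem_cache *example_cache;

static int example_cache_init(void)
{
	example_cache = kmem_cache_create("example_cache", 4096, 0, 0,
					  NULL, NULL);
	if (!example_cache)
		return -ENOMEM;
	return 0;
}

static void *example_alloc_dma(void)
{
	/* DMA-capable allocation using the GFP_ spelling of the flag */
	return kmem_cache_alloc(example_cache, GFP_DMA | __GFP_NOWARN);
}

static void example_cache_exit(void)
{
	kmem_cache_destroy(example_cache);
}
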
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index ad7f7e1..26cf2f5 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -334,7 +334,7 @@
 static DEFINE_SPINLOCK(slow_subchannel_lock);
 
 static void
-css_trigger_slow_path(void)
+css_trigger_slow_path(struct work_struct *unused)
 {
 	CIO_TRACE_EVENT(4, "slowpath");
 
@@ -359,8 +359,7 @@
 	spin_unlock_irq(&slow_subchannel_lock);
 }
 
-typedef void (*workfunc)(void *);
-DECLARE_WORK(slow_path_work, (workfunc)css_trigger_slow_path, NULL);
+DECLARE_WORK(slow_path_work, css_trigger_slow_path);
 struct workqueue_struct *slow_path_wq;
 
 /* Reprobe subchannel if unregistered. */
@@ -397,7 +396,7 @@
 }
 
 /* Work function used to reprobe all unregistered subchannels. */
-static void reprobe_all(void *data)
+static void reprobe_all(struct work_struct *unused)
 {
 	int ret;
 
@@ -413,7 +412,7 @@
 		      need_reprobe);
 }
 
-DECLARE_WORK(css_reprobe_work, reprobe_all, NULL);
+DECLARE_WORK(css_reprobe_work, reprobe_all);
 
 /* Schedule reprobing of all unregistered subchannels. */
 void css_schedule_reprobe(void)
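
The css change above drops the old function-pointer cast in favor of the two-argument DECLARE_WORK(), whose handler receives the work_struct pointer and may simply ignore it when all of its state is static. A minimal sketch with hypothetical names (qux_rescan, qux_rescan_work):

#include <linux/workqueue.h>

static void qux_rescan(struct work_struct *unused)
{
	/* operates on global/static state, no per-item context needed */
}

static DECLARE_WORK(qux_rescan_work, qux_rescan);

static void qux_trigger_rescan(void)
{
	schedule_work(&qux_rescan_work);
}
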
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 6a54334..e4dc947 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -37,7 +37,7 @@
 #include "ap_bus.h"
 
 /* Some prototypes. */
-static void ap_scan_bus(void *);
+static void ap_scan_bus(struct work_struct *);
 static void ap_poll_all(unsigned long);
 static void ap_poll_timeout(unsigned long);
 static int ap_poll_thread_start(void);
@@ -71,7 +71,7 @@
 static struct workqueue_struct *ap_work_queue;
 static struct timer_list ap_config_timer;
 static int ap_config_time = AP_CONFIG_TIME;
-static DECLARE_WORK(ap_config_work, ap_scan_bus, NULL);
+static DECLARE_WORK(ap_config_work, ap_scan_bus);
 
 /**
  * Tasklet & timer for AP request polling.
@@ -732,7 +732,7 @@
 	kfree(ap_dev);
 }
 
-static void ap_scan_bus(void *data)
+static void ap_scan_bus(struct work_struct *unused)
 {
 	struct ap_device *ap_dev;
 	struct device *dev;
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 08d4e47..e5665b6 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -67,7 +67,7 @@
  * Some prototypes.
  */
 static void lcs_tasklet(unsigned long);
-static void lcs_start_kernel_thread(struct lcs_card *card);
+static void lcs_start_kernel_thread(struct work_struct *);
 static void lcs_get_frames_cb(struct lcs_channel *, struct lcs_buffer *);
 static int lcs_send_delipm(struct lcs_card *, struct lcs_ipm_list *);
 static int lcs_recovery(void *ptr);
@@ -1724,8 +1724,9 @@
  * Kernel Thread helper functions for LGW initiated commands
  */
 static void
-lcs_start_kernel_thread(struct lcs_card *card)
+lcs_start_kernel_thread(struct work_struct *work)
 {
+	struct lcs_card *card = container_of(work, struct lcs_card, kernel_thread_starter);
 	LCS_DBF_TEXT(5, trace, "krnthrd");
 	if (lcs_do_start_thread(card, LCS_RECOVERY_THREAD))
 		kernel_thread(lcs_recovery, (void *) card, SIGCHLD);
@@ -2053,8 +2054,7 @@
 	ccwgdev->cdev[0]->handler = lcs_irq;
 	ccwgdev->cdev[1]->handler = lcs_irq;
 	card->gdev = ccwgdev;
-	INIT_WORK(&card->kernel_thread_starter,
-		  (void *) lcs_start_kernel_thread, card);
+	INIT_WORK(&card->kernel_thread_starter, lcs_start_kernel_thread);
 	card->thread_start_mask = 0;
 	card->thread_allowed_mask = 0;
 	card->thread_running_mask = 0;
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index 7fdc527..2bde4f1 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -1039,8 +1039,9 @@
 }
 
 static void
-qeth_start_kernel_thread(struct qeth_card *card)
+qeth_start_kernel_thread(struct work_struct *work)
 {
+	struct qeth_card *card = container_of(work, struct qeth_card, kernel_thread_starter);
 	QETH_DBF_TEXT(trace , 2, "strthrd");
 
 	if (card->read.state != CH_STATE_UP &&
@@ -1103,8 +1104,7 @@
 	card->thread_start_mask = 0;
 	card->thread_allowed_mask = 0;
 	card->thread_running_mask = 0;
-	INIT_WORK(&card->kernel_thread_starter,
-		  (void *)qeth_start_kernel_thread,card);
+	INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
 	INIT_LIST_HEAD(&card->ip_list);
 	card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
 	if (!card->ip_tbd_list) {
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 74c0eac..32933ed 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -1032,9 +1032,9 @@
 	wwn_t                   init_wwpn;
 	fcp_lun_t               init_fcp_lun;
 	char 			*driver_version;
-	kmem_cache_t		*fsf_req_qtcb_cache;
-	kmem_cache_t		*sr_buffer_cache;
-	kmem_cache_t		*gid_pn_cache;
+	struct kmem_cache		*fsf_req_qtcb_cache;
+	struct kmem_cache		*sr_buffer_cache;
+	struct kmem_cache		*gid_pn_cache;
 };
 
 /**
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 277826c..067f151 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -109,7 +109,7 @@
 			ptr = kmalloc(size, GFP_ATOMIC);
 		else
 			ptr = kmem_cache_alloc(zfcp_data.fsf_req_qtcb_cache,
-					       SLAB_ATOMIC);
+					       GFP_ATOMIC);
 	}
 
 	if (unlikely(!ptr))
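
[Editorial note] Two mechanical renames recur in the s390 and SCSI hunks: the kmem_cache_t typedef is spelled out as struct kmem_cache, and the legacy SLAB_* allocation flags (SLAB_ATOMIC, SLAB_DMA, SLAB_KERNEL) are replaced by the GFP_* flags they aliased. A small sketch; my_cache and my_alloc are invented names, and the cache is assumed to have been created at init time:

#include <linux/slab.h>

static struct kmem_cache *my_cache;	/* was: kmem_cache_t *my_cache; */

static void *my_alloc(void)
{
	/* pass gfp flags directly; SLAB_ATOMIC/SLAB_DMA are no longer used */
	return kmem_cache_alloc(my_cache, GFP_ATOMIC | __GFP_NOWARN);
}
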
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 335a255..68103e5 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -313,7 +313,7 @@
 	hostdata->status = memory + STATUS_OFFSET;
 	/* all of these offsets are L1_CACHE_BYTES separated.  It is fatal
 	 * if this isn't sufficient separation to avoid dma flushing issues */
-	BUG_ON(!dma_is_consistent(pScript) && L1_CACHE_BYTES < dma_get_cache_alignment());
+	BUG_ON(!dma_is_consistent(hostdata->dev, pScript) && L1_CACHE_BYTES < dma_get_cache_alignment());
 	hostdata->slots = (struct NCR_700_command_slot *)(memory + SLOTS_OFFSET);
 	hostdata->dev = dev;
 
@@ -362,11 +362,11 @@
 	for (j = 0; j < PATCHES; j++)
 		script[LABELPATCHES[j]] = bS_to_host(pScript + SCRIPT[LABELPATCHES[j]]);
 	/* now patch up fixed addresses. */
-	script_patch_32(script, MessageLocation,
+	script_patch_32(hostdata->dev, script, MessageLocation,
 			pScript + MSGOUT_OFFSET);
-	script_patch_32(script, StatusAddress,
+	script_patch_32(hostdata->dev, script, StatusAddress,
 			pScript + STATUS_OFFSET);
-	script_patch_32(script, ReceiveMsgAddress,
+	script_patch_32(hostdata->dev, script, ReceiveMsgAddress,
 			pScript + MSGIN_OFFSET);
 
 	hostdata->script = script;
@@ -821,8 +821,9 @@
 			shost_printk(KERN_WARNING, host,
 				"Unexpected SDTR msg\n");
 			hostdata->msgout[0] = A_REJECT_MSG;
-			dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
-			script_patch_16(hostdata->script, MessageCount, 1);
+			dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
+			script_patch_16(hostdata->dev, hostdata->script,
+			                MessageCount, 1);
 			/* SendMsgOut returns, so set up the return
 			 * address */
 			resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
@@ -833,8 +834,9 @@
 		printk(KERN_INFO "scsi%d: (%d:%d), Unsolicited WDTR after CMD, Rejecting\n",
 		       host->host_no, pun, lun);
 		hostdata->msgout[0] = A_REJECT_MSG;
-		dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
-		script_patch_16(hostdata->script, MessageCount, 1);
+		dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
+		script_patch_16(hostdata->dev, hostdata->script, MessageCount,
+		                1);
 		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
 
 		break;
@@ -847,8 +849,9 @@
 		printk("\n");
 		/* just reject it */
 		hostdata->msgout[0] = A_REJECT_MSG;
-		dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
-		script_patch_16(hostdata->script, MessageCount, 1);
+		dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
+		script_patch_16(hostdata->dev, hostdata->script, MessageCount,
+		                1);
 		/* SendMsgOut returns, so set up the return
 		 * address */
 		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
@@ -929,8 +932,9 @@
 		printk("\n");
 		/* just reject it */
 		hostdata->msgout[0] = A_REJECT_MSG;
-		dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
-		script_patch_16(hostdata->script, MessageCount, 1);
+		dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
+		script_patch_16(hostdata->dev, hostdata->script, MessageCount,
+		                1);
 		/* SendMsgOut returns, so set up the return
 		 * address */
 		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
@@ -939,7 +943,7 @@
 	}
 	NCR_700_writel(temp, host, TEMP_REG);
 	/* set us up to receive another message */
-	dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
+	dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
 	return resume_offset;
 }
 
@@ -1019,9 +1023,9 @@
 				slot->SG[1].ins = bS_to_host(SCRIPT_RETURN);
 				slot->SG[1].pAddr = 0;
 				slot->resume_offset = hostdata->pScript;
-				dma_cache_sync(slot->SG, sizeof(slot->SG[0])*2, DMA_TO_DEVICE);
-				dma_cache_sync(SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
-				
+				dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG[0])*2, DMA_TO_DEVICE);
+				dma_cache_sync(hostdata->dev, SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
+
 				/* queue the command for reissue */
 				slot->state = NCR_700_SLOT_QUEUED;
 				slot->flags = NCR_700_FLAG_AUTOSENSE;
@@ -1136,11 +1140,12 @@
 			hostdata->cmd = slot->cmnd;
 
 			/* re-patch for this command */
-			script_patch_32_abs(hostdata->script, CommandAddress, 
-					    slot->pCmd);
-			script_patch_16(hostdata->script,
+			script_patch_32_abs(hostdata->dev, hostdata->script,
+			                    CommandAddress, slot->pCmd);
+			script_patch_16(hostdata->dev, hostdata->script,
 					CommandCount, slot->cmnd->cmd_len);
-			script_patch_32_abs(hostdata->script, SGScriptStartAddress,
+			script_patch_32_abs(hostdata->dev, hostdata->script,
+			                    SGScriptStartAddress,
 					    to32bit(&slot->pSG[0].ins));
 
 			/* Note: setting SXFER only works if we're
@@ -1150,13 +1155,13 @@
 			 * should therefore always clear ACK */
 			NCR_700_writeb(NCR_700_get_SXFER(hostdata->cmd->device),
 				       host, SXFER_REG);
-			dma_cache_sync(hostdata->msgin,
+			dma_cache_sync(hostdata->dev, hostdata->msgin,
 				       MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
-			dma_cache_sync(hostdata->msgout,
+			dma_cache_sync(hostdata->dev, hostdata->msgout,
 				       MSG_ARRAY_SIZE, DMA_TO_DEVICE);
 			/* I'm just being paranoid here, the command should
 			 * already have been flushed from the cache */
-			dma_cache_sync(slot->cmnd->cmnd,
+			dma_cache_sync(hostdata->dev, slot->cmnd->cmnd,
 				       slot->cmnd->cmd_len, DMA_TO_DEVICE);
 
 
@@ -1220,7 +1225,7 @@
 		hostdata->reselection_id = reselection_id;
 		/* just in case we have a stale simple tag message, clear it */
 		hostdata->msgin[1] = 0;
-		dma_cache_sync(hostdata->msgin,
+		dma_cache_sync(hostdata->dev, hostdata->msgin,
 			       MSG_ARRAY_SIZE, DMA_BIDIRECTIONAL);
 		if(hostdata->tag_negotiated & (1<<reselection_id)) {
 			resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
@@ -1336,7 +1341,7 @@
 	hostdata->cmd = NULL;
 	/* clear any stale simple tag message */
 	hostdata->msgin[1] = 0;
-	dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE,
+	dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE,
 		       DMA_BIDIRECTIONAL);
 
 	if(id == 0xff) {
@@ -1433,29 +1438,30 @@
 		NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
 	}
 
-	script_patch_16(hostdata->script, MessageCount, count);
+	script_patch_16(hostdata->dev, hostdata->script, MessageCount, count);
 
 
-	script_patch_ID(hostdata->script,
+	script_patch_ID(hostdata->dev, hostdata->script,
 			Device_ID, 1<<scmd_id(SCp));
 
-	script_patch_32_abs(hostdata->script, CommandAddress, 
+	script_patch_32_abs(hostdata->dev, hostdata->script, CommandAddress,
 			    slot->pCmd);
-	script_patch_16(hostdata->script, CommandCount, SCp->cmd_len);
+	script_patch_16(hostdata->dev, hostdata->script, CommandCount,
+	                SCp->cmd_len);
 	/* finally plumb the beginning of the SG list into the script
 	 * */
-	script_patch_32_abs(hostdata->script, SGScriptStartAddress,
-			    to32bit(&slot->pSG[0].ins));
+	script_patch_32_abs(hostdata->dev, hostdata->script,
+	                    SGScriptStartAddress, to32bit(&slot->pSG[0].ins));
 	NCR_700_clear_fifo(SCp->device->host);
 
 	if(slot->resume_offset == 0)
 		slot->resume_offset = hostdata->pScript;
 	/* now perform all the writebacks and invalidates */
-	dma_cache_sync(hostdata->msgout, count, DMA_TO_DEVICE);
-	dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE,
+	dma_cache_sync(hostdata->dev, hostdata->msgout, count, DMA_TO_DEVICE);
+	dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE,
 		       DMA_FROM_DEVICE);
-	dma_cache_sync(SCp->cmnd, SCp->cmd_len, DMA_TO_DEVICE);
-	dma_cache_sync(hostdata->status, 1, DMA_FROM_DEVICE);
+	dma_cache_sync(hostdata->dev, SCp->cmnd, SCp->cmd_len, DMA_TO_DEVICE);
+	dma_cache_sync(hostdata->dev, hostdata->status, 1, DMA_FROM_DEVICE);
 
 	/* set the synchronous period/offset */
 	NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
@@ -1631,7 +1637,7 @@
 					slot->SG[i].ins = bS_to_host(SCRIPT_NOP);
 					slot->SG[i].pAddr = 0;
 				}
-				dma_cache_sync(slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
+				dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
 				/* and pretend we disconnected after
 				 * the command phase */
 				resume_offset = hostdata->pScript + Ent_MsgInDuringData;
@@ -1897,9 +1903,9 @@
 		}
 		slot->SG[i].ins = bS_to_host(SCRIPT_RETURN);
 		slot->SG[i].pAddr = 0;
-		dma_cache_sync(slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
+		dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
 		DEBUG((" SETTING %08lx to %x\n",
-		       (&slot->pSG[i].ins), 
+		       (&slot->pSG[i].ins),
 		       slot->SG[i].ins));
 	}
 	slot->resume_offset = 0;
diff --git a/drivers/scsi/53c700.h b/drivers/scsi/53c700.h
index f5c3caf..f38822d 100644
--- a/drivers/scsi/53c700.h
+++ b/drivers/scsi/53c700.h
@@ -415,31 +415,31 @@
 #define NCR_710_MIN_XFERP	0
 #define NCR_700_MIN_PERIOD	25 /* for SDTR message, 100ns */
 
-#define script_patch_32(script, symbol, value) \
+#define script_patch_32(dev, script, symbol, value) \
 { \
 	int i; \
 	for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \
 		__u32 val = bS_to_cpu((script)[A_##symbol##_used[i]]) + value; \
 		(script)[A_##symbol##_used[i]] = bS_to_host(val); \
-		dma_cache_sync(&(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
+		dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
 		DEBUG((" script, patching %s at %d to 0x%lx\n", \
 		       #symbol, A_##symbol##_used[i], (value))); \
 	} \
 }
 
-#define script_patch_32_abs(script, symbol, value) \
+#define script_patch_32_abs(dev, script, symbol, value) \
 { \
 	int i; \
 	for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \
 		(script)[A_##symbol##_used[i]] = bS_to_host(value); \
-		dma_cache_sync(&(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
+		dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
 		DEBUG((" script, patching %s at %d to 0x%lx\n", \
 		       #symbol, A_##symbol##_used[i], (value))); \
 	} \
 }
 
 /* Used for patching the SCSI ID in the SELECT instruction */
-#define script_patch_ID(script, symbol, value) \
+#define script_patch_ID(dev, script, symbol, value) \
 { \
 	int i; \
 	for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \
@@ -447,13 +447,13 @@
 		val &= 0xff00ffff; \
 		val |= ((value) & 0xff) << 16; \
 		(script)[A_##symbol##_used[i]] = bS_to_host(val); \
-		dma_cache_sync(&(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
+		dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
 		DEBUG((" script, patching ID field %s at %d to 0x%x\n", \
 		       #symbol, A_##symbol##_used[i], val)); \
 	} \
 }
 
-#define script_patch_16(script, symbol, value) \
+#define script_patch_16(dev, script, symbol, value) \
 { \
 	int i; \
 	for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \
@@ -461,7 +461,7 @@
 		val &= 0xffff0000; \
 		val |= ((value) & 0xffff); \
 		(script)[A_##symbol##_used[i]] = bS_to_host(val); \
-		dma_cache_sync(&(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
+		dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
 		DEBUG((" script, patching short field %s at %d to 0x%x\n", \
 		       #symbol, A_##symbol##_used[i], val)); \
 	} \
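
[Editorial note] The 53c700 changes above thread a struct device through every cache-maintenance call: dma_cache_sync() (and dma_is_consistent()) now name the device performing the DMA, which is why the script_patch_* macros grow a dev parameter. A minimal sketch of the new calling convention; dev, buf and len are placeholders:

#include <linux/dma-mapping.h>

static void my_sync_to_device(struct device *dev, void *buf, size_t len)
{
	/* write back the CPU cache before the device reads the buffer */
	dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);
}

static void my_sync_from_device(struct device *dev, void *buf, size_t len)
{
	/* prepare to read data the device has just written */
	dma_cache_sync(dev, buf, len, DMA_FROM_DEVICE);
}
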
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index a6aa910..bb3cb33 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -849,7 +849,7 @@
 	hostdata->issue_queue = NULL;
 	hostdata->disconnected_queue = NULL;
 	
-	INIT_WORK(&hostdata->coroutine, NCR5380_main, hostdata);
+	INIT_DELAYED_WORK(&hostdata->coroutine, NCR5380_main);
 	
 #ifdef NCR5380_STATS
 	for (i = 0; i < 8; ++i) {
@@ -1016,7 +1016,7 @@
 
 	/* Run the coroutine if it isn't already running. */
 	/* Kick off command processing */
-	schedule_work(&hostdata->coroutine);
+	schedule_delayed_work(&hostdata->coroutine, 0);
 	return 0;
 }
 
@@ -1033,9 +1033,10 @@
  *	host lock and called routines may take the isa dma lock.
  */
 
-static void NCR5380_main(void *p)
+static void NCR5380_main(struct work_struct *work)
 {
-	struct NCR5380_hostdata *hostdata = p;
+	struct NCR5380_hostdata *hostdata =
+		container_of(work, struct NCR5380_hostdata, coroutine.work);
 	struct Scsi_Host *instance = hostdata->host;
 	Scsi_Cmnd *tmp, *prev;
 	int done;
@@ -1221,7 +1222,7 @@
 		}	/* if BASR_IRQ */
 		spin_unlock_irqrestore(instance->host_lock, flags);
 		if(!done)
-			schedule_work(&hostdata->coroutine);
+			schedule_delayed_work(&hostdata->coroutine, 0);
 	} while (!done);
 	return IRQ_HANDLED;
 }
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index 1bc73de..713a108 100644
--- a/drivers/scsi/NCR5380.h
+++ b/drivers/scsi/NCR5380.h
@@ -271,7 +271,7 @@
 	unsigned long time_expires;		/* in jiffies, set prior to sleeping */
 	int select_time;			/* timer in select for target response */
 	volatile Scsi_Cmnd *selecting;
-	struct work_struct coroutine;		/* our co-routine */
+	struct delayed_work coroutine;		/* our co-routine */
 #ifdef NCR5380_STATS
 	unsigned timebase;			/* Base for time calcs */
 	long time_read[8];			/* time to do reads */
@@ -298,7 +298,7 @@
 #ifndef DONT_USE_INTR
 static irqreturn_t NCR5380_intr(int irq, void *dev_id);
 #endif
-static void NCR5380_main(void *ptr);
+static void NCR5380_main(struct work_struct *work);
 static void NCR5380_print_options(struct Scsi_Host *instance);
 #ifdef NDEBUG
 static void NCR5380_print_phase(struct Scsi_Host *instance);
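
[Editorial note] NCR5380 (and imm/ppa further down) used one struct work_struct for both immediate and delayed scheduling; with the split between work_struct and delayed_work the field becomes a struct delayed_work, "run now" becomes a zero delay, and the handler reaches its container through the embedded .work member. Sketch with hypothetical names (my_hostdata, my_main):

#include <linux/workqueue.h>

struct my_hostdata {
	struct delayed_work coroutine;
	int busy;
};

static void my_main(struct work_struct *work)
{
	/* a delayed_work wraps a work_struct in its .work member */
	struct my_hostdata *hd =
		container_of(work, struct my_hostdata, coroutine.work);

	hd->busy = 0;
}

static void my_start(struct my_hostdata *hd)
{
	INIT_DELAYED_WORK(&hd->coroutine, my_main);
	schedule_delayed_work(&hd->coroutine, 0);	/* run as soon as possible */
	/* schedule_delayed_work(&hd->coroutine, 1) would poll after one jiffy */
}
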
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 306f46b..0cec742 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -1443,7 +1443,7 @@
  * Run service completions on the card with interrupts enabled.
  *
  */
-static void run(void)
+static void run(struct work_struct *work)
 {
 	struct aha152x_hostdata *hd;
 
@@ -1499,7 +1499,7 @@
 		HOSTDATA(shpnt)->service=1;
 
 		/* Poke the BH handler */
-		INIT_WORK(&aha152x_tq, (void *) run, NULL);
+		INIT_WORK(&aha152x_tq, run);
 		schedule_work(&aha152x_tq);
 	}
 	DO_UNLOCK(flags);
diff --git a/drivers/scsi/aic94xx/aic94xx.h b/drivers/scsi/aic94xx/aic94xx.h
index 71a031d..32f513b 100644
--- a/drivers/scsi/aic94xx/aic94xx.h
+++ b/drivers/scsi/aic94xx/aic94xx.h
@@ -56,8 +56,8 @@
 /* 2*ITNL timeout + 1 second */
 #define AIC94XX_SCB_TIMEOUT  (5*HZ)
 
-extern kmem_cache_t *asd_dma_token_cache;
-extern kmem_cache_t *asd_ascb_cache;
+extern struct kmem_cache *asd_dma_token_cache;
+extern struct kmem_cache *asd_ascb_cache;
 extern char sas_addr_str[2*SAS_ADDR_SIZE + 1];
 
 static inline void asd_stringify_sas_addr(char *p, const u8 *sas_addr)
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
index af7e011..da94e12 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.c
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.c
@@ -1047,7 +1047,7 @@
 static inline struct asd_ascb *asd_ascb_alloc(struct asd_ha_struct *asd_ha,
 					      gfp_t gfp_flags)
 {
-	extern kmem_cache_t *asd_ascb_cache;
+	extern struct kmem_cache *asd_ascb_cache;
 	struct asd_seq_data *seq = &asd_ha->seq;
 	struct asd_ascb *ascb;
 	unsigned long flags;
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 42302ef..fbc82b0 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -450,8 +450,8 @@
 	asd_ha->scb_pool = NULL;
 }
 
-kmem_cache_t *asd_dma_token_cache;
-kmem_cache_t *asd_ascb_cache;
+struct kmem_cache *asd_dma_token_cache;
+struct kmem_cache *asd_ascb_cache;
 
 static int asd_create_global_caches(void)
 {
diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c
index 14d5d8c..75ed6b0 100644
--- a/drivers/scsi/aic94xx/aic94xx_scb.c
+++ b/drivers/scsi/aic94xx/aic94xx_scb.c
@@ -414,9 +414,10 @@
 }
 
 /* hard reset a phy later */
-static void do_phy_reset_later(void *data)
+static void do_phy_reset_later(struct work_struct *work)
 {
-	struct sas_phy *sas_phy = data;
+	struct sas_phy *sas_phy =
+		container_of(work, struct sas_phy, reset_work);
 	int error;
 
 	ASD_DPRINTK("%s: About to hard reset phy %d\n", __FUNCTION__,
@@ -430,7 +431,7 @@
 
 static void phy_reset_later(struct sas_phy *sas_phy, struct Scsi_Host *shost)
 {
-	INIT_WORK(&sas_phy->reset_work, do_phy_reset_later, sas_phy);
+	INIT_WORK(&sas_phy->reset_work, do_phy_reset_later);
 	queue_work(shost->work_q, &sas_phy->reset_work);
 }
 
@@ -442,7 +443,7 @@
 	struct Scsi_Host *shost = sas_ha->core.shost;
 	struct sas_task *task = ascb->uldd_task;
 
-	INIT_WORK(&task->abort_work, (void (*)(void *))sas_task_abort, task);
+	INIT_WORK(&task->abort_work, sas_task_abort);
 	queue_work(shost->work_q, &task->abort_work);
 }
 
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c
index 0e74174..e28260f 100644
--- a/drivers/scsi/ibmvscsi/ibmvstgt.c
+++ b/drivers/scsi/ibmvscsi/ibmvstgt.c
@@ -67,6 +67,7 @@
 
 	unsigned long liobn;
 	unsigned long riobn;
+	struct srp_target *target;
 };
 
 static struct workqueue_struct *vtgtd;
@@ -685,10 +686,10 @@
 	return crq;
 }
 
-static void handle_crq(void *data)
+static void handle_crq(struct work_struct *work)
 {
-	struct srp_target *target = (struct srp_target *) data;
-	struct vio_port *vport = target_to_port(target);
+	struct vio_port *vport = container_of(work, struct vio_port, crq_work);
+	struct srp_target *target = vport->target;
 	struct viosrp_crq *crq;
 	int done = 0;
 
@@ -822,6 +823,7 @@
 	target->shost = shost;
 	vport->dma_dev = dev;
 	target->ldata = vport;
+	vport->target = target;
 	err = srp_target_alloc(target, &dev->dev, INITIAL_SRP_LIMIT,
 			       SRP_MAX_IU_LEN);
 	if (err)
@@ -837,7 +839,7 @@
 	vport->liobn = dma[0];
 	vport->riobn = dma[5];
 
-	INIT_WORK(&vport->crq_work, handle_crq, target);
+	INIT_WORK(&vport->crq_work, handle_crq);
 
 	err = crq_queue_create(&vport->crq_queue, target);
 	if (err)
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index 1427a41..8f6b5bf 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -110,6 +110,7 @@
 } idescsi_scsi_t;
 
 static DEFINE_MUTEX(idescsi_ref_mutex);
+static int idescsi_nocd;			/* Set by module param to skip cd */
 
 #define ide_scsi_g(disk) \
 	container_of((disk)->private_data, struct ide_scsi_obj, driver)
@@ -1127,6 +1128,9 @@
 		warned = 1;
 	}
 
+	if (idescsi_nocd && drive->media == ide_cdrom)
+		return -ENODEV;
+
 	if (!strstr("ide-scsi", drive->driver_req) ||
 	    !drive->present ||
 	    drive->media == ide_disk ||
@@ -1187,6 +1191,8 @@
 	driver_unregister(&idescsi_driver.gen_driver);
 }
 
+module_param(idescsi_nocd, int, 0600);
+MODULE_PARM_DESC(idescsi_nocd, "Disable handling of CD-ROMs so they may be driven by ide-cd");
 module_init(init_idescsi_module);
 module_exit(exit_idescsi_module);
 MODULE_LICENSE("GPL");
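
[Editorial note] Alongside the API churn, ide-scsi gains a small module parameter that keeps the driver away from CD-ROM drives. For reference, a generic sketch of the same idiom; my_skip_cd and my_should_claim are invented names, not kernel symbols:

#include <linux/module.h>
#include <linux/moduleparam.h>

static int my_skip_cd;	/* 0 = claim CD-ROMs, non-zero = leave them alone */
module_param(my_skip_cd, int, 0600);
MODULE_PARM_DESC(my_skip_cd, "Skip CD-ROM units so another driver can claim them");

static int my_should_claim(int media_is_cd)
{
	if (my_skip_cd && media_is_cd)
		return 0;	/* a real probe routine would return -ENODEV */
	return 1;
}
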
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index e31f612..0464c18 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -36,7 +36,7 @@
 	int base_hi;		/* Hi Base address for ECP-ISA chipset */
 	int mode;		/* Transfer mode                */
 	struct scsi_cmnd *cur_cmd;	/* Current queued command       */
-	struct work_struct imm_tq;	/* Polling interrupt stuff       */
+	struct delayed_work imm_tq;	/* Polling interrupt stuff       */
 	unsigned long jstart;	/* Jiffies at start             */
 	unsigned failed:1;	/* Failure flag                 */
 	unsigned dp:1;		/* Data phase present           */
@@ -733,9 +733,9 @@
  * the scheduler's task queue to generate a stream of call-backs and
  * complete the request when the drive is ready.
  */
-static void imm_interrupt(void *data)
+static void imm_interrupt(struct work_struct *work)
 {
-	imm_struct *dev = (imm_struct *) data;
+	imm_struct *dev = container_of(work, imm_struct, imm_tq.work);
 	struct scsi_cmnd *cmd = dev->cur_cmd;
 	struct Scsi_Host *host = cmd->device->host;
 	unsigned long flags;
@@ -745,7 +745,6 @@
 		return;
 	}
 	if (imm_engine(dev, cmd)) {
-		INIT_WORK(&dev->imm_tq, imm_interrupt, (void *) dev);
 		schedule_delayed_work(&dev->imm_tq, 1);
 		return;
 	}
@@ -953,8 +952,7 @@
 	cmd->result = DID_ERROR << 16;	/* default return code */
 	cmd->SCp.phase = 0;	/* bus free */
 
-	INIT_WORK(&dev->imm_tq, imm_interrupt, dev);
-	schedule_work(&dev->imm_tq);
+	schedule_delayed_work(&dev->imm_tq, 0);
 
 	imm_pb_claim(dev);
 
@@ -1225,7 +1223,7 @@
 	else
 		ports = 8;
 
-	INIT_WORK(&dev->imm_tq, imm_interrupt, dev);
+	INIT_DELAYED_WORK(&dev->imm_tq, imm_interrupt);
 
 	err = -ENOMEM;
 	host = scsi_host_alloc(&imm_template, sizeof(imm_struct *));
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 2d83fbb..b318500 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -2307,7 +2307,7 @@
 
 /**
  * ipr_worker_thread - Worker thread
- * @data:		ioa config struct
+ * @work:		ioa config struct
  *
  * Called at task level from a work thread. This function takes care
  * of adding and removing device from the mid-layer as configuration
@@ -2316,13 +2316,14 @@
  * Return value:
  * 	nothing
  **/
-static void ipr_worker_thread(void *data)
+static void ipr_worker_thread(struct work_struct *work)
 {
 	unsigned long lock_flags;
 	struct ipr_resource_entry *res;
 	struct scsi_device *sdev;
 	struct ipr_dump *dump;
-	struct ipr_ioa_cfg *ioa_cfg = data;
+	struct ipr_ioa_cfg *ioa_cfg =
+		container_of(work, struct ipr_ioa_cfg, work_q);
 	u8 bus, target, lun;
 	int did_work;
 
@@ -6939,7 +6940,7 @@
 		return -ENOMEM;
 
 	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
-		ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, SLAB_KERNEL, &dma_addr);
+		ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
 
 		if (!ipr_cmd) {
 			ipr_free_cmd_blks(ioa_cfg);
@@ -7121,7 +7122,7 @@
 	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
 	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
 	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
-	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg);
+	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
 	init_waitqueue_head(&ioa_cfg->reset_wait_q);
 	ioa_cfg->sdt_state = INACTIVE;
 	if (ipr_enable_cache)
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 5d88621..e11b23c 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -719,9 +719,10 @@
 	return rc;
 }
 
-static void iscsi_xmitworker(void *data)
+static void iscsi_xmitworker(struct work_struct *work)
 {
-	struct iscsi_conn *conn = data;
+	struct iscsi_conn *conn =
+		container_of(work, struct iscsi_conn, xmitwork);
 	int rc;
 	/*
 	 * serialize Xmit worker on a per-connection basis.
@@ -1512,7 +1513,7 @@
 	if (conn->mgmtqueue == ERR_PTR(-ENOMEM))
 		goto mgmtqueue_alloc_fail;
 
-	INIT_WORK(&conn->xmitwork, iscsi_xmitworker, conn);
+	INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
 
 	/* allocate login_mtask used for the login/text sequences */
 	spin_lock_bh(&session->lock);
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index d977bd4..fb7df7b 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -647,10 +647,12 @@
  * Discover process only interrogates devices in order to discover the
  * domain.
  */
-static void sas_discover_domain(void *data)
+static void sas_discover_domain(struct work_struct *work)
 {
 	int error = 0;
-	struct asd_sas_port *port = data;
+	struct sas_discovery_event *ev =
+		container_of(work, struct sas_discovery_event, work);
+	struct asd_sas_port *port = ev->port;
 
 	sas_begin_event(DISCE_DISCOVER_DOMAIN, &port->disc.disc_event_lock,
 			&port->disc.pending);
@@ -692,10 +694,12 @@
 		    current->pid, error);
 }
 
-static void sas_revalidate_domain(void *data)
+static void sas_revalidate_domain(struct work_struct *work)
 {
 	int res = 0;
-	struct asd_sas_port *port = data;
+	struct sas_discovery_event *ev =
+		container_of(work, struct sas_discovery_event, work);
+	struct asd_sas_port *port = ev->port;
 
 	sas_begin_event(DISCE_REVALIDATE_DOMAIN, &port->disc.disc_event_lock,
 			&port->disc.pending);
@@ -722,7 +726,7 @@
 	BUG_ON(ev >= DISC_NUM_EVENTS);
 
 	sas_queue_event(ev, &disc->disc_event_lock, &disc->pending,
-			&disc->disc_work[ev], port->ha->core.shost);
+			&disc->disc_work[ev].work, port->ha->core.shost);
 
 	return 0;
 }
@@ -737,13 +741,15 @@
 {
 	int i;
 
-	static void (*sas_event_fns[DISC_NUM_EVENTS])(void *) = {
+	static const work_func_t sas_event_fns[DISC_NUM_EVENTS] = {
 		[DISCE_DISCOVER_DOMAIN] = sas_discover_domain,
 		[DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain,
 	};
 
 	spin_lock_init(&disc->disc_event_lock);
 	disc->pending = 0;
-	for (i = 0; i < DISC_NUM_EVENTS; i++)
-		INIT_WORK(&disc->disc_work[i], sas_event_fns[i], port);
+	for (i = 0; i < DISC_NUM_EVENTS; i++) {
+		INIT_WORK(&disc->disc_work[i].work, sas_event_fns[i]);
+		disc->disc_work[i].port = port;
+	}
 }
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
index 19110ed..d83392e 100644
--- a/drivers/scsi/libsas/sas_event.c
+++ b/drivers/scsi/libsas/sas_event.c
@@ -31,7 +31,7 @@
 	BUG_ON(event >= HA_NUM_EVENTS);
 
 	sas_queue_event(event, &sas_ha->event_lock, &sas_ha->pending,
-			&sas_ha->ha_events[event], sas_ha->core.shost);
+			&sas_ha->ha_events[event].work, sas_ha->core.shost);
 }
 
 static void notify_port_event(struct asd_sas_phy *phy, enum port_event event)
@@ -41,7 +41,7 @@
 	BUG_ON(event >= PORT_NUM_EVENTS);
 
 	sas_queue_event(event, &ha->event_lock, &phy->port_events_pending,
-			&phy->port_events[event], ha->core.shost);
+			&phy->port_events[event].work, ha->core.shost);
 }
 
 static void notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
@@ -51,12 +51,12 @@
 	BUG_ON(event >= PHY_NUM_EVENTS);
 
 	sas_queue_event(event, &ha->event_lock, &phy->phy_events_pending,
-			&phy->phy_events[event], ha->core.shost);
+			&phy->phy_events[event].work, ha->core.shost);
 }
 
 int sas_init_events(struct sas_ha_struct *sas_ha)
 {
-	static void (*sas_ha_event_fns[HA_NUM_EVENTS])(void *) = {
+	static const work_func_t sas_ha_event_fns[HA_NUM_EVENTS] = {
 		[HAE_RESET] = sas_hae_reset,
 	};
 
@@ -64,8 +64,10 @@
 
 	spin_lock_init(&sas_ha->event_lock);
 
-	for (i = 0; i < HA_NUM_EVENTS; i++)
-		INIT_WORK(&sas_ha->ha_events[i], sas_ha_event_fns[i], sas_ha);
+	for (i = 0; i < HA_NUM_EVENTS; i++) {
+		INIT_WORK(&sas_ha->ha_events[i].work, sas_ha_event_fns[i]);
+		sas_ha->ha_events[i].ha = sas_ha;
+	}
 
 	sas_ha->notify_ha_event = notify_ha_event;
 	sas_ha->notify_port_event = notify_port_event;
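
[Editorial note] Where libsas used to pass the HA/phy/port pointer as the work data, it now wraps each event in a small structure that embeds the work item plus a back-pointer, and initializes the whole array at setup time. A sketch of that wrapper pattern with hypothetical names (my_ha, my_ha_event, my_event_fn):

#include <linux/workqueue.h>

struct my_ha;

struct my_ha_event {
	struct work_struct work;
	struct my_ha *ha;		/* back-pointer replaces the old data arg */
};

#define MY_NUM_EVENTS 2

struct my_ha {
	struct my_ha_event events[MY_NUM_EVENTS];
};

static void my_event_fn(struct work_struct *work)
{
	struct my_ha_event *ev = container_of(work, struct my_ha_event, work);
	struct my_ha *ha = ev->ha;

	(void)ha;			/* handle the event for this adapter */
}

static void my_init_events(struct my_ha *ha)
{
	int i;

	for (i = 0; i < MY_NUM_EVENTS; i++) {
		INIT_WORK(&ha->events[i].work, my_event_fn);
		ha->events[i].ha = ha;
	}
}

Note that queueing sites now pass &events[i].work (the embedded work_struct) rather than the wrapper itself, which matches the sas_queue_event() call changes above.
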
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index 0fb347b..2f0c07f 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -36,7 +36,7 @@
 
 #include "../scsi_sas_internal.h"
 
-kmem_cache_t *sas_task_cache;
+struct kmem_cache *sas_task_cache;
 
 /*------------ SAS addr hash -----------*/
 void sas_hash_addr(u8 *hashed, const u8 *sas_addr)
@@ -65,9 +65,11 @@
 
 /* ---------- HA events ---------- */
 
-void sas_hae_reset(void *data)
+void sas_hae_reset(struct work_struct *work)
 {
-	struct sas_ha_struct *ha = data;
+	struct sas_ha_event *ev =
+		container_of(work, struct sas_ha_event, work);
+	struct sas_ha_struct *ha = ev->ha;
 
 	sas_begin_event(HAE_RESET, &ha->event_lock,
 			&ha->pending);
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index bffcee4..137d7e4 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -60,11 +60,11 @@
 
 void sas_deform_port(struct asd_sas_phy *phy);
 
-void sas_porte_bytes_dmaed(void *);
-void sas_porte_broadcast_rcvd(void *);
-void sas_porte_link_reset_err(void *);
-void sas_porte_timer_event(void *);
-void sas_porte_hard_reset(void *);
+void sas_porte_bytes_dmaed(struct work_struct *work);
+void sas_porte_broadcast_rcvd(struct work_struct *work);
+void sas_porte_link_reset_err(struct work_struct *work);
+void sas_porte_timer_event(struct work_struct *work);
+void sas_porte_hard_reset(struct work_struct *work);
 
 int sas_notify_lldd_dev_found(struct domain_device *);
 void sas_notify_lldd_dev_gone(struct domain_device *);
@@ -75,7 +75,7 @@
 
 struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy);
 
-void sas_hae_reset(void *);
+void sas_hae_reset(struct work_struct *work);
 
 static inline void sas_queue_event(int event, spinlock_t *lock,
 				   unsigned long *pending,
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index 9340cdb..b459c4b 100644
--- a/drivers/scsi/libsas/sas_phy.c
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -30,9 +30,11 @@
 
 /* ---------- Phy events ---------- */
 
-static void sas_phye_loss_of_signal(void *data)
+static void sas_phye_loss_of_signal(struct work_struct *work)
 {
-	struct asd_sas_phy *phy = data;
+	struct asd_sas_event *ev =
+		container_of(work, struct asd_sas_event, work);
+	struct asd_sas_phy *phy = ev->phy;
 
 	sas_begin_event(PHYE_LOSS_OF_SIGNAL, &phy->ha->event_lock,
 			&phy->phy_events_pending);
@@ -40,18 +42,22 @@
 	sas_deform_port(phy);
 }
 
-static void sas_phye_oob_done(void *data)
+static void sas_phye_oob_done(struct work_struct *work)
 {
-	struct asd_sas_phy *phy = data;
+	struct asd_sas_event *ev =
+		container_of(work, struct asd_sas_event, work);
+	struct asd_sas_phy *phy = ev->phy;
 
 	sas_begin_event(PHYE_OOB_DONE, &phy->ha->event_lock,
 			&phy->phy_events_pending);
 	phy->error = 0;
 }
 
-static void sas_phye_oob_error(void *data)
+static void sas_phye_oob_error(struct work_struct *work)
 {
-	struct asd_sas_phy *phy = data;
+	struct asd_sas_event *ev =
+		container_of(work, struct asd_sas_event, work);
+	struct asd_sas_phy *phy = ev->phy;
 	struct sas_ha_struct *sas_ha = phy->ha;
 	struct asd_sas_port *port = phy->port;
 	struct sas_internal *i =
@@ -80,9 +86,11 @@
 	}
 }
 
-static void sas_phye_spinup_hold(void *data)
+static void sas_phye_spinup_hold(struct work_struct *work)
 {
-	struct asd_sas_phy *phy = data;
+	struct asd_sas_event *ev =
+		container_of(work, struct asd_sas_event, work);
+	struct asd_sas_phy *phy = ev->phy;
 	struct sas_ha_struct *sas_ha = phy->ha;
 	struct sas_internal *i =
 		to_sas_internal(sas_ha->core.shost->transportt);
@@ -100,14 +108,14 @@
 {
 	int i;
 
-	static void (*sas_phy_event_fns[PHY_NUM_EVENTS])(void *) = {
+	static const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS] = {
 		[PHYE_LOSS_OF_SIGNAL] = sas_phye_loss_of_signal,
 		[PHYE_OOB_DONE] = sas_phye_oob_done,
 		[PHYE_OOB_ERROR] = sas_phye_oob_error,
 		[PHYE_SPINUP_HOLD] = sas_phye_spinup_hold,
 	};
 
-	static void (*sas_port_event_fns[PORT_NUM_EVENTS])(void *) = {
+	static const work_func_t sas_port_event_fns[PORT_NUM_EVENTS] = {
 		[PORTE_BYTES_DMAED] = sas_porte_bytes_dmaed,
 		[PORTE_BROADCAST_RCVD] = sas_porte_broadcast_rcvd,
 		[PORTE_LINK_RESET_ERR] = sas_porte_link_reset_err,
@@ -122,13 +130,18 @@
 
 		phy->error = 0;
 		INIT_LIST_HEAD(&phy->port_phy_el);
-		for (k = 0; k < PORT_NUM_EVENTS; k++)
-			INIT_WORK(&phy->port_events[k], sas_port_event_fns[k],
-				  phy);
+		for (k = 0; k < PORT_NUM_EVENTS; k++) {
+			INIT_WORK(&phy->port_events[k].work,
+				  sas_port_event_fns[k]);
+			phy->port_events[k].phy = phy;
+		}
 
-		for (k = 0; k < PHY_NUM_EVENTS; k++)
-			INIT_WORK(&phy->phy_events[k], sas_phy_event_fns[k],
-				  phy);
+		for (k = 0; k < PHY_NUM_EVENTS; k++) {
+			INIT_WORK(&phy->phy_events[k].work,
+				  sas_phy_event_fns[k]);
+			phy->phy_events[k].phy = phy;
+		}
+
 		phy->port = NULL;
 		phy->ha = sas_ha;
 		spin_lock_init(&phy->frame_rcvd_lock);
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index 253cdcf..971c37c 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -181,9 +181,11 @@
 
 /* ---------- SAS port events ---------- */
 
-void sas_porte_bytes_dmaed(void *data)
+void sas_porte_bytes_dmaed(struct work_struct *work)
 {
-	struct asd_sas_phy *phy = data;
+	struct asd_sas_event *ev =
+		container_of(work, struct asd_sas_event, work);
+	struct asd_sas_phy *phy = ev->phy;
 
 	sas_begin_event(PORTE_BYTES_DMAED, &phy->ha->event_lock,
 			&phy->port_events_pending);
@@ -191,11 +193,13 @@
 	sas_form_port(phy);
 }
 
-void sas_porte_broadcast_rcvd(void *data)
+void sas_porte_broadcast_rcvd(struct work_struct *work)
 {
+	struct asd_sas_event *ev =
+		container_of(work, struct asd_sas_event, work);
+	struct asd_sas_phy *phy = ev->phy;
 	unsigned long flags;
 	u32 prim;
-	struct asd_sas_phy *phy = data;
 
 	sas_begin_event(PORTE_BROADCAST_RCVD, &phy->ha->event_lock,
 			&phy->port_events_pending);
@@ -208,9 +212,11 @@
 	sas_discover_event(phy->port, DISCE_REVALIDATE_DOMAIN);
 }
 
-void sas_porte_link_reset_err(void *data)
+void sas_porte_link_reset_err(struct work_struct *work)
 {
-	struct asd_sas_phy *phy = data;
+	struct asd_sas_event *ev =
+		container_of(work, struct asd_sas_event, work);
+	struct asd_sas_phy *phy = ev->phy;
 
 	sas_begin_event(PORTE_LINK_RESET_ERR, &phy->ha->event_lock,
 			&phy->port_events_pending);
@@ -218,9 +224,11 @@
 	sas_deform_port(phy);
 }
 
-void sas_porte_timer_event(void *data)
+void sas_porte_timer_event(struct work_struct *work)
 {
-	struct asd_sas_phy *phy = data;
+	struct asd_sas_event *ev =
+		container_of(work, struct asd_sas_event, work);
+	struct asd_sas_phy *phy = ev->phy;
 
 	sas_begin_event(PORTE_TIMER_EVENT, &phy->ha->event_lock,
 			&phy->port_events_pending);
@@ -228,9 +236,11 @@
 	sas_deform_port(phy);
 }
 
-void sas_porte_hard_reset(void *data)
+void sas_porte_hard_reset(struct work_struct *work)
 {
-	struct asd_sas_phy *phy = data;
+	struct asd_sas_event *ev =
+		container_of(work, struct asd_sas_event, work);
+	struct asd_sas_phy *phy = ev->phy;
 
 	sas_begin_event(PORTE_HARD_RESET, &phy->ha->event_lock,
 			&phy->port_events_pending);
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index e064aac..22672d5 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -846,8 +846,10 @@
 	return -EAGAIN;
 }
 
-void sas_task_abort(struct sas_task *task)
+void sas_task_abort(struct work_struct *work)
 {
+	struct sas_task *task =
+		container_of(work, struct sas_task, abort_work);
 	int i;
 
 	for (i = 0; i < 5; i++)
diff --git a/drivers/scsi/oktagon_esp.c b/drivers/scsi/oktagon_esp.c
index dd67a68..c116a6a 100644
--- a/drivers/scsi/oktagon_esp.c
+++ b/drivers/scsi/oktagon_esp.c
@@ -72,12 +72,12 @@
 static int  oktagon_notify_reboot(struct notifier_block *this, unsigned long code, void *x);
 
 #ifdef USE_BOTTOM_HALF
-static void dma_commit(void *opaque);
+static void dma_commit(struct work_struct *unused);
 
 long oktag_to_io(long *paddr, long *addr, long len);
 long oktag_from_io(long *addr, long *paddr, long len);
 
-static DECLARE_WORK(tq_fake_dma, dma_commit, NULL);
+static DECLARE_WORK(tq_fake_dma, dma_commit);
 
 #define DMA_MAXTRANSFER 0x8000
 
@@ -266,7 +266,7 @@
  */
  
  
-static void dma_commit(void *opaque)
+static void dma_commit(struct work_struct *unused)
 {
     long wait,len2,pos;
     struct NCR_ESP *esp;
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index 89a2a9f..584ba4d 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -31,7 +31,7 @@
 	int base;		/* Actual port address          */
 	int mode;		/* Transfer mode                */
 	struct scsi_cmnd *cur_cmd;	/* Current queued command       */
-	struct work_struct ppa_tq;	/* Polling interrupt stuff       */
+	struct delayed_work ppa_tq;	/* Polling interrupt stuff       */
 	unsigned long jstart;	/* Jiffies at start             */
 	unsigned long recon_tmo;	/* How many usecs to wait for reconnection (6th bit) */
 	unsigned int failed:1;	/* Failure flag                 */
@@ -627,9 +627,9 @@
  * the scheduler's task queue to generate a stream of call-backs and
  * complete the request when the drive is ready.
  */
-static void ppa_interrupt(void *data)
+static void ppa_interrupt(struct work_struct *work)
 {
-	ppa_struct *dev = (ppa_struct *) data;
+	ppa_struct *dev = container_of(work, ppa_struct, ppa_tq.work);
 	struct scsi_cmnd *cmd = dev->cur_cmd;
 
 	if (!cmd) {
@@ -637,7 +637,6 @@
 		return;
 	}
 	if (ppa_engine(dev, cmd)) {
-		dev->ppa_tq.data = (void *) dev;
 		schedule_delayed_work(&dev->ppa_tq, 1);
 		return;
 	}
@@ -822,8 +821,7 @@
 	cmd->result = DID_ERROR << 16;	/* default return code */
 	cmd->SCp.phase = 0;	/* bus free */
 
-	dev->ppa_tq.data = dev;
-	schedule_work(&dev->ppa_tq);
+	schedule_delayed_work(&dev->ppa_tq, 0);
 
 	ppa_pb_claim(dev);
 
@@ -1086,7 +1084,7 @@
 	else
 		ports = 8;
 
-	INIT_WORK(&dev->ppa_tq, ppa_interrupt, dev);
+	INIT_DELAYED_WORK(&dev->ppa_tq, ppa_interrupt);
 
 	err = -ENOMEM;
 	host = scsi_host_alloc(&ppa_template, sizeof(ppa_struct *));
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index cbe0cad..d03523d 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -24,7 +24,7 @@
 /*
  * SRB allocation cache
  */
-static kmem_cache_t *srb_cachep;
+static struct kmem_cache *srb_cachep;
 
 /*
  * Ioctl related information.
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index db9d88e..9ef693c 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -19,7 +19,7 @@
 /*
  * SRB allocation cache
  */
-static kmem_cache_t *srb_cachep;
+static struct kmem_cache *srb_cachep;
 
 /*
  * Module parameter information and variables
@@ -961,9 +961,10 @@
  * the mid-level tries to sleep when it reaches the driver threshold
  * "host->can_queue". This can cause a panic if we were in our interrupt code.
  **/
-static void qla4xxx_do_dpc(void *data)
+static void qla4xxx_do_dpc(struct work_struct *work)
 {
-	struct scsi_qla_host *ha = (struct scsi_qla_host *) data;
+	struct scsi_qla_host *ha =
+		container_of(work, struct scsi_qla_host, dpc_work);
 	struct ddb_entry *ddb_entry, *dtemp;
 
 	DEBUG2(printk("scsi%ld: %s: DPC handler waking up."
@@ -1253,7 +1254,7 @@
 		ret = -ENODEV;
 		goto probe_failed;
 	}
-	INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc, ha);
+	INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc);
 
 	ret = request_irq(pdev->irq, qla4xxx_intr_handler,
 			  SA_INTERRUPT|SA_SHIRQ, "qla4xxx", ha);
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index fafc00d..24cffd9 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -136,7 +136,7 @@
 EXPORT_SYMBOL(scsi_device_type);
 
 struct scsi_host_cmd_pool {
-	kmem_cache_t	*slab;
+	struct kmem_cache	*slab;
 	unsigned int	users;
 	char		*name;
 	unsigned int	slab_flags;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index fb616c6..1748e27 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -36,7 +36,7 @@
 struct scsi_host_sg_pool {
 	size_t		size;
 	char		*name; 
-	kmem_cache_t	*slab;
+	struct kmem_cache	*slab;
 	mempool_t	*pool;
 };
 
@@ -241,7 +241,7 @@
 	char sense[SCSI_SENSE_BUFFERSIZE];
 };
 
-static kmem_cache_t *scsi_io_context_cache;
+static struct kmem_cache *scsi_io_context_cache;
 
 static void scsi_end_async(struct request *req, int uptodate)
 {
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 4d656148..14e635a 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -437,9 +437,10 @@
 	goto retry;
 }
 
-static void scsi_target_reap_usercontext(void *data)
+static void scsi_target_reap_usercontext(struct work_struct *work)
 {
-	struct scsi_target *starget = data;
+	struct scsi_target *starget =
+		container_of(work, struct scsi_target, ew.work);
 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
 	unsigned long flags;
 
@@ -475,7 +476,7 @@
 		starget->state = STARGET_DEL;
 		spin_unlock_irqrestore(shost->host_lock, flags);
 		execute_in_process_context(scsi_target_reap_usercontext,
-					   starget, &starget->ew);
+					   &starget->ew);
 		return;
 
 	}
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index e1a9166..259c90c 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -218,16 +218,16 @@
 	put_device(&sdev->sdev_gendev);
 }
 
-static void scsi_device_dev_release_usercontext(void *data)
+static void scsi_device_dev_release_usercontext(struct work_struct *work)
 {
-	struct device *dev = data;
 	struct scsi_device *sdev;
 	struct device *parent;
 	struct scsi_target *starget;
 	unsigned long flags;
 
-	parent = dev->parent;
-	sdev = to_scsi_device(dev);
+	sdev = container_of(work, struct scsi_device, ew.work);
+
+	parent = sdev->sdev_gendev.parent;
 	starget = to_scsi_target(parent);
 
 	spin_lock_irqsave(sdev->host->host_lock, flags);
@@ -258,7 +258,7 @@
 static void scsi_device_dev_release(struct device *dev)
 {
 	struct scsi_device *sdp = to_scsi_device(dev);
-	execute_in_process_context(scsi_device_dev_release_usercontext, dev,
+	execute_in_process_context(scsi_device_dev_release_usercontext,
 				   &sdp->ew);
 }
 
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index 39da5cd..d402aff 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -33,7 +33,7 @@
 #include "scsi_tgt_priv.h"
 
 static struct workqueue_struct *scsi_tgtd;
-static kmem_cache_t *scsi_tgt_cmd_cache;
+static struct kmem_cache *scsi_tgt_cmd_cache;
 
 /*
  * TODO: this struct will be killed when the block layer supports large bios
@@ -185,10 +185,11 @@
 	spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
 }
 
-static void scsi_tgt_cmd_destroy(void *data)
+static void scsi_tgt_cmd_destroy(struct work_struct *work)
 {
-	struct scsi_cmnd *cmd = data;
-	struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
+	struct scsi_tgt_cmd *tcmd =
+		container_of(work, struct scsi_tgt_cmd, work);
+	struct scsi_cmnd *cmd = tcmd->rq->special;
 
 	dprintk("cmd %p %d %lu\n", cmd, cmd->sc_data_direction,
 		rq_data_dir(cmd->request));
@@ -214,6 +215,7 @@
 	struct list_head *head;
 
 	tcmd->tag = tag;
+	INIT_WORK(&tcmd->work, scsi_tgt_cmd_destroy);
 	spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
 	head = &qdata->cmd_hash[cmd_hashfn(tag)];
 	list_add(&tcmd->hash_list, head);
@@ -303,7 +305,7 @@
 		cmd = tcmd->rq->special;
 
 		shost->hostt->eh_abort_handler(cmd);
-		scsi_tgt_cmd_destroy(cmd);
+		scsi_tgt_cmd_destroy(&tcmd->work);
 	}
 }
 EXPORT_SYMBOL_GPL(scsi_tgt_free_queue);
@@ -347,7 +349,6 @@
 	dprintk("cmd %p %lu\n", cmd, rq_data_dir(cmd->request));
 
 	scsi_tgt_uspace_send_status(cmd, tcmd->tag);
-	INIT_WORK(&tcmd->work, scsi_tgt_cmd_destroy, cmd);
 	queue_work(scsi_tgtd, &tcmd->work);
 }
 
@@ -549,13 +550,15 @@
 
 static int scsi_tgt_abort_cmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
 {
+	struct scsi_tgt_cmd *tcmd;
 	int err;
 
 	err = shost->hostt->eh_abort_handler(cmd);
 	if (err)
 		eprintk("fail to abort %p\n", cmd);
 
-	scsi_tgt_cmd_destroy(cmd);
+	tcmd = cmd->request->end_io_data;
+	scsi_tgt_cmd_destroy(&tcmd->work);
 	return err;
 }
 
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 38c215a..3571ce8 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -241,9 +241,9 @@
 #define FC_MGMTSRVR_PORTID		0x00000a
 
 
-static void fc_timeout_deleted_rport(void *data);
-static void fc_timeout_fail_rport_io(void *data);
-static void fc_scsi_scan_rport(void *data);
+static void fc_timeout_deleted_rport(struct work_struct *work);
+static void fc_timeout_fail_rport_io(struct work_struct *work);
+static void fc_scsi_scan_rport(struct work_struct *work);
 
 /*
  * Attribute counts pre object type...
@@ -1613,7 +1613,7 @@
  * 	1 on success / 0 already queued / < 0 for error
  **/
 static int
-fc_queue_devloss_work(struct Scsi_Host *shost, struct work_struct *work,
+fc_queue_devloss_work(struct Scsi_Host *shost, struct delayed_work *work,
 				unsigned long delay)
 {
 	if (unlikely(!fc_host_devloss_work_q(shost))) {
@@ -1625,9 +1625,6 @@
 		return -EINVAL;
 	}
 
-	if (delay == 0)
-		return queue_work(fc_host_devloss_work_q(shost), work);
-
 	return queue_delayed_work(fc_host_devloss_work_q(shost), work, delay);
 }
 
@@ -1712,12 +1709,13 @@
  * fc_starget_delete - called to delete the scsi decendents of an rport
  *                  (target and all sdevs)
  *
- * @data:	remote port to be operated on.
+ * @work:	remote port to be operated on.
  **/
 static void
-fc_starget_delete(void *data)
+fc_starget_delete(struct work_struct *work)
 {
-	struct fc_rport *rport = (struct fc_rport *)data;
+	struct fc_rport *rport =
+		container_of(work, struct fc_rport, stgt_delete_work);
 	struct Scsi_Host *shost = rport_to_shost(rport);
 	unsigned long flags;
 	struct fc_internal *i = to_fc_internal(shost->transportt);
@@ -1751,12 +1749,13 @@
 /**
  * fc_rport_final_delete - finish rport termination and delete it.
  *
- * @data:	remote port to be deleted.
+ * @work:	remote port to be deleted.
  **/
 static void
-fc_rport_final_delete(void *data)
+fc_rport_final_delete(struct work_struct *work)
 {
-	struct fc_rport *rport = (struct fc_rport *)data;
+	struct fc_rport *rport =
+		container_of(work, struct fc_rport, rport_delete_work);
 	struct device *dev = &rport->dev;
 	struct Scsi_Host *shost = rport_to_shost(rport);
 	struct fc_internal *i = to_fc_internal(shost->transportt);
@@ -1770,7 +1769,7 @@
 
 	/* Delete SCSI target and sdevs */
 	if (rport->scsi_target_id != -1)
-		fc_starget_delete(data);
+		fc_starget_delete(&rport->stgt_delete_work);
 	else if (i->f->dev_loss_tmo_callbk)
 		i->f->dev_loss_tmo_callbk(rport);
 	else if (i->f->terminate_rport_io)
@@ -1829,11 +1828,11 @@
 	rport->channel = channel;
 	rport->fast_io_fail_tmo = -1;
 
-	INIT_WORK(&rport->dev_loss_work, fc_timeout_deleted_rport, rport);
-	INIT_WORK(&rport->fail_io_work, fc_timeout_fail_rport_io, rport);
-	INIT_WORK(&rport->scan_work, fc_scsi_scan_rport, rport);
-	INIT_WORK(&rport->stgt_delete_work, fc_starget_delete, rport);
-	INIT_WORK(&rport->rport_delete_work, fc_rport_final_delete, rport);
+	INIT_DELAYED_WORK(&rport->dev_loss_work, fc_timeout_deleted_rport);
+	INIT_DELAYED_WORK(&rport->fail_io_work, fc_timeout_fail_rport_io);
+	INIT_WORK(&rport->scan_work, fc_scsi_scan_rport);
+	INIT_WORK(&rport->stgt_delete_work, fc_starget_delete);
+	INIT_WORK(&rport->rport_delete_work, fc_rport_final_delete);
 
 	spin_lock_irqsave(shost->host_lock, flags);
 
@@ -1963,7 +1962,7 @@
 			}
 
 			if (match) {
-				struct work_struct *work = 
+				struct delayed_work *work =
 							&rport->dev_loss_work;
 
 				memcpy(&rport->node_name, &ids->node_name,
@@ -2267,12 +2266,13 @@
  *                       was a SCSI target (thus was blocked), and failed
  *                       to return in the alloted time.
  * 
- * @data:	rport target that failed to reappear in the alloted time.
+ * @work:	rport target that failed to reappear in the alloted time.
  **/
 static void
-fc_timeout_deleted_rport(void  *data)
+fc_timeout_deleted_rport(struct work_struct *work)
 {
-	struct fc_rport *rport = (struct fc_rport *)data;
+	struct fc_rport *rport =
+		container_of(work, struct fc_rport, dev_loss_work.work);
 	struct Scsi_Host *shost = rport_to_shost(rport);
 	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
 	unsigned long flags;
@@ -2366,15 +2366,16 @@
  * fc_timeout_fail_rport_io - Timeout handler for a fast io failing on a
  *                       disconnected SCSI target.
  *
- * @data:	rport to terminate io on.
+ * @work:	rport to terminate io on.
  *
  * Notes: Only requests the failure of the io, not that all are flushed
  *    prior to returning.
  **/
 static void
-fc_timeout_fail_rport_io(void  *data)
+fc_timeout_fail_rport_io(struct work_struct *work)
 {
-	struct fc_rport *rport = (struct fc_rport *)data;
+	struct fc_rport *rport =
+		container_of(work, struct fc_rport, fail_io_work.work);
 	struct Scsi_Host *shost = rport_to_shost(rport);
 	struct fc_internal *i = to_fc_internal(shost->transportt);
 
@@ -2387,12 +2388,13 @@
 /**
  * fc_scsi_scan_rport - called to perform a scsi scan on a remote port.
  *
- * @data:	remote port to be scanned.
+ * @work:	remote port to be scanned.
  **/
 static void
-fc_scsi_scan_rport(void *data)
+fc_scsi_scan_rport(struct work_struct *work)
 {
-	struct fc_rport *rport = (struct fc_rport *)data;
+	struct fc_rport *rport =
+		container_of(work, struct fc_rport, scan_work);
 	struct Scsi_Host *shost = rport_to_shost(rport);
 	unsigned long flags;
 
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 9b25124..9c22f13 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -234,9 +234,11 @@
 	return 0;
 }
 
-static void session_recovery_timedout(void *data)
+static void session_recovery_timedout(struct work_struct *work)
 {
-	struct iscsi_cls_session *session = data;
+	struct iscsi_cls_session *session =
+		container_of(work, struct iscsi_cls_session,
+			     recovery_work.work);
 
 	dev_printk(KERN_INFO, &session->dev, "iscsi: session recovery timed "
 		  "out after %d secs\n", session->recovery_tmo);
@@ -276,7 +278,7 @@
 
 	session->transport = transport;
 	session->recovery_tmo = 120;
-	INIT_WORK(&session->recovery_work, session_recovery_timedout, session);
+	INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
 	INIT_LIST_HEAD(&session->host_list);
 	INIT_LIST_HEAD(&session->sess_list);
 
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 9f070f0..3fded48 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -964,9 +964,10 @@
 };
 
 static void
-spi_dv_device_work_wrapper(void *data)
+spi_dv_device_work_wrapper(struct work_struct *work)
 {
-	struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data;
+	struct work_queue_wrapper *wqw =
+		container_of(work, struct work_queue_wrapper, work);
 	struct scsi_device *sdev = wqw->sdev;
 
 	kfree(wqw);
@@ -1006,7 +1007,7 @@
 		return;
 	}
 
-	INIT_WORK(&wqw->work, spi_dv_device_work_wrapper, wqw);
+	INIT_WORK(&wqw->work, spi_dv_device_work_wrapper);
 	wqw->sdev = sdev;
 
 	schedule_work(&wqw->work);
diff --git a/drivers/serial/8250_exar_st16c554.c b/drivers/serial/8250_exar_st16c554.c
new file mode 100644
index 0000000..567143a
--- /dev/null
+++ b/drivers/serial/8250_exar_st16c554.c
@@ -0,0 +1,52 @@
+/*
+ *  linux/drivers/serial/8250_exar.c
+ *
+ *  Written by Paul B Schroeder < pschroeder "at" uplogix "dot" com >
+ *  Based on 8250_boca.
+ *
+ *  Copyright (C) 2005 Russell King.
+ *  Data taken from include/asm-i386/serial.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/serial_8250.h>
+
+#define PORT(_base,_irq)				\
+	{						\
+		.iobase		= _base,		\
+		.irq		= _irq,			\
+		.uartclk	= 1843200,		\
+		.iotype		= UPIO_PORT,		\
+		.flags		= UPF_BOOT_AUTOCONF,	\
+	}
+
+static struct plat_serial8250_port exar_data[] = {
+	PORT(0x100, 5),
+	PORT(0x108, 5),
+	PORT(0x110, 5),
+	PORT(0x118, 5),
+	{ },
+};
+
+static struct platform_device exar_device = {
+	.name			= "serial8250",
+	.id			= PLAT8250_DEV_EXAR_ST16C554,
+	.dev			= {
+		.platform_data	= exar_data,
+	},
+};
+
+static int __init exar_init(void)
+{
+	return platform_device_register(&exar_device);
+}
+
+module_init(exar_init);
+
+MODULE_AUTHOR("Paul B Schroeder");
+MODULE_DESCRIPTION("8250 serial probe module for Exar cards");
+MODULE_LICENSE("GPL");
diff --git a/drivers/serial/8250_pnp.c b/drivers/serial/8250_pnp.c
index 71d907c..d3d6b82 100644
--- a/drivers/serial/8250_pnp.c
+++ b/drivers/serial/8250_pnp.c
@@ -464,11 +464,38 @@
 		serial8250_unregister_port(line - 1);
 }
 
+#ifdef CONFIG_PM
+static int serial_pnp_suspend(struct pnp_dev *dev, pm_message_t state)
+{
+	long line = (long)pnp_get_drvdata(dev);
+
+	if (!line)
+		return -ENODEV;
+	serial8250_suspend_port(line - 1);
+	return 0;
+}
+
+static int serial_pnp_resume(struct pnp_dev *dev)
+{
+	long line = (long)pnp_get_drvdata(dev);
+
+	if (!line)
+		return -ENODEV;
+	serial8250_resume_port(line - 1);
+	return 0;
+}
+#else
+#define serial_pnp_suspend NULL
+#define serial_pnp_resume NULL
+#endif /* CONFIG_PM */
+
 static struct pnp_driver serial_pnp_driver = {
 	.name		= "serial",
-	.id_table	= pnp_dev_table,
 	.probe		= serial_pnp_probe,
 	.remove		= __devexit_p(serial_pnp_remove),
+	.suspend	= serial_pnp_suspend,
+	.resume		= serial_pnp_resume,
+	.id_table	= pnp_dev_table,
 };
 
 static int __init serial8250_pnp_init(void)
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 0b71e7d..fc12d5d 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -210,6 +210,17 @@
 	  To compile this driver as a module, choose M here: the module
 	  will be called 8250_boca.
 
+config SERIAL_8250_EXAR_ST16C554
+	tristate "Support Exar ST16C554/554D Quad UART"
+	depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS
+	help
+	  The Uplogix Envoy TU301 uses this Exar Quad UART.  If you are
+	  tinkering with your Envoy TU301, or have a machine with this UART,
+	  say Y here.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called 8250_exar_st16c554.
+
 config SERIAL_8250_HUB6
 	tristate "Support Hub6 cards"
 	depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS
@@ -511,6 +522,25 @@
 	  your boot loader (lilo or loadlin) about how to pass options to the
 	  kernel at boot time.)
 
+config SERIAL_UARTLITE
+	tristate "Xilinx uartlite serial port support"
+	depends on PPC32
+	select SERIAL_CORE
+	help
+	  Say Y here if you want to use the Xilinx uartlite serial controller.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called uartlite.
+
+config SERIAL_UARTLITE_CONSOLE
+	bool "Support for console on Xilinx uartlite serial port"
+	depends on SERIAL_UARTLITE=y
+	select SERIAL_CORE_CONSOLE
+	help
+	  Say Y here if you wish to use a Xilinx uartlite as the system
+	  console (the system console is the device which receives all kernel
+	  messages and warnings and which allows logins in single user mode).
+
 config SERIAL_SUNCORE
 	bool
 	depends on SPARC
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
index b4d8a7c..df3632c 100644
--- a/drivers/serial/Makefile
+++ b/drivers/serial/Makefile
@@ -17,6 +17,7 @@
 obj-$(CONFIG_SERIAL_8250_FOURPORT) += 8250_fourport.o
 obj-$(CONFIG_SERIAL_8250_ACCENT) += 8250_accent.o
 obj-$(CONFIG_SERIAL_8250_BOCA) += 8250_boca.o
+obj-$(CONFIG_SERIAL_8250_EXAR_ST16C554) += 8250_exar_st16c554.o
 obj-$(CONFIG_SERIAL_8250_HUB6) += 8250_hub6.o
 obj-$(CONFIG_SERIAL_8250_MCA) += 8250_mca.o
 obj-$(CONFIG_SERIAL_8250_AU1X00) += 8250_au1x00.o
@@ -55,4 +56,5 @@
 obj-$(CONFIG_SERIAL_SGI_IOC4) += ioc4_serial.o
 obj-$(CONFIG_SERIAL_SGI_IOC3) += ioc3_serial.o
 obj-$(CONFIG_SERIAL_ATMEL) += atmel_serial.o
+obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o
 obj-$(CONFIG_SERIAL_NETX) += netx-serial.o
diff --git a/drivers/serial/amba-pl010.c b/drivers/serial/amba-pl010.c
index 4213fab..4d3626e 100644
--- a/drivers/serial/amba-pl010.c
+++ b/drivers/serial/amba-pl010.c
@@ -129,6 +129,8 @@
 		 */
 		rsr = readb(port->membase + UART01x_RSR) | UART_DUMMY_RSR_RX;
 		if (unlikely(rsr & UART01x_RSR_ANY)) {
+			writel(0, port->membase + UART01x_ECR);
+
 			if (rsr & UART01x_RSR_BE) {
 				rsr &= ~(UART01x_RSR_FE | UART01x_RSR_PE);
 				port->icount.brk++;
diff --git a/drivers/serial/dz.c b/drivers/serial/dz.c
index 53662b3..af1544f 100644
--- a/drivers/serial/dz.c
+++ b/drivers/serial/dz.c
@@ -1,11 +1,13 @@
 /*
- * dz.c: Serial port driver for DECStations equiped
+ * dz.c: Serial port driver for DECstations equipped
  *       with the DZ chipset.
  *
  * Copyright (C) 1998 Olivier A. D. Lebaillif
  *
  * Email: olivier.lebaillif@ifrsys.com
  *
+ * Copyright (C) 2004, 2006  Maciej W. Rozycki
+ *
  * [31-AUG-98] triemer
  * Changed IRQ to use Harald's dec internals interrupts.h
  * removed base_addr code - moving address assignment to setup.c
@@ -26,10 +28,16 @@
 
 #undef DEBUG_DZ
 
+#if defined(CONFIG_SERIAL_DZ_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/init.h>
 #include <linux/console.h>
+#include <linux/sysrq.h>
 #include <linux/tty.h>
 #include <linux/tty_flip.h>
 #include <linux/serial_core.h>
@@ -45,14 +53,10 @@
 #include <asm/system.h>
 #include <asm/uaccess.h>
 
-#define CONSOLE_LINE (3)	/* for definition of struct console */
-
 #include "dz.h"
 
-#define DZ_INTR_DEBUG 1
-
 static char *dz_name = "DECstation DZ serial driver version ";
-static char *dz_version = "1.02";
+static char *dz_version = "1.03";
 
 struct dz_port {
 	struct uart_port	port;
@@ -61,22 +65,6 @@
 
 static struct dz_port dz_ports[DZ_NB_PORT];
 
-#ifdef DEBUG_DZ
-/*
- * debugging code to send out chars via prom
- */
-static void debug_console(const char *s, int count)
-{
-	unsigned i;
-
-	for (i = 0; i < count; i++) {
-		if (*s == 10)
-			prom_printf("%c", 13);
-		prom_printf("%c", *s++);
-	}
-}
-#endif
-
 /*
  * ------------------------------------------------------------
  * dz_in () and dz_out ()
@@ -90,6 +78,7 @@
 {
 	volatile unsigned short *addr =
 		(volatile unsigned short *) (dport->port.membase + offset);
+
 	return *addr;
 }
 
@@ -98,6 +87,7 @@
 {
 	volatile unsigned short *addr =
 		(volatile unsigned short *) (dport->port.membase + offset);
+
 	*addr = value;
 }
 
@@ -144,7 +134,7 @@
 
 	spin_lock_irqsave(&dport->port.lock, flags);
 	dport->cflag &= ~DZ_CREAD;
-	dz_out(dport, DZ_LPR, dport->cflag);
+	dz_out(dport, DZ_LPR, dport->cflag | dport->port.line);
 	spin_unlock_irqrestore(&dport->port.lock, flags);
 }
 
@@ -155,14 +145,14 @@
 
 /*
  * ------------------------------------------------------------
- * Here starts the interrupt handling routines.  All of the
- * following subroutines are declared as inline and are folded
- * into dz_interrupt.  They were separated out for readability's
- * sake.
  *
- * Note: rs_interrupt() is a "fast" interrupt, which means that it
+ * Here start the interrupt handling routines.  All of the following
+ * subroutines are declared as inline and are folded into
+ * dz_interrupt.  They were separated out for readability's sake.
+ *
+ * Note: dz_interrupt() is a "fast" interrupt, which means that it
  * runs with interrupts turned off.  People who may want to modify
- * rs_interrupt() should try to keep the interrupt handler as fast as
+ * dz_interrupt() should try to keep the interrupt handler as fast as
  * possible.  After you are done making modifications, it is not a bad
  * idea to do:
  *
@@ -180,92 +170,74 @@
  * This routine deals with inputs from any lines.
  * ------------------------------------------------------------
  */
-static inline void dz_receive_chars(struct dz_port *dport)
+static inline void
+dz_receive_chars(struct dz_port *dport_in)
 {
+	struct dz_port *dport;
 	struct tty_struct *tty = NULL;
 	struct uart_icount *icount;
-	int ignore = 0;
-	unsigned short status, tmp;
+	int lines_rx[DZ_NB_PORT] = { [0 ... DZ_NB_PORT - 1] = 0 };
+	unsigned short status;
 	unsigned char ch, flag;
+	int i;
 
-	/* this code is going to be a problem...
-	   the call to tty_flip_buffer is going to need
-	   to be rethought...
-	 */
-	do {
-		status = dz_in(dport, DZ_RBUF);
+	while ((status = dz_in(dport_in, DZ_RBUF)) & DZ_DVAL) {
+		dport = &dz_ports[LINE(status)];
+		tty = dport->port.info->tty;	/* point to the proper dev */
 
-		/* punt so we don't get duplicate characters */
-		if (!(status & DZ_DVAL))
-			goto ignore_char;
+		ch = UCHAR(status);		/* grab the char */
 
-
-		ch = UCHAR(status);	/* grab the char */
-		flag = TTY_NORMAL;
-
-#if 0
-		if (info->is_console) {
-			if (ch == 0)
-				return;		/* it's a break ... */
-		}
-#endif
-
-		tty = dport->port.info->tty;/* now tty points to the proper dev */
 		icount = &dport->port.icount;
-
-		if (!tty)
-			break;
-
 		icount->rx++;
 
-		/* keep track of the statistics */
-		if (status & (DZ_OERR | DZ_FERR | DZ_PERR)) {
-			if (status & DZ_PERR)	/* parity error */
-				icount->parity++;
-			else if (status & DZ_FERR)	/* frame error */
-				icount->frame++;
-			if (status & DZ_OERR)	/* overrun error */
-				icount->overrun++;
-
-			/*  check to see if we should ignore the character
-			   and mask off conditions that should be ignored
+		flag = TTY_NORMAL;
+		if (status & DZ_FERR) {		/* frame error */
+			/*
+			 * There is no separate BREAK status bit, so
+			 * treat framing errors as BREAKs for Magic SysRq
+			 * and SAK purposes; report them normally otherwise.
 			 */
-
-			if (status & dport->port.ignore_status_mask) {
-				if (++ignore > 100)
-					break;
-				goto ignore_char;
-			}
-			/* mask off the error conditions we want to ignore */
-			tmp = status & dport->port.read_status_mask;
-
-			if (tmp & DZ_PERR) {
-				flag = TTY_PARITY;
-#ifdef DEBUG_DZ
-				debug_console("PERR\n", 5);
-#endif
-			} else if (tmp & DZ_FERR) {
+			if (uart_handle_break(&dport->port))
+				continue;
+			if (dport->port.flags & UPF_SAK)
+				flag = TTY_BREAK;
+			else
 				flag = TTY_FRAME;
-#ifdef DEBUG_DZ
-				debug_console("FERR\n", 5);
-#endif
-			}
-			if (tmp & DZ_OERR) {
-#ifdef DEBUG_DZ
-				debug_console("OERR\n", 5);
-#endif
-				tty_insert_flip_char(tty, ch, flag);
-				ch = 0;
-				flag = TTY_OVERRUN;
-			}
-		}
-		tty_insert_flip_char(tty, ch, flag);
-	      ignore_char:
-			;
-	} while (status & DZ_DVAL);
+		} else if (status & DZ_OERR)	/* overrun error */
+			flag = TTY_OVERRUN;
+		else if (status & DZ_PERR)	/* parity error */
+			flag = TTY_PARITY;
 
-	if (tty)
-		tty_flip_buffer_push(tty);
+		/* keep track of the statistics */
+		switch (flag) {
+		case TTY_FRAME:
+			icount->frame++;
+			break;
+		case TTY_PARITY:
+			icount->parity++;
+			break;
+		case TTY_OVERRUN:
+			icount->overrun++;
+			break;
+		case TTY_BREAK:
+			icount->brk++;
+			break;
+		default:
+			break;
+		}
+
+		if (uart_handle_sysrq_char(&dport->port, ch))
+			continue;
+
+		if ((status & dport->port.ignore_status_mask) == 0) {
+			uart_insert_char(&dport->port,
+					 status, DZ_OERR, ch, flag);
+			lines_rx[LINE(status)] = 1;
+		}
+	}
+	for (i = 0; i < DZ_NB_PORT; i++)
+		if (lines_rx[i])
+			tty_flip_buffer_push(dz_ports[i].port.info->tty);
 }
 
 /*
@@ -275,26 +247,32 @@
  * This routine deals with outputs to any lines.
  * ------------------------------------------------------------
  */
-static inline void dz_transmit_chars(struct dz_port *dport)
+static inline void dz_transmit_chars(struct dz_port *dport_in)
 {
-	struct circ_buf *xmit = &dport->port.info->xmit;
+	struct dz_port *dport;
+	struct circ_buf *xmit;
+	unsigned short status;
 	unsigned char tmp;
 
-	if (dport->port.x_char) {	/* XON/XOFF chars */
+	status = dz_in(dport_in, DZ_CSR);
+	dport = &dz_ports[LINE(status)];
+	xmit = &dport->port.info->xmit;
+
+	if (dport->port.x_char) {		/* XON/XOFF chars */
 		dz_out(dport, DZ_TDR, dport->port.x_char);
 		dport->port.icount.tx++;
 		dport->port.x_char = 0;
 		return;
 	}
-	/* if nothing to do or stopped or hardware stopped */
+	/* If nothing to do or stopped or hardware stopped. */
 	if (uart_circ_empty(xmit) || uart_tx_stopped(&dport->port)) {
 		dz_stop_tx(&dport->port);
 		return;
 	}
 
 	/*
-	 * if something to do ... (rember the dz has no output fifo so we go
-	 * one char at a time :-<
+	 * If something to do... (remember the dz has no output fifo,
+	 * so we go one char at a time) :-<
 	 */
 	tmp = xmit->buf[xmit->tail];
 	xmit->tail = (xmit->tail + 1) & (DZ_XMIT_SIZE - 1);
@@ -304,23 +282,29 @@
 	if (uart_circ_chars_pending(xmit) < DZ_WAKEUP_CHARS)
 		uart_write_wakeup(&dport->port);
 
-	/* Are we done */
+	/* Are we done? */
 	if (uart_circ_empty(xmit))
 		dz_stop_tx(&dport->port);
 }
 
 /*
  * ------------------------------------------------------------
- * check_modem_status ()
+ * check_modem_status()
  *
- * Only valid for the MODEM line duh !
+ * DS 3100 & 5100: Only valid for the MODEM line, duh!
+ * DS 5000/200: Valid for the MODEM and PRINTER line.
  * ------------------------------------------------------------
  */
 static inline void check_modem_status(struct dz_port *dport)
 {
+	/*
+	 * FIXME:
+	 * 1. No status change interrupt; use a timer.
+	 * 2. Handle the 3100/5000 as appropriate. --macro
+	 */
 	unsigned short status;
 
-	/* if not ne modem line just return */
+	/* If not the modem line just return.  */
 	if (dport->port.line != DZ_MODEM)
 		return;
 
@@ -341,21 +325,18 @@
  */
 static irqreturn_t dz_interrupt(int irq, void *dev)
 {
-	struct dz_port *dport;
+	struct dz_port *dport = (struct dz_port *)dev;
 	unsigned short status;
 
 	/* get the reason why we just got an irq */
-	status = dz_in((struct dz_port *)dev, DZ_CSR);
-	dport = &dz_ports[LINE(status)];
+	status = dz_in(dport, DZ_CSR);
 
-	if (status & DZ_RDONE)
-		dz_receive_chars(dport);
+	if ((status & (DZ_RDONE | DZ_RIE)) == (DZ_RDONE | DZ_RIE))
+		dz_receive_chars(dport);
 
-	if (status & DZ_TRDY)
+	if ((status & (DZ_TRDY | DZ_TIE)) == (DZ_TRDY | DZ_TIE))
 		dz_transmit_chars(dport);
 
-	/* FIXME: what about check modem status??? --rmk */
-
 	return IRQ_HANDLED;
 }
 
@@ -367,13 +348,13 @@
 
 static unsigned int dz_get_mctrl(struct uart_port *uport)
 {
+	/*
+	 * FIXME: Handle the 3100/5000 as appropriate. --macro
+	 */
 	struct dz_port *dport = (struct dz_port *)uport;
 	unsigned int mctrl = TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
 
 	if (dport->port.line == DZ_MODEM) {
-		/*
-		 * CHECKME: This is a guess from the other code... --rmk
-		 */
 		if (dz_in(dport, DZ_MSR) & DZ_MODEM_DSR)
 			mctrl &= ~TIOCM_DSR;
 	}
@@ -383,6 +364,9 @@
 
 static void dz_set_mctrl(struct uart_port *uport, unsigned int mctrl)
 {
+	/*
+	 * FIXME: Handle the 3100/5000 as appropriate. --macro
+	 */
 	struct dz_port *dport = (struct dz_port *)uport;
 	unsigned short tmp;
 
@@ -409,13 +393,6 @@
 	unsigned long flags;
 	unsigned short tmp;
 
-	/* The dz lines for the mouse/keyboard must be
-	 * opened using their respective drivers.
-	 */
-	if ((dport->port.line == DZ_KEYBOARD) ||
-	    (dport->port.line == DZ_MOUSE))
-		return -ENODEV;
-
 	spin_lock_irqsave(&dport->port.lock, flags);
 
 	/* enable the interrupt and the scanning */
@@ -442,7 +419,8 @@
 }
 
 /*
- * get_lsr_info - get line status register info
+ * -------------------------------------------------------------------
+ * dz_tx_empty() -- get the transmitter empty status
  *
  * Purpose: Let user call ioctl() to get info when the UART physically
  *          is emptied.  On bus types like RS485, the transmitter must
@@ -450,21 +428,28 @@
  *          the transmit shift register is empty, not be done when the
  *          transmit holding register is empty.  This functionality
  *          allows an RS485 driver to be written in user space.
+ * -------------------------------------------------------------------
  */
 static unsigned int dz_tx_empty(struct uart_port *uport)
 {
 	struct dz_port *dport = (struct dz_port *)uport;
-	unsigned short status = dz_in(dport, DZ_LPR);
+	unsigned short tmp, mask = 1 << dport->port.line;
 
-	/* FIXME: this appears to be obviously broken --rmk. */
-	return status ? TIOCSER_TEMT : 0;
+	tmp = dz_in(dport, DZ_TCR);
+	tmp &= mask;
+
+	return tmp ? 0 : TIOCSER_TEMT;
 }
 
 static void dz_break_ctl(struct uart_port *uport, int break_state)
 {
+	/*
+	 * FIXME: Can't access BREAK bits in TDR easily;
+	 * reuse the code for polled TX. --macro
+	 */
 	struct dz_port *dport = (struct dz_port *)uport;
 	unsigned long flags;
-	unsigned short tmp, mask = 1 << uport->line;
+	unsigned short tmp, mask = 1 << dport->port.line;
 
 	spin_lock_irqsave(&uport->lock, flags);
 	tmp = dz_in(dport, DZ_TCR);
@@ -561,7 +546,7 @@
 
 	spin_lock_irqsave(&dport->port.lock, flags);
 
-	dz_out(dport, DZ_LPR, cflag);
+	dz_out(dport, DZ_LPR, cflag | dport->port.line);
 	dport->cflag = cflag;
 
 	/* setup accept flag */
@@ -650,7 +635,7 @@
 	for (i = 0, dport = dz_ports; i < DZ_NB_PORT; i++, dport++) {
 		spin_lock_init(&dport->port.lock);
 		dport->port.membase	= (char *) base;
-		dport->port.iotype	= UPIO_PORT;
+		dport->port.iotype	= UPIO_MEM;
 		dport->port.irq		= dec_interrupt[DEC_IRQ_DZ11];
 		dport->port.line	= i;
 		dport->port.fifosize	= 1;
@@ -662,10 +647,7 @@
 static void dz_reset(struct dz_port *dport)
 {
 	dz_out(dport, DZ_CSR, DZ_CLR);
-
 	while (dz_in(dport, DZ_CSR) & DZ_CLR);
-		/* FIXME: cpu_relax? */
-
 	iob();
 
 	/* enable scanning */
@@ -673,26 +655,55 @@
 }
 
 #ifdef CONFIG_SERIAL_DZ_CONSOLE
+/*
+ * -------------------------------------------------------------------
+ * dz_console_putchar() -- transmit a character
+ *
+ * Polled transmission.  This is tricky.  We need to mask transmit
+ * interrupts so that they do not interfere, enable the transmitter
+ * for the line requested and then wait till the transmit scanner
+ * requests data for this line.  But it may request data for another
+ * line first, in which case we have to disable its transmitter and
+ * repeat waiting till our line pops up.  Only then may the character
+ * be transmitted.  Finally, the state of the transmitter mask is
+ * restored.  Welcome to the world of PDP-11!
+ * -------------------------------------------------------------------
+ */
 static void dz_console_putchar(struct uart_port *uport, int ch)
 {
 	struct dz_port *dport = (struct dz_port *)uport;
 	unsigned long flags;
-	int loops = 2500;
-	unsigned short tmp = (unsigned char)ch;
-	/* this code sends stuff out to serial device - spinning its
-	   wheels and waiting. */
+	unsigned short csr, tcr, trdy, mask;
+	int loops = 10000;
 
 	spin_lock_irqsave(&dport->port.lock, flags);
-
-	/* spin our wheels */
-	while (((dz_in(dport, DZ_CSR) & DZ_TRDY) != DZ_TRDY) && loops--)
-		/* FIXME: cpu_relax, udelay? --rmk */
-		;
-
-	/* Actually transmit the character. */
-	dz_out(dport, DZ_TDR, tmp);
-
+	csr = dz_in(dport, DZ_CSR);
+	dz_out(dport, DZ_CSR, csr & ~DZ_TIE);
+	tcr = dz_in(dport, DZ_TCR);
+	tcr |= 1 << dport->port.line;
+	mask = tcr;
+	dz_out(dport, DZ_TCR, mask);
+	iob();
 	spin_unlock_irqrestore(&dport->port.lock, flags);
+
+	while (loops--) {
+		trdy = dz_in(dport, DZ_CSR);
+		if (!(trdy & DZ_TRDY))
+			continue;
+		trdy = (trdy & DZ_TLINE) >> 8;
+		if (trdy == dport->port.line)
+			break;
+		mask &= ~(1 << trdy);
+		dz_out(dport, DZ_TCR, mask);
+		iob();
+		udelay(2);
+	}
+
+	if (loops)				/* Cannot send otherwise. */
+		dz_out(dport, DZ_TDR, ch);
+
+	dz_out(dport, DZ_TCR, tcr);
+	dz_out(dport, DZ_CSR, csr);
 }
 
 /*
@@ -703,11 +714,11 @@
  * The console must be locked when we get here.
  * -------------------------------------------------------------------
  */
-static void dz_console_print(struct console *cons,
+static void dz_console_print(struct console *co,
 			     const char *str,
 			     unsigned int count)
 {
-	struct dz_port *dport = &dz_ports[CONSOLE_LINE];
+	struct dz_port *dport = &dz_ports[co->index];
 #ifdef DEBUG_DZ
 	prom_printf((char *) str);
 #endif
@@ -716,49 +727,43 @@
 
 static int __init dz_console_setup(struct console *co, char *options)
 {
-	struct dz_port *dport = &dz_ports[CONSOLE_LINE];
+	struct dz_port *dport = &dz_ports[co->index];
 	int baud = 9600;
 	int bits = 8;
 	int parity = 'n';
 	int flow = 'n';
-	int ret;
-	unsigned short mask, tmp;
 
 	if (options)
 		uart_parse_options(options, &baud, &parity, &bits, &flow);
 
 	dz_reset(dport);
 
-	ret = uart_set_options(&dport->port, co, baud, parity, bits, flow);
-	if (ret == 0) {
-		mask = 1 << dport->port.line;
-		tmp = dz_in(dport, DZ_TCR);	/* read the TX flag */
-		if (!(tmp & mask)) {
-			tmp |= mask;		/* set the TX flag */
-			dz_out(dport, DZ_TCR, tmp);
-		}
-	}
-
-	return ret;
+	return uart_set_options(&dport->port, co, baud, parity, bits, flow);
 }
 
-static struct console dz_sercons =
-{
+static struct uart_driver dz_reg;
+static struct console dz_sercons = {
 	.name	= "ttyS",
 	.write	= dz_console_print,
 	.device	= uart_console_device,
 	.setup	= dz_console_setup,
-	.flags	= CON_CONSDEV | CON_PRINTBUFFER,
-	.index	= CONSOLE_LINE,
+	.flags	= CON_PRINTBUFFER,
+	.index	= -1,
+	.data	= &dz_reg,
 };
 
-void __init dz_serial_console_init(void)
+static int __init dz_serial_console_init(void)
 {
-	dz_init_ports();
-
-	register_console(&dz_sercons);
+	if (!IOASIC) {
+		dz_init_ports();
+		register_console(&dz_sercons);
+		return 0;
+	} else
+		return -ENXIO;
 }
 
+console_initcall(dz_serial_console_init);
+
 #define SERIAL_DZ_CONSOLE	&dz_sercons
 #else
 #define SERIAL_DZ_CONSOLE	NULL
@@ -767,35 +772,29 @@
 static struct uart_driver dz_reg = {
 	.owner			= THIS_MODULE,
 	.driver_name		= "serial",
-	.dev_name		= "ttyS%d",
+	.dev_name		= "ttyS",
 	.major			= TTY_MAJOR,
 	.minor			= 64,
 	.nr			= DZ_NB_PORT,
 	.cons			= SERIAL_DZ_CONSOLE,
 };
 
-int __init dz_init(void)
+static int __init dz_init(void)
 {
-	unsigned long flags;
 	int ret, i;
 
+	if (IOASIC)
+		return -ENXIO;
+
 	printk("%s%s\n", dz_name, dz_version);
 
 	dz_init_ports();
 
-	save_flags(flags);
-	cli();
-
 #ifndef CONFIG_SERIAL_DZ_CONSOLE
 	/* reset the chip */
 	dz_reset(&dz_ports[0]);
 #endif
 
-	/* order matters here... the trick is that flags
-	   is updated... in request_irq - to immediatedly obliterate
-	   it is unwise. */
-	restore_flags(flags);
-
 	if (request_irq(dz_ports[0].port.irq, dz_interrupt,
 			IRQF_DISABLED, "DZ", &dz_ports[0]))
 		panic("Unable to register DZ interrupt");
@@ -810,5 +809,7 @@
 	return ret;
 }
 
+module_init(dz_init);
+
 MODULE_DESCRIPTION("DECstation DZ serial driver");
 MODULE_LICENSE("GPL");
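
Much of the rewritten dz.c receive path above turns on the layout of the DZ receiver buffer word, whose bit definitions appear in dz.h below: bit 15 (DZ_DVAL) flags valid data, bits 14-12 carry the overrun, framing and parity error indicators, bits 9-8 select the line and bits 7-0 hold the character. A standalone sketch of that decoding, mirroring the dz.h values rather than reusing the driver code:

/* Values mirror the definitions in drivers/serial/dz.h. */
#define DZ_DVAL		0x8000
#define DZ_OERR		0x4000
#define DZ_FERR		0x2000
#define DZ_PERR		0x1000
#define DZ_LINE_MASK	0x0300
#define DZ_RBUF_MASK	0x00ff

/* Returns 0 if the FIFO is empty, otherwise fills in the line number
 * and character and reports whether the word was error free.
 */
static inline int dz_example_decode(unsigned short status,
				    int *line, unsigned char *ch)
{
	if (!(status & DZ_DVAL))
		return 0;			/* nothing to read */
	*line = (status & DZ_LINE_MASK) >> 8;	/* what LINE(status) does */
	*ch = status & DZ_RBUF_MASK;		/* what UCHAR(status) does */
	return !(status & (DZ_OERR | DZ_FERR | DZ_PERR));
}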
diff --git a/drivers/serial/dz.h b/drivers/serial/dz.h
index 86ef417..9674d4e 100644
--- a/drivers/serial/dz.h
+++ b/drivers/serial/dz.h
@@ -1,20 +1,22 @@
 /*
- * dz.h: Serial port driver for DECStations equiped 
+ * dz.h: Serial port driver for DECstations equipped
  *       with the DZ chipset.
  *
  * Copyright (C) 1998 Olivier A. D. Lebaillif 
  *             
  * Email: olivier.lebaillif@ifrsys.com
  *
+ * Copyright (C) 2004, 2006  Maciej W. Rozycki
  */
 #ifndef DZ_SERIAL_H
 #define DZ_SERIAL_H
 
 /*
- * Definitions for the Control and Status Received.
+ * Definitions for the Control and Status Register.
  */
 #define DZ_TRDY        0x8000                 /* Transmitter empty */
-#define DZ_TIE         0x4000                 /* Transmitter Interrupt Enable */
+#define DZ_TIE         0x4000                 /* Transmitter Interrupt Enbl */
+#define DZ_TLINE       0x0300                 /* Transmitter Line Number */
 #define DZ_RDONE       0x0080                 /* Receiver data ready */
 #define DZ_RIE         0x0040                 /* Receive Interrupt Enable */
 #define DZ_MSE         0x0020                 /* Master Scan Enable */
@@ -22,32 +24,44 @@
 #define DZ_MAINT       0x0008                 /* Loop Back Mode */
 
 /*
- * Definitions for the Received buffer. 
+ * Definitions for the Receiver Buffer Register.
  */
-#define DZ_RBUF_MASK   0x00FF                 /* Data Mask in the Receive Buffer */
-#define DZ_LINE_MASK   0x0300                 /* Line Mask in the Receive Buffer */
+#define DZ_RBUF_MASK   0x00FF                 /* Data Mask */
+#define DZ_LINE_MASK   0x0300                 /* Line Mask */
 #define DZ_DVAL        0x8000                 /* Valid Data indicator */
 #define DZ_OERR        0x4000                 /* Overrun error indicator */
 #define DZ_FERR        0x2000                 /* Frame error indicator */
 #define DZ_PERR        0x1000                 /* Parity error indicator */
 
-#define LINE(x) (x & DZ_LINE_MASK) >> 8       /* Get the line number from the input buffer */
-#define UCHAR(x) (unsigned char)(x & DZ_RBUF_MASK)
+#define LINE(x) ((x & DZ_LINE_MASK) >> 8)     /* Get the line number
+                                                 from the input buffer */
+#define UCHAR(x) ((unsigned char)(x & DZ_RBUF_MASK))
 
 /*
- * Definitions for the Transmit Register.
+ * Definitions for the Transmit Control Register.
  */
 #define DZ_LINE_KEYBOARD 0x0001
 #define DZ_LINE_MOUSE    0x0002
 #define DZ_LINE_MODEM    0x0004
 #define DZ_LINE_PRINTER  0x0008
 
+#define DZ_MODEM_RTS     0x0800               /* RTS for the modem line (2) */
 #define DZ_MODEM_DTR     0x0400               /* DTR for the modem line (2) */
+#define DZ_PRINT_RTS     0x0200               /* RTS for the prntr line (3) */
+#define DZ_PRINT_DTR     0x0100               /* DTR for the prntr line (3) */
+#define DZ_LNENB         0x000f               /* Transmitter Line Enable */
 
 /*
  * Definitions for the Modem Status Register.
  */
+#define DZ_MODEM_RI      0x0800               /* RI for the modem line (2) */
+#define DZ_MODEM_CD      0x0400               /* CD for the modem line (2) */
 #define DZ_MODEM_DSR     0x0200               /* DSR for the modem line (2) */
+#define DZ_MODEM_CTS     0x0100               /* CTS for the modem line (2) */
+#define DZ_PRINT_RI      0x0008               /* RI for the printer line (3) */
+#define DZ_PRINT_CD      0x0004               /* CD for the printer line (3) */
+#define DZ_PRINT_DSR     0x0002               /* DSR for the prntr line (3) */
+#define DZ_PRINT_CTS     0x0001               /* CTS for the prntr line (3) */
 
 /*
  * Definitions for the Transmit Data Register.
diff --git a/drivers/serial/mcfserial.c b/drivers/serial/mcfserial.c
index aee1b31..3db206d 100644
--- a/drivers/serial/mcfserial.c
+++ b/drivers/serial/mcfserial.c
@@ -60,7 +60,8 @@
 #if defined(CONFIG_HW_FEITH)
 #define	CONSOLE_BAUD_RATE	38400
 #define	DEFAULT_CBAUD		B38400
-#elif defined(CONFIG_MOD5272) || defined(CONFIG_M5208EVB) || defined(CONFIG_M5329EVB)
+#elif defined(CONFIG_MOD5272) || defined(CONFIG_M5208EVB) || \
+      defined(CONFIG_M5329EVB) || defined(CONFIG_GILBARCO)
 #define CONSOLE_BAUD_RATE 	115200
 #define DEFAULT_CBAUD		B115200
 #elif defined(CONFIG_ARNEWSH) || defined(CONFIG_FREESCALE) || \
@@ -109,12 +110,30 @@
 		.irq = IRQBASE,
 		.flags = ASYNC_BOOT_AUTOCONF,
 	},
+#ifdef MCFUART_BASE2
 	{  /* ttyS1 */
 		.magic = 0,
 		.addr = (volatile unsigned char *) (MCF_MBAR+MCFUART_BASE2),
 		.irq = IRQBASE+1,
 		.flags = ASYNC_BOOT_AUTOCONF,
 	},
+#endif
+#ifdef MCFUART_BASE3
+	{  /* ttyS2 */
+		.magic = 0,
+		.addr = (volatile unsigned char *) (MCF_MBAR+MCFUART_BASE3),
+		.irq = IRQBASE+2,
+		.flags = ASYNC_BOOT_AUTOCONF,
+	},
+#endif
+#ifdef MCFUART_BASE4
+	{  /* ttyS3 */
+		.magic = 0,
+		.addr = (volatile unsigned char *) (MCF_MBAR+MCFUART_BASE4),
+		.irq = IRQBASE+3,
+		.flags = ASYNC_BOOT_AUTOCONF,
+	},
+#endif
 };
 
 
@@ -1516,6 +1535,22 @@
 	imrp = (volatile unsigned long *) (MCF_MBAR + MCFICM_INTC0 +
 		MCFINTC_IMRL);
 	*imrp &= ~((1 << (info->irq - MCFINT_VECBASE)) | 1);
+#if defined(CONFIG_M527x)
+	{
+		/*
+		 * External Pin Mask Setting & Enable External Pin for Interface
+		 * mrcbis@aliceposta.it
+		 */
+		unsigned short *serpin_enable_mask;
+		serpin_enable_mask = (MCF_IPSBAR + MCF_GPIO_PAR_UART);
+		if (info->line == 0)
+			*serpin_enable_mask |= UART0_ENABLE_MASK;
+		else if (info->line == 1)
+			*serpin_enable_mask |= UART1_ENABLE_MASK;
+		else if (info->line == 2)
+			*serpin_enable_mask |= UART2_ENABLE_MASK;
+	}
+#endif
 #elif defined(CONFIG_M520x)
 	volatile unsigned char *icrp, *uartp;
 	volatile unsigned long *imrp;
@@ -1713,7 +1748,7 @@
 	/* Initialize the tty_driver structure */
 	mcfrs_serial_driver->owner = THIS_MODULE;
 	mcfrs_serial_driver->name = "ttyS";
-	mcfrs_serial_driver->driver_name = "serial";
+	mcfrs_serial_driver->driver_name = "mcfserial";
 	mcfrs_serial_driver->major = TTY_MAJOR;
 	mcfrs_serial_driver->minor_start = 64;
 	mcfrs_serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
@@ -1797,10 +1832,23 @@
 	uartp[MCFUART_UMR] = MCFUART_MR1_PARITYNONE | MCFUART_MR1_CS8;
 	uartp[MCFUART_UMR] = MCFUART_MR2_STOP1;
 
+#ifdef	CONFIG_M5272
+{
+	/*
+	 * For the MCF5272, also compute the baudrate fraction.
+	 */
+	int fraction = MCF_BUSCLK - (clk * 32 * mcfrs_console_baud);
+	fraction *= 16;
+	fraction /= (32 * mcfrs_console_baud);
+	uartp[MCFUART_UFPD] = (fraction & 0xf);		/* set fraction */
+	clk = (MCF_BUSCLK / mcfrs_console_baud) / 32;
+}
+#else
 	clk = ((MCF_BUSCLK / mcfrs_console_baud) + 16) / 32; /* set baud */
+#endif
+
 	uartp[MCFUART_UBG1] = (clk & 0xff00) >> 8;  /* set msb baud */
 	uartp[MCFUART_UBG2] = (clk & 0xff);  /* set lsb baud */
-
 	uartp[MCFUART_UCSR] = MCFUART_UCSR_RXCLKTIMER | MCFUART_UCSR_TXCLKTIMER;
 	uartp[MCFUART_UCR] = MCFUART_UCR_RXENABLE | MCFUART_UCR_TXENABLE;
 
diff --git a/drivers/serial/mpsc.c b/drivers/serial/mpsc.c
index 8eea69f..29823bd 100644
--- a/drivers/serial/mpsc.c
+++ b/drivers/serial/mpsc.c
@@ -555,7 +555,7 @@
 	if (!mpsc_sdma_tx_active(pi)) {
 		txre = (struct mpsc_tx_desc *)(pi->txr +
 			(pi->txr_tail * MPSC_TXRE_SIZE));
-		dma_cache_sync((void *) txre, MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
+		dma_cache_sync(pi->port.dev, (void *) txre, MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
 			invalidate_dcache_range((ulong)txre,
@@ -931,7 +931,7 @@
 	}
 	txre->link = cpu_to_be32(pi->txr_p);	/* Wrap last back to first */
 
-	dma_cache_sync((void *) pi->dma_region, MPSC_DMA_ALLOC_SIZE,
+	dma_cache_sync(pi->port.dev, (void *) pi->dma_region, MPSC_DMA_ALLOC_SIZE,
 		DMA_BIDIRECTIONAL);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
@@ -1005,7 +1005,7 @@
 
 	rxre = (struct mpsc_rx_desc *)(pi->rxr + (pi->rxr_posn*MPSC_RXRE_SIZE));
 
-	dma_cache_sync((void *)rxre, MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
+	dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 	if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
 		invalidate_dcache_range((ulong)rxre,
@@ -1029,7 +1029,7 @@
 		}
 
 		bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE);
-		dma_cache_sync((void *) bp, MPSC_RXBE_SIZE, DMA_FROM_DEVICE);
+		dma_cache_sync(pi->port.dev, (void *) bp, MPSC_RXBE_SIZE, DMA_FROM_DEVICE);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
 			invalidate_dcache_range((ulong)bp,
@@ -1098,7 +1098,7 @@
 					    SDMA_DESC_CMDSTAT_F |
 					    SDMA_DESC_CMDSTAT_L);
 		wmb();
-		dma_cache_sync((void *)rxre, MPSC_RXRE_SIZE, DMA_BIDIRECTIONAL);
+		dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE, DMA_BIDIRECTIONAL);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
 			flush_dcache_range((ulong)rxre,
@@ -1109,7 +1109,7 @@
 		pi->rxr_posn = (pi->rxr_posn + 1) & (MPSC_RXR_ENTRIES - 1);
 		rxre = (struct mpsc_rx_desc *)(pi->rxr +
 			(pi->rxr_posn * MPSC_RXRE_SIZE));
-		dma_cache_sync((void *)rxre, MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
+		dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
 			invalidate_dcache_range((ulong)rxre,
@@ -1143,7 +1143,7 @@
 							   SDMA_DESC_CMDSTAT_EI
 							   : 0));
 	wmb();
-	dma_cache_sync((void *) txre, MPSC_TXRE_SIZE, DMA_BIDIRECTIONAL);
+	dma_cache_sync(pi->port.dev, (void *) txre, MPSC_TXRE_SIZE, DMA_BIDIRECTIONAL);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 	if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
 		flush_dcache_range((ulong)txre,
@@ -1192,7 +1192,7 @@
 		else /* All tx data copied into ring bufs */
 			return;
 
-		dma_cache_sync((void *) bp, MPSC_TXBE_SIZE, DMA_BIDIRECTIONAL);
+		dma_cache_sync(pi->port.dev, (void *) bp, MPSC_TXBE_SIZE, DMA_BIDIRECTIONAL);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
 			flush_dcache_range((ulong)bp,
@@ -1217,7 +1217,7 @@
 		txre = (struct mpsc_tx_desc *)(pi->txr +
 			(pi->txr_tail * MPSC_TXRE_SIZE));
 
-		dma_cache_sync((void *) txre, MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
+		dma_cache_sync(pi->port.dev, (void *) txre, MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
 			invalidate_dcache_range((ulong)txre,
@@ -1235,7 +1235,7 @@
 
 			txre = (struct mpsc_tx_desc *)(pi->txr +
 				(pi->txr_tail * MPSC_TXRE_SIZE));
-			dma_cache_sync((void *) txre, MPSC_TXRE_SIZE,
+			dma_cache_sync(pi->port.dev, (void *) txre, MPSC_TXRE_SIZE,
 				DMA_FROM_DEVICE);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 			if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
@@ -1652,7 +1652,7 @@
 			count--;
 		}
 
-		dma_cache_sync((void *) bp, MPSC_TXBE_SIZE, DMA_BIDIRECTIONAL);
+		dma_cache_sync(pi->port.dev, (void *) bp, MPSC_TXBE_SIZE, DMA_BIDIRECTIONAL);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
 			flush_dcache_range((ulong)bp,
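
Every mpsc.c hunk above is mechanical fallout of the DMA API change documented at the start of this merge: dma_cache_sync() now takes the struct device performing the DMA as its first argument. A minimal hedged sketch of the new call, with an illustrative descriptor buffer:

#include <linux/dma-mapping.h>

/* Make a descriptor the device has written visible to the CPU.
 * "dev" must be the device the non-coherent memory was allocated for.
 */
static void example_sync_desc(struct device *dev, void *desc, size_t len)
{
	dma_cache_sync(dev, desc, len, DMA_FROM_DEVICE);
}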
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index cfcc3ca..3b5f19e 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -775,7 +775,7 @@
 			 *
 			 * Clean this up later..
 			 */
-			clk = clk_get("module_clk");
+			clk = clk_get(NULL, "module_clk");
 			port->uartclk = clk_get_rate(clk) * 16;
 			clk_put(clk);
 		}
@@ -960,7 +960,7 @@
 		default:
 		{
 #if defined(CONFIG_SUPERH) && !defined(CONFIG_SUPERH64)
-			struct clk *clk = clk_get("module_clk");
+			struct clk *clk = clk_get(NULL, "module_clk");
 			t = SCBRR_VALUE(baud, clk_get_rate(clk));
 			clk_put(clk);
 #else
@@ -1128,7 +1128,7 @@
 		 * XXX: We should use a proper SCI/SCIF clock
 		 */
 		{
-			struct clk *clk = clk_get("module_clk");
+			struct clk *clk = clk_get(NULL, "module_clk");
 			sci_ports[i].port.uartclk = clk_get_rate(clk) * 16;
 			clk_put(clk);
 		}
diff --git a/drivers/serial/sh-sci.h b/drivers/serial/sh-sci.h
index 7ee9921..e4557cc 100644
--- a/drivers/serial/sh-sci.h
+++ b/drivers/serial/sh-sci.h
@@ -133,6 +133,20 @@
 # define SCIF_ORER	0x0001		/* Overrun error bit */
 # define SCSCR_INIT(port)	0x3a	/* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
 # define SCIF_ONLY
+#elif defined(CONFIG_CPU_SUBTYPE_SH7206)
+# define SCSPTR0 0xfffe8020 /* 16 bit SCIF */
+# define SCSPTR1 0xfffe8820 /* 16 bit SCIF */
+# define SCSPTR2 0xfffe9020 /* 16 bit SCIF */
+# define SCSPTR3 0xfffe9820 /* 16 bit SCIF */
+# define SCSCR_INIT(port)	0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
+# define SCIF_ONLY
+#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
+# define SCSPTR0 0xf8400020 /* 16 bit SCIF */
+# define SCSPTR1 0xf8410020 /* 16 bit SCIF */
+# define SCSPTR2 0xf8420020 /* 16 bit SCIF */
+# define SCIF_ORER 0x0001  /* overrun error bit */
+# define SCSCR_INIT(port)	0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
+# define SCIF_ONLY
 #else
 # error CPU subtype not defined
 #endif
@@ -365,6 +379,7 @@
 SCIx_FNS(SCxRDR, 0x0a,  8, 0x14,  8, 0x0A,  8, 0x14,  8, 0x05,  8)
 SCIF_FNS(SCFCR,                      0x0c,  8, 0x18, 16)
 #if defined(CONFIG_CPU_SUBTYPE_SH7760) || defined(CONFIG_CPU_SUBTYPE_SH7780)
+SCIF_FNS(SCFDR,			     0x0e, 16, 0x1C, 16)
 SCIF_FNS(SCTFDR,		     0x0e, 16, 0x1C, 16)
 SCIF_FNS(SCRFDR,		     0x0e, 16, 0x20, 16)
 SCIF_FNS(SCSPTR,			0,  0, 0x24, 16)
@@ -544,6 +559,28 @@
 	if (port->mapbase == 0xffe10000)
 		return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
 }
+#elif defined(CONFIG_CPU_SUBTYPE_SH7206)
+static inline int sci_rxd_in(struct uart_port *port)
+{
+	if (port->mapbase == 0xfffe8000)
+		return ctrl_inw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
+	if (port->mapbase == 0xfffe8800)
+		return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
+	if (port->mapbase == 0xfffe9000)
+		return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
+	if (port->mapbase == 0xfffe9800)
+		return ctrl_inw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */
+}
+#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
+static inline int sci_rxd_in(struct uart_port *port)
+{
+	if (port->mapbase == 0xf8400000)
+		return ctrl_inw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
+	if (port->mapbase == 0xf8410000)
+		return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
+	if (port->mapbase == 0xf8420000)
+		return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
+}
 #endif
 
 /*
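
The sh-sci.c hunks replace clk_get("module_clk") with clk_get(NULL, "module_clk") because the clk API looks clocks up by a (device, name) pair; passing a NULL device requests the named system clock. A hedged sketch of the idiom the driver uses to derive its 16x UART input clock:

#include <linux/clk.h>
#include <linux/err.h>

/* Look up the SH "module_clk" by name (no owning device) and derive
 * the 16x UART input clock from its rate, as sh-sci.c now does.
 */
static unsigned int example_sci_uartclk(void)
{
	struct clk *clk = clk_get(NULL, "module_clk");
	unsigned int uartclk;

	if (IS_ERR(clk))
		return 0;
	uartclk = clk_get_rate(clk) * 16;
	clk_put(clk);
	return uartclk;
}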
diff --git a/drivers/serial/uartlite.c b/drivers/serial/uartlite.c
new file mode 100644
index 0000000..8369065
--- /dev/null
+++ b/drivers/serial/uartlite.c
@@ -0,0 +1,505 @@
+/*
+ * uartlite.c: Serial driver for Xilinx uartlite serial controller
+ *
+ * Peter Korsgaard <jacmet@sunsite.dk>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/console.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/tty.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <asm/io.h>
+
+#define ULITE_MAJOR		204
+#define ULITE_MINOR		187
+#define ULITE_NR_UARTS		4
+
+/* For register details see datasheet:
+   http://www.xilinx.com/bvdocs/ipcenter/data_sheet/opb_uartlite.pdf
+*/
+#define ULITE_RX		0x00
+#define ULITE_TX		0x04
+#define ULITE_STATUS		0x08
+#define ULITE_CONTROL		0x0c
+
+#define ULITE_REGION		16
+
+#define ULITE_STATUS_RXVALID	0x01
+#define ULITE_STATUS_RXFULL	0x02
+#define ULITE_STATUS_TXEMPTY	0x04
+#define ULITE_STATUS_TXFULL	0x08
+#define ULITE_STATUS_IE		0x10
+#define ULITE_STATUS_OVERRUN	0x20
+#define ULITE_STATUS_FRAME	0x40
+#define ULITE_STATUS_PARITY	0x80
+
+#define ULITE_CONTROL_RST_TX	0x01
+#define ULITE_CONTROL_RST_RX	0x02
+#define ULITE_CONTROL_IE	0x10
+
+
+static struct uart_port ports[ULITE_NR_UARTS];
+
+static int ulite_receive(struct uart_port *port, int stat)
+{
+	struct tty_struct *tty = port->info->tty;
+	unsigned char ch = 0;
+	char flag = TTY_NORMAL;
+
+	if ((stat & (ULITE_STATUS_RXVALID | ULITE_STATUS_OVERRUN
+		     | ULITE_STATUS_FRAME)) == 0)
+		return 0;
+
+	/* stats */
+	if (stat & ULITE_STATUS_RXVALID) {
+		port->icount.rx++;
+		ch = readb(port->membase + ULITE_RX);
+
+		if (stat & ULITE_STATUS_PARITY)
+			port->icount.parity++;
+	}
+
+	if (stat & ULITE_STATUS_OVERRUN)
+		port->icount.overrun++;
+
+	if (stat & ULITE_STATUS_FRAME)
+		port->icount.frame++;
+
+
+	/* drop byte with parity error if IGNPAR specificed */
+	if (stat & port->ignore_status_mask & ULITE_STATUS_PARITY)
+		stat &= ~ULITE_STATUS_RXVALID;
+
+	stat &= port->read_status_mask;
+
+	if (stat & ULITE_STATUS_PARITY)
+		flag = TTY_PARITY;
+
+
+	stat &= ~port->ignore_status_mask;
+
+	if (stat & ULITE_STATUS_RXVALID)
+		tty_insert_flip_char(tty, ch, flag);
+
+	if (stat & ULITE_STATUS_FRAME)
+		tty_insert_flip_char(tty, 0, TTY_FRAME);
+
+	if (stat & ULITE_STATUS_OVERRUN)
+		tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+
+	return 1;
+}
+
+static int ulite_transmit(struct uart_port *port, int stat)
+{
+	struct circ_buf *xmit  = &port->info->xmit;
+
+	if (stat & ULITE_STATUS_TXFULL)
+		return 0;
+
+	if (port->x_char) {
+		writeb(port->x_char, port->membase + ULITE_TX);
+		port->x_char = 0;
+		port->icount.tx++;
+		return 1;
+	}
+
+	if (uart_circ_empty(xmit) || uart_tx_stopped(port))
+		return 0;
+
+	writeb(xmit->buf[xmit->tail], port->membase + ULITE_TX);
+	xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE-1);
+	port->icount.tx++;
+
+	/* wake up */
+	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+		uart_write_wakeup(port);
+
+	return 1;
+}
+
+static irqreturn_t ulite_isr(int irq, void *dev_id)
+{
+	struct uart_port *port = (struct uart_port *)dev_id;
+	int busy;
+
+	do {
+		int stat = readb(port->membase + ULITE_STATUS);
+		busy  = ulite_receive(port, stat);
+		busy |= ulite_transmit(port, stat);
+	} while (busy);
+
+	tty_flip_buffer_push(port->info->tty);
+
+	return IRQ_HANDLED;
+}
+
+static unsigned int ulite_tx_empty(struct uart_port *port)
+{
+	unsigned long flags;
+	unsigned int ret;
+
+	spin_lock_irqsave(&port->lock, flags);
+	ret = readb(port->membase + ULITE_STATUS);
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	return ret & ULITE_STATUS_TXEMPTY ? TIOCSER_TEMT : 0;
+}
+
+static unsigned int ulite_get_mctrl(struct uart_port *port)
+{
+	return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
+}
+
+static void ulite_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+	/* N/A */
+}
+
+static void ulite_stop_tx(struct uart_port *port)
+{
+	/* N/A */
+}
+
+static void ulite_start_tx(struct uart_port *port)
+{
+	ulite_transmit(port, readb(port->membase + ULITE_STATUS));
+}
+
+static void ulite_stop_rx(struct uart_port *port)
+{
+	/* don't forward any more data (like !CREAD) */
+	port->ignore_status_mask = ULITE_STATUS_RXVALID | ULITE_STATUS_PARITY
+		| ULITE_STATUS_FRAME | ULITE_STATUS_OVERRUN;
+}
+
+static void ulite_enable_ms(struct uart_port *port)
+{
+	/* N/A */
+}
+
+static void ulite_break_ctl(struct uart_port *port, int ctl)
+{
+	/* N/A */
+}
+
+static int ulite_startup(struct uart_port *port)
+{
+	int ret;
+
+	ret = request_irq(port->irq, ulite_isr,
+			  IRQF_DISABLED | IRQF_SAMPLE_RANDOM, "uartlite", port);
+	if (ret)
+		return ret;
+
+	writeb(ULITE_CONTROL_RST_RX | ULITE_CONTROL_RST_TX,
+	       port->membase + ULITE_CONTROL);
+	writeb(ULITE_CONTROL_IE, port->membase + ULITE_CONTROL);
+
+	return 0;
+}
+
+static void ulite_shutdown(struct uart_port *port)
+{
+	writeb(0, port->membase + ULITE_CONTROL);
+	readb(port->membase + ULITE_CONTROL); /* dummy */
+	free_irq(port->irq, port);
+}
+
+static void ulite_set_termios(struct uart_port *port, struct termios *termios,
+			      struct termios *old)
+{
+	unsigned long flags;
+	unsigned int baud;
+
+	spin_lock_irqsave(&port->lock, flags);
+
+	port->read_status_mask = ULITE_STATUS_RXVALID | ULITE_STATUS_OVERRUN
+		| ULITE_STATUS_TXFULL;
+
+	if (termios->c_iflag & INPCK)
+		port->read_status_mask |=
+			ULITE_STATUS_PARITY | ULITE_STATUS_FRAME;
+
+	port->ignore_status_mask = 0;
+	if (termios->c_iflag & IGNPAR)
+		port->ignore_status_mask |= ULITE_STATUS_PARITY
+			| ULITE_STATUS_FRAME | ULITE_STATUS_OVERRUN;
+
+	/* ignore all characters if CREAD is not set */
+	if ((termios->c_cflag & CREAD) == 0)
+		port->ignore_status_mask |=
+			ULITE_STATUS_RXVALID | ULITE_STATUS_PARITY
+			| ULITE_STATUS_FRAME | ULITE_STATUS_OVERRUN;
+
+	/* update timeout */
+	baud = uart_get_baud_rate(port, termios, old, 0, 460800);
+	uart_update_timeout(port, termios->c_cflag, baud);
+
+	spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static const char *ulite_type(struct uart_port *port)
+{
+	return port->type == PORT_UARTLITE ? "uartlite" : NULL;
+}
+
+static void ulite_release_port(struct uart_port *port)
+{
+	release_mem_region(port->mapbase, ULITE_REGION);
+	iounmap(port->membase);
+	port->membase = 0;
+}
+
+static int ulite_request_port(struct uart_port *port)
+{
+	if (!request_mem_region(port->mapbase, ULITE_REGION, "uartlite")) {
+		dev_err(port->dev, "Memory region busy\n");
+		return -EBUSY;
+	}
+
+	port->membase = ioremap(port->mapbase, ULITE_REGION);
+	if (!port->membase) {
+		dev_err(port->dev, "Unable to map registers\n");
+		release_mem_region(port->mapbase, ULITE_REGION);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static void ulite_config_port(struct uart_port *port, int flags)
+{
+	ulite_request_port(port);
+	port->type = PORT_UARTLITE;
+}
+
+static int ulite_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+	/* we don't want the core code to modify any port params */
+	return -EINVAL;
+}
+
+static struct uart_ops ulite_ops = {
+	.tx_empty	= ulite_tx_empty,
+	.set_mctrl	= ulite_set_mctrl,
+	.get_mctrl	= ulite_get_mctrl,
+	.stop_tx	= ulite_stop_tx,
+	.start_tx	= ulite_start_tx,
+	.stop_rx	= ulite_stop_rx,
+	.enable_ms	= ulite_enable_ms,
+	.break_ctl	= ulite_break_ctl,
+	.startup	= ulite_startup,
+	.shutdown	= ulite_shutdown,
+	.set_termios	= ulite_set_termios,
+	.type		= ulite_type,
+	.release_port	= ulite_release_port,
+	.request_port	= ulite_request_port,
+	.config_port	= ulite_config_port,
+	.verify_port	= ulite_verify_port
+};
+
+#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE
+static void ulite_console_wait_tx(struct uart_port *port)
+{
+	int i;
+
+	/* wait up to 10ms for the character(s) to be sent */
+	for (i = 0; i < 10000; i++) {
+		if (readb(port->membase + ULITE_STATUS) & ULITE_STATUS_TXEMPTY)
+			break;
+		udelay(1);
+	}
+}
+
+static void ulite_console_putchar(struct uart_port *port, int ch)
+{
+	ulite_console_wait_tx(port);
+	writeb(ch, port->membase + ULITE_TX);
+}
+
+static void ulite_console_write(struct console *co, const char *s,
+				unsigned int count)
+{
+	struct uart_port *port = &ports[co->index];
+	unsigned long flags;
+	unsigned int ier;
+	int locked = 1;
+
+	if (oops_in_progress) {
+		locked = spin_trylock_irqsave(&port->lock, flags);
+	} else
+		spin_lock_irqsave(&port->lock, flags);
+
+	/* save and disable interrupt */
+	ier = readb(port->membase + ULITE_STATUS) & ULITE_STATUS_IE;
+	writeb(0, port->membase + ULITE_CONTROL);
+
+	uart_console_write(port, s, count, ulite_console_putchar);
+
+	ulite_console_wait_tx(port);
+
+	/* restore interrupt state */
+	if (ier)
+		writeb(ULITE_CONTROL_IE, port->membase + ULITE_CONTROL);
+
+	if (locked)
+		spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static int __init ulite_console_setup(struct console *co, char *options)
+{
+	struct uart_port *port;
+	int baud = 9600;
+	int bits = 8;
+	int parity = 'n';
+	int flow = 'n';
+
+	if (co->index < 0 || co->index >= ULITE_NR_UARTS)
+		return -EINVAL;
+
+	port = &ports[co->index];
+
+	/* not initialized yet? */
+	if (!port->membase)
+		return -ENODEV;
+
+	if (options)
+		uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+	return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver ulite_uart_driver;
+
+static struct console ulite_console = {
+	.name	= "ttyUL",
+	.write	= ulite_console_write,
+	.device	= uart_console_device,
+	.setup	= ulite_console_setup,
+	.flags	= CON_PRINTBUFFER,
+	.index	= -1, /* Specified on the cmdline (e.g. console=ttyUL0) */
+	.data	= &ulite_uart_driver,
+};
+
+static int __init ulite_console_init(void)
+{
+	register_console(&ulite_console);
+	return 0;
+}
+
+console_initcall(ulite_console_init);
+
+#endif /* CONFIG_SERIAL_UARTLITE_CONSOLE */
+
+static struct uart_driver ulite_uart_driver = {
+	.owner		= THIS_MODULE,
+	.driver_name	= "uartlite",
+	.dev_name	= "ttyUL",
+	.major		= ULITE_MAJOR,
+	.minor		= ULITE_MINOR,
+	.nr		= ULITE_NR_UARTS,
+#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE
+	.cons		= &ulite_console,
+#endif
+};
+
+static int __devinit ulite_probe(struct platform_device *pdev)
+{
+	struct resource *res, *res2;
+	struct uart_port *port;
+
+	if (pdev->id < 0 || pdev->id >= ULITE_NR_UARTS)
+		return -EINVAL;
+
+	if (ports[pdev->id].membase)
+		return -EBUSY;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENODEV;
+
+	res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!res2)
+		return -ENODEV;
+
+	port = &ports[pdev->id];
+
+	port->fifosize	= 16;
+	port->regshift	= 2;
+	port->iotype	= UPIO_MEM;
+	port->iobase	= 1; /* mark port in use */
+	port->mapbase	= res->start;
+	port->membase	= 0;
+	port->ops	= &ulite_ops;
+	port->irq	= res2->start;
+	port->flags	= UPF_BOOT_AUTOCONF;
+	port->dev	= &pdev->dev;
+	port->type	= PORT_UNKNOWN;
+	port->line	= pdev->id;
+
+	uart_add_one_port(&ulite_uart_driver, port);
+	platform_set_drvdata(pdev, port);
+
+	return 0;
+}
+
+static int ulite_remove(struct platform_device *pdev)
+{
+	struct uart_port *port = platform_get_drvdata(pdev);
+
+	platform_set_drvdata(pdev, NULL);
+
+	if (port) {
+		uart_remove_one_port(&ulite_uart_driver, port);
+		/* mark port as free */
+		port->membase = NULL;
+	}
+
+	return 0;
+}
+
+static struct platform_driver ulite_platform_driver = {
+	.probe	= ulite_probe,
+	.remove	= ulite_remove,
+	.driver	= {
+		   .owner = THIS_MODULE,
+		   .name  = "uartlite",
+		   },
+};
+
+int __init ulite_init(void)
+{
+	int ret;
+
+	ret = uart_register_driver(&ulite_uart_driver);
+	if (ret)
+		return ret;
+
+	ret = platform_driver_register(&ulite_platform_driver);
+	if (ret)
+		uart_unregister_driver(&ulite_uart_driver);
+
+	return ret;
+}
+
+void __exit ulite_exit(void)
+{
+	platform_driver_unregister(&ulite_platform_driver);
+	uart_unregister_driver(&ulite_uart_driver);
+}
+
+module_init(ulite_init);
+module_exit(ulite_exit);
+
+MODULE_AUTHOR("Peter Korsgaard <jacmet@sunsite.dk>");
+MODULE_DESCRIPTION("Xilinx uartlite serial driver");
+MODULE_LICENSE("GPL");
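
ulite_probe() in the new driver above expects a platform device whose id picks one of the ULITE_NR_UARTS slots and which carries one memory resource (the 16-byte register window) and one IRQ resource. A board file would register something like the sketch below; the base address and interrupt number are hypothetical placeholders, not values taken from the patch:

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static struct resource example_uartlite_resources[] = {
	{
		.start	= 0xa0000000,			/* hypothetical base */
		.end	= 0xa0000000 + 16 - 1,		/* ULITE_REGION bytes */
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 42,				/* hypothetical IRQ */
		.end	= 42,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device example_uartlite_device = {
	.name		= "uartlite",	/* matches ulite_platform_driver */
	.id		= 0,		/* becomes ttyUL0 */
	.resource	= example_uartlite_resources,
	.num_resources	= ARRAY_SIZE(example_uartlite_resources),
};

static int __init example_uartlite_register(void)
{
	return platform_device_register(&example_uartlite_device);
}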
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index 72025df..494d9b8 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -148,7 +148,7 @@
 	void (*cs_control)(u32 command);
 };
 
-static void pump_messages(void *data);
+static void pump_messages(struct work_struct *work);
 
 static int flush(struct driver_data *drv_data)
 {
@@ -884,9 +884,10 @@
 	}
 }
 
-static void pump_messages(void *data)
+static void pump_messages(struct work_struct *work)
 {
-	struct driver_data *drv_data = data;
+	struct driver_data *drv_data =
+		container_of(work, struct driver_data, pump_messages);
 	unsigned long flags;
 
 	/* Lock queue and check for queue work */
@@ -1098,7 +1099,7 @@
 	tasklet_init(&drv_data->pump_transfers,
 			pump_transfers,	(unsigned long)drv_data);
 
-	INIT_WORK(&drv_data->pump_messages, pump_messages, drv_data);
+	INIT_WORK(&drv_data->pump_messages, pump_messages);
 	drv_data->workqueue = create_singlethread_workqueue(
 					drv_data->master->cdev.dev->bus_id);
 	if (drv_data->workqueue == NULL)
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index c3c0626..270e621 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -360,12 +360,13 @@
 	if (!dev)
 		return NULL;
 
-	master = kzalloc(size + sizeof *master, SLAB_KERNEL);
+	master = kzalloc(size + sizeof *master, GFP_KERNEL);
 	if (!master)
 		return NULL;
 
 	class_device_initialize(&master->cdev);
 	master->cdev.class = &spi_master_class;
+	kobj_set_kset_s(&master->cdev, spi_master_class.subsys);
 	master->cdev.dev = get_device(dev);
 	spi_master_set_devdata(master, &master[1]);
 
@@ -447,7 +448,9 @@
  */
 void spi_unregister_master(struct spi_master *master)
 {
-	(void) device_for_each_child(master->cdev.dev, NULL, __unregister);
+	int dummy;
+
+	dummy = device_for_each_child(master->cdev.dev, NULL, __unregister);
 	class_device_unregister(&master->cdev);
 }
 EXPORT_SYMBOL_GPL(spi_unregister_master);
@@ -463,15 +466,13 @@
  */
 struct spi_master *spi_busnum_to_master(u16 bus_num)
 {
-	if (bus_num) {
-		char			name[8];
-		struct kobject		*bus;
+	char			name[9];
+	struct kobject		*bus;
 
-		snprintf(name, sizeof name, "spi%u", bus_num);
-		bus = kset_find_obj(&spi_master_class.subsys.kset, name);
-		if (bus)
-			return container_of(bus, struct spi_master, cdev.kobj);
-	}
+	snprintf(name, sizeof name, "spi%u", bus_num);
+	bus = kset_find_obj(&spi_master_class.subsys.kset, name);
+	if (bus)
+		return container_of(bus, struct spi_master, cdev.kobj);
 	return NULL;
 }
 EXPORT_SYMBOL_GPL(spi_busnum_to_master);
@@ -607,7 +608,7 @@
 {
 	int	status;
 
-	buf = kmalloc(SPI_BUFSIZ, SLAB_KERNEL);
+	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
 	if (!buf) {
 		status = -ENOMEM;
 		goto err0;
diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi_bitbang.c
index a23862e..57289b6 100644
--- a/drivers/spi/spi_bitbang.c
+++ b/drivers/spi/spi_bitbang.c
@@ -196,7 +196,7 @@
 		return -EINVAL;
 
 	if (!cs) {
-		cs = kzalloc(sizeof *cs, SLAB_KERNEL);
+		cs = kzalloc(sizeof *cs, GFP_KERNEL);
 		if (!cs)
 			return -ENOMEM;
 		spi->controller_state = cs;
@@ -265,9 +265,10 @@
  * Drivers can provide word-at-a-time i/o primitives, or provide
  * transfer-at-a-time ones to leverage dma or fifo hardware.
  */
-static void bitbang_work(void *_bitbang)
+static void bitbang_work(struct work_struct *work)
 {
-	struct spi_bitbang	*bitbang = _bitbang;
+	struct spi_bitbang	*bitbang =
+		container_of(work, struct spi_bitbang, work);
 	unsigned long		flags;
 
 	spin_lock_irqsave(&bitbang->lock, flags);
@@ -456,7 +457,7 @@
 	if (!bitbang->master || !bitbang->chipselect)
 		return -EINVAL;
 
-	INIT_WORK(&bitbang->work, bitbang_work, bitbang);
+	INIT_WORK(&bitbang->work, bitbang_work);
 	spin_lock_init(&bitbang->lock);
 	INIT_LIST_HEAD(&bitbang->queue);
 
diff --git a/drivers/spi/spi_butterfly.c b/drivers/spi/spi_butterfly.c
index c2f601f..312987a 100644
--- a/drivers/spi/spi_butterfly.c
+++ b/drivers/spi/spi_butterfly.c
@@ -251,6 +251,8 @@
 	 * setting up a platform device like this is an ugly kluge...
 	 */
 	pdev = platform_device_register_simple("butterfly", -1, NULL, 0);
+	if (IS_ERR(pdev))
+		return;
 
 	master = spi_alloc_master(&pdev->dev, sizeof *pp);
 	if (!master) {
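
The one-line spi_butterfly fix above exists because platform_device_register_simple() reports failure through an error pointer rather than NULL, so its result must be checked with IS_ERR() before being dereferenced. In isolation the idiom looks like this (hedged sketch):

#include <linux/platform_device.h>
#include <linux/err.h>

static struct platform_device *example_register(void)
{
	struct platform_device *pdev;

	pdev = platform_device_register_simple("example", -1, NULL, 0);
	if (IS_ERR(pdev))
		return NULL;	/* propagate failure, do not dereference */
	return pdev;
}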
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index e656563..3dfa3e4 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -158,7 +158,7 @@
 	const struct cxacru_modem_type *modem_type;
 
 	int line_status;
-	struct work_struct poll_work;
+	struct delayed_work poll_work;
 
 	/* contol handles */
 	struct mutex cm_serialize;
@@ -347,7 +347,7 @@
 	return 0;
 }
 
-static void cxacru_poll_status(struct cxacru_data *instance);
+static void cxacru_poll_status(struct work_struct *work);
 
 static int cxacru_atm_start(struct usbatm_data *usbatm_instance,
 		struct atm_dev *atm_dev)
@@ -376,12 +376,14 @@
 	}
 
 	/* Start status polling */
-	cxacru_poll_status(instance);
+	cxacru_poll_status(&instance->poll_work.work);
 	return 0;
 }
 
-static void cxacru_poll_status(struct cxacru_data *instance)
+static void cxacru_poll_status(struct work_struct *work)
 {
+	struct cxacru_data *instance =
+		container_of(work, struct cxacru_data, poll_work.work);
 	u32 buf[CXINF_MAX] = {};
 	struct usbatm_data *usbatm = instance->usbatm;
 	struct atm_dev *atm_dev = usbatm->atm_dev;
@@ -720,7 +722,7 @@
 
 	mutex_init(&instance->cm_serialize);
 
-	INIT_WORK(&instance->poll_work, (void *)cxacru_poll_status, instance);
+	INIT_DELAYED_WORK(&instance->poll_work, cxacru_poll_status);
 
 	usbatm_instance->driver_data = instance;
 
diff --git a/drivers/usb/atm/speedtch.c b/drivers/usb/atm/speedtch.c
index a823486..8ed6c75 100644
--- a/drivers/usb/atm/speedtch.c
+++ b/drivers/usb/atm/speedtch.c
@@ -142,7 +142,7 @@
 
 	struct speedtch_params params; /* set in probe, constant afterwards */
 
-	struct work_struct status_checker;
+	struct delayed_work status_checker;
 
 	unsigned char last_status;
 
@@ -498,8 +498,11 @@
 	return ret;
 }
 
-static void speedtch_check_status(struct speedtch_instance_data *instance)
+static void speedtch_check_status(struct work_struct *work)
 {
+	struct speedtch_instance_data *instance =
+		container_of(work, struct speedtch_instance_data,
+			     status_checker.work);
 	struct usbatm_data *usbatm = instance->usbatm;
 	struct atm_dev *atm_dev = usbatm->atm_dev;
 	unsigned char *buf = instance->scratch_buffer;
@@ -576,7 +579,7 @@
 {
 	struct speedtch_instance_data *instance = (void *)data;
 
-	schedule_work(&instance->status_checker);
+	schedule_delayed_work(&instance->status_checker, 0);
 
 	/* The following check is racy, but the race is harmless */
 	if (instance->poll_delay < MAX_POLL_DELAY)
@@ -596,7 +599,7 @@
 	if (int_urb) {
 		ret = usb_submit_urb(int_urb, GFP_ATOMIC);
 		if (!ret)
-			schedule_work(&instance->status_checker);
+			schedule_delayed_work(&instance->status_checker, 0);
 		else {
 			atm_dbg(instance->usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret);
 			mod_timer(&instance->resubmit_timer, jiffies + msecs_to_jiffies(RESUBMIT_DELAY));
@@ -640,7 +643,7 @@
 
 	if ((int_urb = instance->int_urb)) {
 		ret = usb_submit_urb(int_urb, GFP_ATOMIC);
-		schedule_work(&instance->status_checker);
+		schedule_delayed_work(&instance->status_checker, 0);
 		if (ret < 0) {
 			atm_dbg(usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret);
 			goto fail;
@@ -855,7 +858,7 @@
 
 	usbatm->flags |= (use_isoc ? UDSL_USE_ISOC : 0);
 
-	INIT_WORK(&instance->status_checker, (void *)speedtch_check_status, instance);
+	INIT_DELAYED_WORK(&instance->status_checker, speedtch_check_status);
 
 	instance->status_checker.timer.function = speedtch_status_poll;
 	instance->status_checker.timer.data = (unsigned long)instance;
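
The cxacru and speedtch conversions above use the delayed-work half of the workqueue rework: a poller becomes a struct delayed_work, its handler digs the owning structure out with container_of(work, ..., field.work), and it is armed or re-armed with schedule_delayed_work(). A hedged, self-contained sketch of that shape (type and field names are invented):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct example_poller {
	int state;
	struct delayed_work poll_work;
};

static void example_poll(struct work_struct *work)
{
	struct example_poller *p =
		container_of(work, struct example_poller, poll_work.work);

	p->state++;				/* do the periodic check */
	/* re-arm ourselves one second from now */
	schedule_delayed_work(&p->poll_work, msecs_to_jiffies(1000));
}

static void example_start(struct example_poller *p)
{
	INIT_DELAYED_WORK(&p->poll_work, example_poll);
	schedule_delayed_work(&p->poll_work, 0);	/* run immediately */
}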
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c
index c137c04..dae4ef1 100644
--- a/drivers/usb/atm/ueagle-atm.c
+++ b/drivers/usb/atm/ueagle-atm.c
@@ -64,6 +64,8 @@
 #include <linux/kthread.h>
 #include <linux/version.h>
 #include <linux/mutex.h>
+#include <linux/freezer.h>
+
 #include <asm/unaligned.h>
 
 #include "usbatm.h"
@@ -655,9 +657,9 @@
 /*
  * The uea_load_page() function must be called within a process context
  */
-static void uea_load_page(void *xsc)
+static void uea_load_page(struct work_struct *work)
 {
-	struct uea_softc *sc = xsc;
+	struct uea_softc *sc = container_of(work, struct uea_softc, task);
 	u16 pageno = sc->pageno;
 	u16 ovl = sc->ovl;
 	struct block_info bi;
@@ -1348,7 +1350,7 @@
 
 	uea_enters(INS_TO_USBDEV(sc));
 
-	INIT_WORK(&sc->task, uea_load_page, sc);
+	INIT_WORK(&sc->task, uea_load_page);
 	init_waitqueue_head(&sc->sync_q);
 	init_waitqueue_head(&sc->cmv_ack_wait);
 
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index ec3438d..7f1fa95 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -421,9 +421,9 @@
 		schedule_work(&acm->work);
 }
 
-static void acm_softint(void *private)
+static void acm_softint(struct work_struct *work)
 {
-	struct acm *acm = private;
+	struct acm *acm = container_of(work, struct acm, work);
 	dbg("Entering acm_softint.");
 	
 	if (!ACM_READY(acm))
@@ -927,7 +927,7 @@
 	acm->rx_buflimit = num_rx_buf;
 	acm->urb_task.func = acm_rx_tasklet;
 	acm->urb_task.data = (unsigned long) acm;
-	INIT_WORK(&acm->work, acm_softint, acm);
+	INIT_WORK(&acm->work, acm_softint);
 	spin_lock_init(&acm->throttle_lock);
 	spin_lock_init(&acm->write_lock);
 	spin_lock_init(&acm->read_lock);
diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c
index 840442a..c3915dc 100644
--- a/drivers/usb/core/buffer.c
+++ b/drivers/usb/core/buffer.c
@@ -93,7 +93,7 @@
 }
 
 
-/* sometimes alloc/free could use kmalloc with SLAB_DMA, for
+/* sometimes alloc/free could use kmalloc with GFP_DMA, for
  * better sharing and to leverage mm/slab.c intelligence.
  */
 
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 0ce393e..2651c2e 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -22,6 +22,7 @@
 #include <linux/usbdevice_fs.h>
 #include <linux/kthread.h>
 #include <linux/mutex.h>
+#include <linux/freezer.h>
 
 #include <asm/semaphore.h>
 #include <asm/uaccess.h>
@@ -68,7 +69,7 @@
 
 	unsigned		has_indicators:1;
 	u8			indicator[USB_MAXCHILDREN];
-	struct work_struct	leds;
+	struct delayed_work	leds;
 };
 
 
@@ -218,9 +219,10 @@
 
 #define	LED_CYCLE_PERIOD	((2*HZ)/3)
 
-static void led_work (void *__hub)
+static void led_work (struct work_struct *work)
 {
-	struct usb_hub		*hub = __hub;
+	struct usb_hub		*hub =
+		container_of(work, struct usb_hub, leds.work);
 	struct usb_device	*hdev = hub->hdev;
 	unsigned		i;
 	unsigned		changed = 0;
@@ -405,9 +407,10 @@
  * talking to TTs must queue control transfers (not just bulk and iso), so
  * both can talk to the same hub concurrently.
  */
-static void hub_tt_kevent (void *arg)
+static void hub_tt_kevent (struct work_struct *work)
 {
-	struct usb_hub		*hub = arg;
+	struct usb_hub		*hub =
+		container_of(work, struct usb_hub, tt.kevent);
 	unsigned long		flags;
 
 	spin_lock_irqsave (&hub->tt.lock, flags);
@@ -458,7 +461,7 @@
 	 * since each TT has "at least two" buffers that can need it (and
 	 * there can be many TTs per hub).  even if they're uncommon.
 	 */
-	if ((clear = kmalloc (sizeof *clear, SLAB_ATOMIC)) == NULL) {
+	if ((clear = kmalloc (sizeof *clear, GFP_ATOMIC)) == NULL) {
 		dev_err (&udev->dev, "can't save CLEAR_TT_BUFFER state\n");
 		/* FIXME recover somehow ... RESET_TT? */
 		return;
@@ -694,7 +697,7 @@
 
 	spin_lock_init (&hub->tt.lock);
 	INIT_LIST_HEAD (&hub->tt.clear_list);
-	INIT_WORK (&hub->tt.kevent, hub_tt_kevent, hub);
+	INIT_WORK (&hub->tt.kevent, hub_tt_kevent);
 	switch (hdev->descriptor.bDeviceProtocol) {
 		case 0:
 			break;
@@ -938,7 +941,7 @@
 	INIT_LIST_HEAD(&hub->event_list);
 	hub->intfdev = &intf->dev;
 	hub->hdev = hdev;
-	INIT_WORK(&hub->leds, led_work, hub);
+	INIT_DELAYED_WORK(&hub->leds, led_work);
 
 	usb_set_intfdata (intf, hub);
 	intf->needs_remote_wakeup = 1;
@@ -2369,7 +2372,7 @@
 	struct usb_qualifier_descriptor	*qual;
 	int				status;
 
-	qual = kmalloc (sizeof *qual, SLAB_KERNEL);
+	qual = kmalloc (sizeof *qual, GFP_KERNEL);
 	if (qual == NULL)
 		return;
 
@@ -2381,7 +2384,7 @@
 		/* hub LEDs are probably harder to miss than syslog */
 		if (hub->has_indicators) {
 			hub->indicator[port1-1] = INDICATOR_GREEN_BLINK;
-			schedule_work (&hub->leds);
+			schedule_delayed_work (&hub->leds, 0);
 		}
 	}
 	kfree(qual);
@@ -2555,7 +2558,7 @@
 				if (hub->has_indicators) {
 					hub->indicator[port1-1] =
 						INDICATOR_AMBER_BLINK;
-					schedule_work (&hub->leds);
+					schedule_delayed_work (&hub->leds, 0);
 				}
 				status = -ENOTCONN;	/* Don't retry */
 				goto loop_disable;
@@ -2920,7 +2923,7 @@
 		if (len < le16_to_cpu(udev->config[index].desc.wTotalLength))
 			len = le16_to_cpu(udev->config[index].desc.wTotalLength);
 	}
-	buf = kmalloc (len, SLAB_KERNEL);
+	buf = kmalloc (len, GFP_KERNEL);
 	if (buf == NULL) {
 		dev_err(&udev->dev, "no mem to re-read configs after reset\n");
 		/* assume the worst */
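
The allocation-flag changes above are a mechanical rename: the legacy SLAB_KERNEL/SLAB_ATOMIC aliases are replaced by the GFP_KERNEL/GFP_ATOMIC flags they stood for. A small illustrative helper (example_alloc is hypothetical, not part of the patch):

#include <linux/slab.h>

static void *example_alloc(size_t len, int may_sleep)
{
	/* was: kmalloc(len, may_sleep ? SLAB_KERNEL : SLAB_ATOMIC) */
	return kmalloc(len, may_sleep ? GFP_KERNEL : GFP_ATOMIC);
}
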
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 29b0fa9..149aa8b 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -488,7 +488,7 @@
 		int	retval;
 
 		io->urbs [i]->dev = io->dev;
-		retval = usb_submit_urb (io->urbs [i], SLAB_ATOMIC);
+		retval = usb_submit_urb (io->urbs [i], GFP_ATOMIC);
 
 		/* after we submit, let completions or cancelations fire;
 		 * we handshake using io->status.
@@ -1501,9 +1501,10 @@
 };
 
 /* Worker routine for usb_driver_set_configuration() */
-static void driver_set_config_work(void *_req)
+static void driver_set_config_work(struct work_struct *work)
 {
-	struct set_config_request *req = _req;
+	struct set_config_request *req =
+		container_of(work, struct set_config_request, work);
 
 	usb_lock_device(req->udev);
 	usb_set_configuration(req->udev, req->config);
@@ -1541,7 +1542,7 @@
 		return -ENOMEM;
 	req->udev = udev;
 	req->config = config;
-	INIT_WORK(&req->work, driver_set_config_work, req);
+	INIT_WORK(&req->work, driver_set_config_work);
 
 	usb_get_dev(udev);
 	if (!schedule_work(&req->work)) {
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 81cb525..02426d0 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -203,9 +203,10 @@
 #ifdef	CONFIG_USB_SUSPEND
 
 /* usb_autosuspend_work - callback routine to autosuspend a USB device */
-static void usb_autosuspend_work(void *_udev)
+static void usb_autosuspend_work(struct work_struct *work)
 {
-	struct usb_device	*udev = _udev;
+	struct usb_device *udev =
+		container_of(work, struct usb_device, autosuspend.work);
 
 	usb_pm_lock(udev);
 	udev->auto_pm = 1;
@@ -215,7 +216,7 @@
 
 #else
 
-static void usb_autosuspend_work(void *_udev)
+static void usb_autosuspend_work(struct work_struct *work)
 {}
 
 #endif	/* CONFIG_USB_SUSPEND */
@@ -304,7 +305,7 @@
 
 #ifdef	CONFIG_PM
 	mutex_init(&dev->pm_mutex);
-	INIT_WORK(&dev->autosuspend, usb_autosuspend_work, dev);
+	INIT_DELAYED_WORK(&dev->autosuspend, usb_autosuspend_work);
 #endif
 	return dev;
 }
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c
index 3bd1dfe..d15bf22 100644
--- a/drivers/usb/gadget/ether.c
+++ b/drivers/usb/gadget/ether.c
@@ -1833,9 +1833,9 @@
 	spin_unlock_irqrestore(&dev->req_lock, flags);
 }
 
-static void eth_work (void *_dev)
+static void eth_work (struct work_struct *work)
 {
-	struct eth_dev		*dev = _dev;
+	struct eth_dev	*dev = container_of(work, struct eth_dev, work);
 
 	if (test_and_clear_bit (WORK_RX_MEMORY, &dev->todo)) {
 		if (netif_running (dev->net))
@@ -2398,7 +2398,7 @@
 	dev = netdev_priv(net);
 	spin_lock_init (&dev->lock);
 	spin_lock_init (&dev->req_lock);
-	INIT_WORK (&dev->work, eth_work, dev);
+	INIT_WORK (&dev->work, eth_work);
 	INIT_LIST_HEAD (&dev->tx_reqs);
 	INIT_LIST_HEAD (&dev->rx_reqs);
 
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index 8b975d1..c98316c 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -250,7 +250,7 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/string.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <linux/utsname.h>
 
 #include <linux/usb_ch9.h>
diff --git a/drivers/usb/gadget/gmidi.c b/drivers/usb/gadget/gmidi.c
index 64554ac..3135182 100644
--- a/drivers/usb/gadget/gmidi.c
+++ b/drivers/usb/gadget/gmidi.c
@@ -1236,7 +1236,7 @@
 
 
 	/* ok, we made sense of the hardware ... */
-	dev = kzalloc(sizeof(*dev), SLAB_KERNEL);
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 	if (!dev) {
 		return -ENOMEM;
 	}
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
index a3076da..805a982 100644
--- a/drivers/usb/gadget/goku_udc.c
+++ b/drivers/usb/gadget/goku_udc.c
@@ -1864,7 +1864,7 @@
 	}
 
 	/* alloc, and start init */
-	dev = kmalloc (sizeof *dev, SLAB_KERNEL);
+	dev = kmalloc (sizeof *dev, GFP_KERNEL);
 	if (dev == NULL){
 		pr_debug("enomem %s\n", pci_name(pdev));
 		retval = -ENOMEM;
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index 86924f9..3fb1044a 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -412,7 +412,7 @@
 	/* FIXME readahead for O_NONBLOCK and poll(); careful with ZLPs */
 
 	value = -ENOMEM;
-	kbuf = kmalloc (len, SLAB_KERNEL);
+	kbuf = kmalloc (len, GFP_KERNEL);
 	if (unlikely (!kbuf))
 		goto free1;
 
@@ -456,7 +456,7 @@
 	/* FIXME writebehind for O_NONBLOCK and poll(), qlen = 1 */
 
 	value = -ENOMEM;
-	kbuf = kmalloc (len, SLAB_KERNEL);
+	kbuf = kmalloc (len, GFP_KERNEL);
 	if (!kbuf)
 		goto free1;
 	if (copy_from_user (kbuf, buf, len)) {
@@ -1898,7 +1898,7 @@
 	buf += 4;
 	length -= 4;
 
-	kbuf = kmalloc (length, SLAB_KERNEL);
+	kbuf = kmalloc (length, GFP_KERNEL);
 	if (!kbuf)
 		return -ENOMEM;
 	if (copy_from_user (kbuf, buf, length)) {
diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c
index 0b59083..3024c67 100644
--- a/drivers/usb/gadget/net2280.c
+++ b/drivers/usb/gadget/net2280.c
@@ -2861,7 +2861,7 @@
 	}
 
 	/* alloc, and start init */
-	dev = kzalloc (sizeof *dev, SLAB_KERNEL);
+	dev = kzalloc (sizeof *dev, GFP_KERNEL);
 	if (dev == NULL){
 		retval = -ENOMEM;
 		goto done;
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
index 48a09fd..030d87c 100644
--- a/drivers/usb/gadget/omap_udc.c
+++ b/drivers/usb/gadget/omap_udc.c
@@ -2581,7 +2581,7 @@
 	/* UDC_PULLUP_EN gates the chip clock */
 	// OTG_SYSCON_1_REG |= DEV_IDLE_EN;
 
-	udc = kzalloc(sizeof(*udc), SLAB_KERNEL);
+	udc = kzalloc(sizeof(*udc), GFP_KERNEL);
 	if (!udc)
 		return -ENOMEM;
 
diff --git a/drivers/usb/gadget/zero.c b/drivers/usb/gadget/zero.c
index 0f809dd..40710ea 100644
--- a/drivers/usb/gadget/zero.c
+++ b/drivers/usb/gadget/zero.c
@@ -1190,7 +1190,7 @@
 
 
 	/* ok, we made sense of the hardware ... */
-	dev = kzalloc(sizeof(*dev), SLAB_KERNEL);
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 	if (!dev)
 		return -ENOMEM;
 	spin_lock_init (&dev->lock);
diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
index 34b7a31..56349d2 100644
--- a/drivers/usb/host/ehci-dbg.c
+++ b/drivers/usb/host/ehci-dbg.c
@@ -492,7 +492,7 @@
 	unsigned		i;
 	__le32			tag;
 
-	if (!(seen = kmalloc (DBG_SCHED_LIMIT * sizeof *seen, SLAB_ATOMIC)))
+	if (!(seen = kmalloc (DBG_SCHED_LIMIT * sizeof *seen, GFP_ATOMIC)))
 		return 0;
 	seen_count = 0;
 
diff --git a/drivers/usb/host/hc_crisv10.c b/drivers/usb/host/hc_crisv10.c
index 87eca6a..9325e46 100644
--- a/drivers/usb/host/hc_crisv10.c
+++ b/drivers/usb/host/hc_crisv10.c
@@ -188,7 +188,7 @@
 #define CHECK_ALIGN(x) if (((__u32)(x)) & 0x00000003) \
 {panic("Alignment check (DWORD) failed at %s:%s:%d\n", __FILE__, __FUNCTION__, __LINE__);}
 
-#define SLAB_FLAG     (in_interrupt() ? SLAB_ATOMIC : SLAB_KERNEL)
+#define SLAB_FLAG     (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL)
 #define KMALLOC_FLAG  (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL)
 
 /* Most helpful debugging aid */
@@ -275,13 +275,13 @@
 static int zout_buffer[4] __attribute__ ((aligned (4)));
 
 /* Cache for allocating new EP and SB descriptors. */
-static kmem_cache_t *usb_desc_cache;
+static struct kmem_cache *usb_desc_cache;
 
 /* Cache for the registers allocated in the top half. */
-static kmem_cache_t *top_half_reg_cache;
+static struct kmem_cache *top_half_reg_cache;
 
 /* Cache for the data allocated in the isoc descr top half. */
-static kmem_cache_t *isoc_compl_cache;
+static struct kmem_cache *isoc_compl_cache;
 
 static struct usb_bus *etrax_usb_bus;
 
@@ -1743,7 +1743,7 @@
 
 		*R_DMA_CH8_SUB3_CLR_INTR = IO_STATE(R_DMA_CH8_SUB3_CLR_INTR, clr_descr, do);
 
-		comp_data = (usb_isoc_complete_data_t*)kmem_cache_alloc(isoc_compl_cache, SLAB_ATOMIC);
+		comp_data = (usb_isoc_complete_data_t*)kmem_cache_alloc(isoc_compl_cache, GFP_ATOMIC);
 		assert(comp_data != NULL);
 
                 INIT_WORK(&comp_data->usb_bh, etrax_usb_isoc_descr_interrupt_bottom_half, comp_data);
@@ -3010,7 +3010,7 @@
 			if (!urb->iso_frame_desc[i].length)
 				continue;
 
-			next_sb_desc = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, SLAB_ATOMIC);
+			next_sb_desc = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, GFP_ATOMIC);
 			assert(next_sb_desc != NULL);
 
 			if (urb->iso_frame_desc[i].length > 0) {
@@ -3063,7 +3063,7 @@
 		if (TxIsocEPList[epid].sub == 0) {
 			dbg_isoc("Isoc traffic not already running, allocating SB");
 
-			next_sb_desc = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, SLAB_ATOMIC);
+			next_sb_desc = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, GFP_ATOMIC);
 			assert(next_sb_desc != NULL);
 
 			next_sb_desc->command = (IO_STATE(USB_SB_command, tt, in) |
@@ -3317,7 +3317,7 @@
 
 	restore_flags(flags);
 
-	reg = (usb_interrupt_registers_t *)kmem_cache_alloc(top_half_reg_cache, SLAB_ATOMIC);
+	reg = (usb_interrupt_registers_t *)kmem_cache_alloc(top_half_reg_cache, GFP_ATOMIC);
 
 	assert(reg != NULL);
 
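
The hc_crisv10 hunks also show the slab typedef going away: kmem_cache_t becomes struct kmem_cache, and kmem_cache_alloc() now takes GFP_* flags. A sketch with hypothetical names:

#include <linux/types.h>
#include <linux/slab.h>

struct example_desc {
	u32 command;
};

static struct kmem_cache *example_desc_cache;	/* was: static kmem_cache_t * */

static struct example_desc *example_desc_get_atomic(void)
{
	/* kmem_cache_alloc() takes GFP_* flags, not SLAB_* */
	return kmem_cache_alloc(example_desc_cache, GFP_ATOMIC);
}
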
diff --git a/drivers/usb/host/ohci-dbg.c b/drivers/usb/host/ohci-dbg.c
index 8293c1d..0f47a57 100644
--- a/drivers/usb/host/ohci-dbg.c
+++ b/drivers/usb/host/ohci-dbg.c
@@ -505,7 +505,7 @@
 	char			*next;
 	unsigned		i;
 
-	if (!(seen = kmalloc (DBG_SCHED_LIMIT * sizeof *seen, SLAB_ATOMIC)))
+	if (!(seen = kmalloc (DBG_SCHED_LIMIT * sizeof *seen, GFP_ATOMIC)))
 		return 0;
 	seen_count = 0;
 
diff --git a/drivers/usb/host/ohci-pnx4008.c b/drivers/usb/host/ohci-pnx4008.c
index 2dbb774..7f26f9b 100644
--- a/drivers/usb/host/ohci-pnx4008.c
+++ b/drivers/usb/host/ohci-pnx4008.c
@@ -134,7 +134,7 @@
 {
 	struct i2c_client *c;
 
-	c = (struct i2c_client *)kzalloc(sizeof(*c), SLAB_KERNEL);
+	c = (struct i2c_client *)kzalloc(sizeof(*c), GFP_KERNEL);
 
 	if (!c)
 		return -ENOMEM;
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index ef54e310b..a9d7119 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -163,7 +163,7 @@
         u16 queue_next;
         struct urb *urb_list[ENDP_QUEUE_SIZE];
         struct list_head urb_more;
-        struct work_struct scheduler;
+        struct delayed_work scheduler;
 };
 struct u132_ring {
         unsigned in_use:1;
@@ -171,7 +171,7 @@
         u8 number;
         struct u132 *u132;
         struct u132_endp *curr_endp;
-        struct work_struct scheduler;
+        struct delayed_work scheduler;
 };
 #define OHCI_QUIRK_AMD756 0x01
 #define OHCI_QUIRK_SUPERIO 0x02
@@ -198,7 +198,7 @@
         u32 hc_roothub_portstatus[MAX_ROOT_PORTS];
         int flags;
         unsigned long next_statechange;
-        struct work_struct monitor;
+        struct delayed_work monitor;
         int num_endpoints;
         struct u132_addr addr[MAX_U132_ADDRS];
         struct u132_udev udev[MAX_U132_UDEVS];
@@ -310,7 +310,7 @@
         if (delta > 0) {
                 if (queue_delayed_work(workqueue, &ring->scheduler, delta))
                         return;
-        } else if (queue_work(workqueue, &ring->scheduler))
+        } else if (queue_delayed_work(workqueue, &ring->scheduler, 0))
                 return;
         kref_put(&u132->kref, u132_hcd_delete);
         return;
@@ -389,12 +389,8 @@
 static void u132_endp_queue_work(struct u132 *u132, struct u132_endp *endp,
         unsigned int delta)
 {
-        if (delta > 0) {
-                if (queue_delayed_work(workqueue, &endp->scheduler, delta))
-                        kref_get(&endp->kref);
-        } else if (queue_work(workqueue, &endp->scheduler))
-                kref_get(&endp->kref);
-        return;
+	if (queue_delayed_work(workqueue, &endp->scheduler, delta))
+		kref_get(&endp->kref);
 }
 
 static void u132_endp_cancel_work(struct u132 *u132, struct u132_endp *endp)
@@ -410,24 +406,14 @@
 
 static void u132_monitor_queue_work(struct u132 *u132, unsigned int delta)
 {
-        if (delta > 0) {
-                if (queue_delayed_work(workqueue, &u132->monitor, delta)) {
-                        kref_get(&u132->kref);
-                }
-        } else if (queue_work(workqueue, &u132->monitor))
-                kref_get(&u132->kref);
-        return;
+	if (queue_delayed_work(workqueue, &u132->monitor, delta))
+		kref_get(&u132->kref);
 }
 
 static void u132_monitor_requeue_work(struct u132 *u132, unsigned int delta)
 {
-        if (delta > 0) {
-                if (queue_delayed_work(workqueue, &u132->monitor, delta))
-                        return;
-        } else if (queue_work(workqueue, &u132->monitor))
-                return;
-        kref_put(&u132->kref, u132_hcd_delete);
-        return;
+	if (!queue_delayed_work(workqueue, &u132->monitor, delta))
+		kref_put(&u132->kref, u132_hcd_delete);
 }
 
 static void u132_monitor_cancel_work(struct u132 *u132)
@@ -489,9 +475,9 @@
         return 0;
 }
 
-static void u132_hcd_monitor_work(void *data)
+static void u132_hcd_monitor_work(struct work_struct *work)
 {
-        struct u132 *u132 = data;
+        struct u132 *u132 = container_of(work, struct u132, monitor.work);
         if (u132->going > 1) {
                 dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
                         , u132->going);
@@ -1315,15 +1301,14 @@
         }
 }
 
-static void u132_hcd_ring_work_scheduler(void *data);
-static void u132_hcd_endp_work_scheduler(void *data);
 /*
 * this work function is only executed from the work queue
 *
 */
-static void u132_hcd_ring_work_scheduler(void *data)
+static void u132_hcd_ring_work_scheduler(struct work_struct *work)
 {
-        struct u132_ring *ring = data;
+        struct u132_ring *ring =
+		container_of(work, struct u132_ring, scheduler.work);
         struct u132 *u132 = ring->u132;
         down(&u132->scheduler_lock);
         if (ring->in_use) {
@@ -1382,10 +1367,11 @@
         }
 }
 
-static void u132_hcd_endp_work_scheduler(void *data)
+static void u132_hcd_endp_work_scheduler(struct work_struct *work)
 {
         struct u132_ring *ring;
-        struct u132_endp *endp = data;
+        struct u132_endp *endp =
+		container_of(work, struct u132_endp, scheduler.work);
         struct u132 *u132 = endp->u132;
         down(&u132->scheduler_lock);
         ring = endp->ring;
@@ -1943,7 +1929,7 @@
         if (!endp) {
                 return -ENOMEM;
         }
-        INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp);
+        INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
         spin_lock_init(&endp->queue_lock.slock);
         INIT_LIST_HEAD(&endp->urb_more);
         ring = endp->ring = &u132->ring[0];
@@ -2032,7 +2018,7 @@
         if (!endp) {
                 return -ENOMEM;
         }
-        INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp);
+        INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
         spin_lock_init(&endp->queue_lock.slock);
         INIT_LIST_HEAD(&endp->urb_more);
         endp->dequeueing = 0;
@@ -2117,7 +2103,7 @@
         if (!endp) {
                 return -ENOMEM;
         }
-        INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp);
+        INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
         spin_lock_init(&endp->queue_lock.slock);
         INIT_LIST_HEAD(&endp->urb_more);
         ring = endp->ring = &u132->ring[0];
@@ -3096,10 +3082,10 @@
                 ring->number = rings + 1;
                 ring->length = 0;
                 ring->curr_endp = NULL;
-                INIT_WORK(&ring->scheduler, u132_hcd_ring_work_scheduler,
-                        (void *)ring);
+                INIT_DELAYED_WORK(&ring->scheduler,
+				  u132_hcd_ring_work_scheduler);
         } down(&u132->sw_lock);
-        INIT_WORK(&u132->monitor, u132_hcd_monitor_work, (void *)u132);
+        INIT_DELAYED_WORK(&u132->monitor, u132_hcd_monitor_work);
         while (ports-- > 0) {
                 struct u132_port *port = &u132->port[ports];
                 port->u132 = u132;
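
The collapsed u132 queue helpers above rely on queue_delayed_work() accepting a zero delay, so the old "if (delta > 0) ... else queue_work()" split disappears. A hedged sketch of the resulting helper shape (all names hypothetical):

#include <linux/kernel.h>
#include <linux/workqueue.h>

static void example_queue_work(struct workqueue_struct *wq,
			       struct delayed_work *dwork,
			       unsigned int delta)
{
	/* a zero delta queues the work immediately; the return value still
	 * tells the caller whether the item was newly queued */
	if (!queue_delayed_work(wq, dwork, delta))
		pr_debug("work item was already pending\n");
}
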
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index 226bf3d..e87692c 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -81,7 +81,7 @@
 static char *errbuf;
 #define ERRBUF_LEN    (32 * 1024)
 
-static kmem_cache_t *uhci_up_cachep;	/* urb_priv */
+static struct kmem_cache *uhci_up_cachep;	/* urb_priv */
 
 static void suspend_rh(struct uhci_hcd *uhci, enum uhci_rh_state new_state);
 static void wakeup_rh(struct uhci_hcd *uhci);
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index 06115f2..30b8845 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -498,7 +498,7 @@
 {
 	struct urb_priv *urbp;
 
-	urbp = kmem_cache_alloc(uhci_up_cachep, SLAB_ATOMIC);
+	urbp = kmem_cache_alloc(uhci_up_cachep, GFP_ATOMIC);
 	if (!urbp)
 		return NULL;
 
diff --git a/drivers/usb/input/acecad.c b/drivers/usb/input/acecad.c
index 0096373..909138e 100644
--- a/drivers/usb/input/acecad.c
+++ b/drivers/usb/input/acecad.c
@@ -152,7 +152,7 @@
 	if (!acecad || !input_dev)
 		goto fail1;
 
-	acecad->data = usb_buffer_alloc(dev, 8, SLAB_KERNEL, &acecad->data_dma);
+	acecad->data = usb_buffer_alloc(dev, 8, GFP_KERNEL, &acecad->data_dma);
 	if (!acecad->data)
 		goto fail1;
 
diff --git a/drivers/usb/input/aiptek.c b/drivers/usb/input/aiptek.c
index bf42818..9f52429 100644
--- a/drivers/usb/input/aiptek.c
+++ b/drivers/usb/input/aiptek.c
@@ -1988,7 +1988,7 @@
 		goto fail1;
 
 	aiptek->data = usb_buffer_alloc(usbdev, AIPTEK_PACKET_LENGTH,
-					SLAB_ATOMIC, &aiptek->data_dma);
+					GFP_ATOMIC, &aiptek->data_dma);
 	if (!aiptek->data)
 		goto fail1;
 
diff --git a/drivers/usb/input/ati_remote.c b/drivers/usb/input/ati_remote.c
index ff23318..b724e36 100644
--- a/drivers/usb/input/ati_remote.c
+++ b/drivers/usb/input/ati_remote.c
@@ -592,7 +592,7 @@
 			__FUNCTION__, urb->status);
 	}
 
-	retval = usb_submit_urb(urb, SLAB_ATOMIC);
+	retval = usb_submit_urb(urb, GFP_ATOMIC);
 	if (retval)
 		dev_err(&ati_remote->interface->dev, "%s: usb_submit_urb()=%d\n",
 			__FUNCTION__, retval);
@@ -604,12 +604,12 @@
 static int ati_remote_alloc_buffers(struct usb_device *udev,
 				    struct ati_remote *ati_remote)
 {
-	ati_remote->inbuf = usb_buffer_alloc(udev, DATA_BUFSIZE, SLAB_ATOMIC,
+	ati_remote->inbuf = usb_buffer_alloc(udev, DATA_BUFSIZE, GFP_ATOMIC,
 					     &ati_remote->inbuf_dma);
 	if (!ati_remote->inbuf)
 		return -1;
 
-	ati_remote->outbuf = usb_buffer_alloc(udev, DATA_BUFSIZE, SLAB_ATOMIC,
+	ati_remote->outbuf = usb_buffer_alloc(udev, DATA_BUFSIZE, GFP_ATOMIC,
 					      &ati_remote->outbuf_dma);
 	if (!ati_remote->outbuf)
 		return -1;
diff --git a/drivers/usb/input/hid-core.c b/drivers/usb/input/hid-core.c
index a49644b..f1d0e1d 100644
--- a/drivers/usb/input/hid-core.c
+++ b/drivers/usb/input/hid-core.c
@@ -969,9 +969,10 @@
 }
 
 /* Workqueue routine to reset the device or clear a halt */
-static void hid_reset(void *_hid)
+static void hid_reset(struct work_struct *work)
 {
-	struct hid_device *hid = (struct hid_device *) _hid;
+	struct hid_device *hid =
+		container_of(work, struct hid_device, reset_work);
 	int rc_lock, rc = 0;
 
 	if (test_bit(HID_CLEAR_HALT, &hid->iofl)) {
@@ -1078,7 +1079,7 @@
 			warn("input irq status %d received", urb->status);
 	}
 
-	status = usb_submit_urb(urb, SLAB_ATOMIC);
+	status = usb_submit_urb(urb, GFP_ATOMIC);
 	if (status) {
 		clear_bit(HID_IN_RUNNING, &hid->iofl);
 		if (status != -EPERM) {
@@ -1863,13 +1864,13 @@
 
 static int hid_alloc_buffers(struct usb_device *dev, struct hid_device *hid)
 {
-	if (!(hid->inbuf = usb_buffer_alloc(dev, hid->bufsize, SLAB_ATOMIC, &hid->inbuf_dma)))
+	if (!(hid->inbuf = usb_buffer_alloc(dev, hid->bufsize, GFP_ATOMIC, &hid->inbuf_dma)))
 		return -1;
-	if (!(hid->outbuf = usb_buffer_alloc(dev, hid->bufsize, SLAB_ATOMIC, &hid->outbuf_dma)))
+	if (!(hid->outbuf = usb_buffer_alloc(dev, hid->bufsize, GFP_ATOMIC, &hid->outbuf_dma)))
 		return -1;
-	if (!(hid->cr = usb_buffer_alloc(dev, sizeof(*(hid->cr)), SLAB_ATOMIC, &hid->cr_dma)))
+	if (!(hid->cr = usb_buffer_alloc(dev, sizeof(*(hid->cr)), GFP_ATOMIC, &hid->cr_dma)))
 		return -1;
-	if (!(hid->ctrlbuf = usb_buffer_alloc(dev, hid->bufsize, SLAB_ATOMIC, &hid->ctrlbuf_dma)))
+	if (!(hid->ctrlbuf = usb_buffer_alloc(dev, hid->bufsize, GFP_ATOMIC, &hid->ctrlbuf_dma)))
 		return -1;
 
 	return 0;
@@ -2043,7 +2044,7 @@
 
 	init_waitqueue_head(&hid->wait);
 
-	INIT_WORK(&hid->reset_work, hid_reset, hid);
+	INIT_WORK(&hid->reset_work, hid_reset);
 	setup_timer(&hid->io_retry, hid_retry_timeout, (unsigned long) hid);
 
 	spin_lock_init(&hid->inlock);
diff --git a/drivers/usb/input/keyspan_remote.c b/drivers/usb/input/keyspan_remote.c
index 50aa810..98bd323 100644
--- a/drivers/usb/input/keyspan_remote.c
+++ b/drivers/usb/input/keyspan_remote.c
@@ -456,7 +456,7 @@
 	remote->in_endpoint = endpoint;
 	remote->toggle = -1;	/* Set to -1 so we will always not match the toggle from the first remote message. */
 
-	remote->in_buffer = usb_buffer_alloc(udev, RECV_SIZE, SLAB_ATOMIC, &remote->in_dma);
+	remote->in_buffer = usb_buffer_alloc(udev, RECV_SIZE, GFP_ATOMIC, &remote->in_dma);
 	if (!remote->in_buffer) {
 		retval = -ENOMEM;
 		goto fail1;
diff --git a/drivers/usb/input/mtouchusb.c b/drivers/usb/input/mtouchusb.c
index 79a85d4..92c4e07 100644
--- a/drivers/usb/input/mtouchusb.c
+++ b/drivers/usb/input/mtouchusb.c
@@ -164,7 +164,7 @@
 	dbg("%s - called", __FUNCTION__);
 
 	mtouch->data = usb_buffer_alloc(udev, MTOUCHUSB_REPORT_DATA_SIZE,
-					SLAB_ATOMIC, &mtouch->data_dma);
+					GFP_ATOMIC, &mtouch->data_dma);
 
 	if (!mtouch->data)
 		return -1;
diff --git a/drivers/usb/input/powermate.c b/drivers/usb/input/powermate.c
index 0bf9177..fea97e5 100644
--- a/drivers/usb/input/powermate.c
+++ b/drivers/usb/input/powermate.c
@@ -277,12 +277,12 @@
 static int powermate_alloc_buffers(struct usb_device *udev, struct powermate_device *pm)
 {
 	pm->data = usb_buffer_alloc(udev, POWERMATE_PAYLOAD_SIZE_MAX,
-				    SLAB_ATOMIC, &pm->data_dma);
+				    GFP_ATOMIC, &pm->data_dma);
 	if (!pm->data)
 		return -1;
 
 	pm->configcr = usb_buffer_alloc(udev, sizeof(*(pm->configcr)),
-					SLAB_ATOMIC, &pm->configcr_dma);
+					GFP_ATOMIC, &pm->configcr_dma);
 	if (!pm->configcr)
 		return -1;
 
diff --git a/drivers/usb/input/touchkitusb.c b/drivers/usb/input/touchkitusb.c
index 05c0d1c..2a314b0 100644
--- a/drivers/usb/input/touchkitusb.c
+++ b/drivers/usb/input/touchkitusb.c
@@ -248,7 +248,7 @@
 				  struct touchkit_usb *touchkit)
 {
 	touchkit->data = usb_buffer_alloc(udev, TOUCHKIT_REPORT_DATA_SIZE,
-	                                  SLAB_ATOMIC, &touchkit->data_dma);
+	                                  GFP_ATOMIC, &touchkit->data_dma);
 
 	if (!touchkit->data)
 		return -1;
diff --git a/drivers/usb/input/usbkbd.c b/drivers/usb/input/usbkbd.c
index dac8864..8505824 100644
--- a/drivers/usb/input/usbkbd.c
+++ b/drivers/usb/input/usbkbd.c
@@ -122,7 +122,7 @@
 	memcpy(kbd->old, kbd->new, 8);
 
 resubmit:
-	i = usb_submit_urb (urb, SLAB_ATOMIC);
+	i = usb_submit_urb (urb, GFP_ATOMIC);
 	if (i)
 		err ("can't resubmit intr, %s-%s/input0, status %d",
 				kbd->usbdev->bus->bus_name,
@@ -196,11 +196,11 @@
 		return -1;
 	if (!(kbd->led = usb_alloc_urb(0, GFP_KERNEL)))
 		return -1;
-	if (!(kbd->new = usb_buffer_alloc(dev, 8, SLAB_ATOMIC, &kbd->new_dma)))
+	if (!(kbd->new = usb_buffer_alloc(dev, 8, GFP_ATOMIC, &kbd->new_dma)))
 		return -1;
-	if (!(kbd->cr = usb_buffer_alloc(dev, sizeof(struct usb_ctrlrequest), SLAB_ATOMIC, &kbd->cr_dma)))
+	if (!(kbd->cr = usb_buffer_alloc(dev, sizeof(struct usb_ctrlrequest), GFP_ATOMIC, &kbd->cr_dma)))
 		return -1;
-	if (!(kbd->leds = usb_buffer_alloc(dev, 1, SLAB_ATOMIC, &kbd->leds_dma)))
+	if (!(kbd->leds = usb_buffer_alloc(dev, 1, GFP_ATOMIC, &kbd->leds_dma)))
 		return -1;
 
 	return 0;
diff --git a/drivers/usb/input/usbmouse.c b/drivers/usb/input/usbmouse.c
index 68a5564..64a33e4 100644
--- a/drivers/usb/input/usbmouse.c
+++ b/drivers/usb/input/usbmouse.c
@@ -86,7 +86,7 @@
 
 	input_sync(dev);
 resubmit:
-	status = usb_submit_urb (urb, SLAB_ATOMIC);
+	status = usb_submit_urb (urb, GFP_ATOMIC);
 	if (status)
 		err ("can't resubmit intr, %s-%s/input0, status %d",
 				mouse->usbdev->bus->bus_name,
@@ -137,7 +137,7 @@
 	if (!mouse || !input_dev)
 		goto fail1;
 
-	mouse->data = usb_buffer_alloc(dev, 8, SLAB_ATOMIC, &mouse->data_dma);
+	mouse->data = usb_buffer_alloc(dev, 8, GFP_ATOMIC, &mouse->data_dma);
 	if (!mouse->data)
 		goto fail1;
 
diff --git a/drivers/usb/input/usbtouchscreen.c b/drivers/usb/input/usbtouchscreen.c
index 49704d4..7f3c57d 100644
--- a/drivers/usb/input/usbtouchscreen.c
+++ b/drivers/usb/input/usbtouchscreen.c
@@ -680,7 +680,7 @@
 		type->process_pkt = usbtouch_process_pkt;
 
 	usbtouch->data = usb_buffer_alloc(udev, type->rept_size,
-	                                  SLAB_KERNEL, &usbtouch->data_dma);
+	                                  GFP_KERNEL, &usbtouch->data_dma);
 	if (!usbtouch->data)
 		goto out_free;
 
diff --git a/drivers/usb/input/xpad.c b/drivers/usb/input/xpad.c
index df97e5c..e4bc76e 100644
--- a/drivers/usb/input/xpad.c
+++ b/drivers/usb/input/xpad.c
@@ -325,7 +325,7 @@
 		goto fail1;
 
 	xpad->idata = usb_buffer_alloc(udev, XPAD_PKT_LEN,
-				       SLAB_ATOMIC, &xpad->idata_dma);
+				       GFP_ATOMIC, &xpad->idata_dma);
 	if (!xpad->idata)
 		goto fail1;
 
diff --git a/drivers/usb/input/yealink.c b/drivers/usb/input/yealink.c
index 2268ca3..caff8e6 100644
--- a/drivers/usb/input/yealink.c
+++ b/drivers/usb/input/yealink.c
@@ -874,17 +874,17 @@
 
 	/* allocate usb buffers */
 	yld->irq_data = usb_buffer_alloc(udev, USB_PKT_LEN,
-					SLAB_ATOMIC, &yld->irq_dma);
+					GFP_ATOMIC, &yld->irq_dma);
 	if (yld->irq_data == NULL)
 		return usb_cleanup(yld, -ENOMEM);
 
 	yld->ctl_data = usb_buffer_alloc(udev, USB_PKT_LEN,
-					SLAB_ATOMIC, &yld->ctl_dma);
+					GFP_ATOMIC, &yld->ctl_dma);
 	if (!yld->ctl_data)
 		return usb_cleanup(yld, -ENOMEM);
 
 	yld->ctl_req = usb_buffer_alloc(udev, sizeof(*(yld->ctl_req)),
-					SLAB_ATOMIC, &yld->ctl_req_dma);
+					GFP_ATOMIC, &yld->ctl_req_dma);
 	if (yld->ctl_req == NULL)
 		return usb_cleanup(yld, -ENOMEM);
 
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index ba30ca6..02cbb7f 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -76,7 +76,7 @@
 	char *urbdata;			/* interrupt URB data buffer */
 	char *msgdata;			/* control message data buffer */
 
-	struct work_struct work;
+	struct delayed_work work;
 	int button_pressed;
 	spinlock_t lock;
 };
@@ -117,7 +117,7 @@
 	case ACD_BTN_BRIGHT_UP:
 	case ACD_BTN_BRIGHT_DOWN:
 		pdata->button_pressed = 1;
-		queue_work(wq, &pdata->work);
+		queue_delayed_work(wq, &pdata->work, 0);
 		break;
 	case ACD_BTN_NONE:
 	default:
@@ -184,9 +184,10 @@
 	.max_brightness	= 0xFF
 };
 
-static void appledisplay_work(void *private)
+static void appledisplay_work(struct work_struct *work)
 {
-	struct appledisplay *pdata = private;
+	struct appledisplay *pdata =
+		container_of(work, struct appledisplay, work.work);
 	int retval;
 
 	up(&pdata->bd->sem);
@@ -238,7 +239,7 @@
 	pdata->udev = udev;
 
 	spin_lock_init(&pdata->lock);
-	INIT_WORK(&pdata->work, appledisplay_work, pdata);
+	INIT_DELAYED_WORK(&pdata->work, appledisplay_work);
 
 	/* Allocate buffer for control messages */
 	pdata->msgdata = kmalloc(ACD_MSG_BUFFER_LEN, GFP_KERNEL);
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c
index cb0ba31..18b1925 100644
--- a/drivers/usb/misc/ftdi-elan.c
+++ b/drivers/usb/misc/ftdi-elan.c
@@ -156,9 +156,9 @@
         struct usb_device *udev;
         struct usb_interface *interface;
         struct usb_class_driver *class;
-        struct work_struct status_work;
-        struct work_struct command_work;
-        struct work_struct respond_work;
+        struct delayed_work status_work;
+        struct delayed_work command_work;
+        struct delayed_work respond_work;
         struct u132_platform_data platform_data;
         struct resource resources[0];
         struct platform_device platform_dev;
@@ -210,23 +210,14 @@
 
 static void ftdi_status_requeue_work(struct usb_ftdi *ftdi, unsigned int delta)
 {
-        if (delta > 0) {
-                if (queue_delayed_work(status_queue, &ftdi->status_work, delta))
-                        return;
-        } else if (queue_work(status_queue, &ftdi->status_work))
-                return;
-        kref_put(&ftdi->kref, ftdi_elan_delete);
-        return;
+	if (!queue_delayed_work(status_queue, &ftdi->status_work, delta))
+		kref_put(&ftdi->kref, ftdi_elan_delete);
 }
 
 static void ftdi_status_queue_work(struct usb_ftdi *ftdi, unsigned int delta)
 {
-        if (delta > 0) {
-                if (queue_delayed_work(status_queue, &ftdi->status_work, delta))
-                        kref_get(&ftdi->kref);
-        } else if (queue_work(status_queue, &ftdi->status_work))
-                kref_get(&ftdi->kref);
-        return;
+	if (queue_delayed_work(status_queue, &ftdi->status_work, delta))
+		kref_get(&ftdi->kref);
 }
 
 static void ftdi_status_cancel_work(struct usb_ftdi *ftdi)
@@ -237,25 +228,14 @@
 
 static void ftdi_command_requeue_work(struct usb_ftdi *ftdi, unsigned int delta)
 {
-        if (delta > 0) {
-                if (queue_delayed_work(command_queue, &ftdi->command_work,
-                        delta))
-                        return;
-        } else if (queue_work(command_queue, &ftdi->command_work))
-                return;
-        kref_put(&ftdi->kref, ftdi_elan_delete);
-        return;
+	if (!queue_delayed_work(command_queue, &ftdi->command_work, delta))
+		kref_put(&ftdi->kref, ftdi_elan_delete);
 }
 
 static void ftdi_command_queue_work(struct usb_ftdi *ftdi, unsigned int delta)
 {
-        if (delta > 0) {
-                if (queue_delayed_work(command_queue, &ftdi->command_work,
-                        delta))
-                        kref_get(&ftdi->kref);
-        } else if (queue_work(command_queue, &ftdi->command_work))
-                kref_get(&ftdi->kref);
-        return;
+	if (queue_delayed_work(command_queue, &ftdi->command_work, delta))
+		kref_get(&ftdi->kref);
 }
 
 static void ftdi_command_cancel_work(struct usb_ftdi *ftdi)
@@ -267,25 +247,14 @@
 static void ftdi_response_requeue_work(struct usb_ftdi *ftdi,
         unsigned int delta)
 {
-        if (delta > 0) {
-                if (queue_delayed_work(respond_queue, &ftdi->respond_work,
-                        delta))
-                        return;
-        } else if (queue_work(respond_queue, &ftdi->respond_work))
-                return;
-        kref_put(&ftdi->kref, ftdi_elan_delete);
-        return;
+	if (!queue_delayed_work(respond_queue, &ftdi->respond_work, delta))
+		kref_put(&ftdi->kref, ftdi_elan_delete);
 }
 
 static void ftdi_respond_queue_work(struct usb_ftdi *ftdi, unsigned int delta)
 {
-        if (delta > 0) {
-                if (queue_delayed_work(respond_queue, &ftdi->respond_work,
-                        delta))
-                        kref_get(&ftdi->kref);
-        } else if (queue_work(respond_queue, &ftdi->respond_work))
-                kref_get(&ftdi->kref);
-        return;
+	if (queue_delayed_work(respond_queue, &ftdi->respond_work, delta))
+		kref_get(&ftdi->kref);
 }
 
 static void ftdi_response_cancel_work(struct usb_ftdi *ftdi)
@@ -475,9 +444,11 @@
         return;
 }
 
-static void ftdi_elan_command_work(void *data)
+static void ftdi_elan_command_work(struct work_struct *work)
 {
-        struct usb_ftdi *ftdi = data;
+        struct usb_ftdi *ftdi =
+		container_of(work, struct usb_ftdi, command_work.work);
+
         if (ftdi->disconnected > 0) {
                 ftdi_elan_put_kref(ftdi);
                 return;
@@ -500,9 +471,10 @@
         return;
 }
 
-static void ftdi_elan_respond_work(void *data)
+static void ftdi_elan_respond_work(struct work_struct *work)
 {
-        struct usb_ftdi *ftdi = data;
+        struct usb_ftdi *ftdi =
+		container_of(work, struct usb_ftdi, respond_work.work);
         if (ftdi->disconnected > 0) {
                 ftdi_elan_put_kref(ftdi);
                 return;
@@ -534,9 +506,10 @@
 * after the FTDI has been synchronized
 *
 */
-static void ftdi_elan_status_work(void *data)
+static void ftdi_elan_status_work(struct work_struct *work)
 {
-        struct usb_ftdi *ftdi = data;
+        struct usb_ftdi *ftdi =
+		container_of(work, struct usb_ftdi, status_work.work);
         int work_delay_in_msec = 0;
         if (ftdi->disconnected > 0) {
                 ftdi_elan_put_kref(ftdi);
@@ -2677,12 +2650,9 @@
                 ftdi->class = NULL;
                 dev_info(&ftdi->udev->dev, "USB FDTI=%p ELAN interface %d now a"
                         "ctivated\n", ftdi, iface_desc->desc.bInterfaceNumber);
-                INIT_WORK(&ftdi->status_work, ftdi_elan_status_work,
-                        (void *)ftdi);
-                INIT_WORK(&ftdi->command_work, ftdi_elan_command_work,
-                        (void *)ftdi);
-                INIT_WORK(&ftdi->respond_work, ftdi_elan_respond_work,
-                        (void *)ftdi);
+                INIT_DELAYED_WORK(&ftdi->status_work, ftdi_elan_status_work);
+                INIT_DELAYED_WORK(&ftdi->command_work, ftdi_elan_command_work);
+                INIT_DELAYED_WORK(&ftdi->respond_work, ftdi_elan_respond_work);
                 ftdi_status_queue_work(ftdi, msecs_to_jiffies(3 *1000));
                 return 0;
         } else {
diff --git a/drivers/usb/misc/phidgetkit.c b/drivers/usb/misc/phidgetkit.c
index 9110793..371bf2b 100644
--- a/drivers/usb/misc/phidgetkit.c
+++ b/drivers/usb/misc/phidgetkit.c
@@ -81,8 +81,8 @@
 	unsigned char *data;
 	dma_addr_t data_dma;
 
-	struct work_struct do_notify;
-	struct work_struct do_resubmit;
+	struct delayed_work do_notify;
+	struct delayed_work do_resubmit;
 	unsigned long input_events;
 	unsigned long sensor_events;
 };
@@ -374,19 +374,20 @@
 	}
 
 	if (kit->input_events || kit->sensor_events)
-		schedule_work(&kit->do_notify);
+		schedule_delayed_work(&kit->do_notify, 0);
 
 resubmit:
-	status = usb_submit_urb(urb, SLAB_ATOMIC);
+	status = usb_submit_urb(urb, GFP_ATOMIC);
 	if (status)
 		err("can't resubmit intr, %s-%s/interfacekit0, status %d",
 			kit->udev->bus->bus_name,
 			kit->udev->devpath, status);
 }
 
-static void do_notify(void *data)
+static void do_notify(struct work_struct *work)
 {
-	struct interfacekit *kit = data;
+	struct interfacekit *kit =
+		container_of(work, struct interfacekit, do_notify.work);
 	int i;
 	char sysfs_file[8];
 
@@ -405,9 +406,11 @@
 	}
 }
 
-static void do_resubmit(void *data)
+static void do_resubmit(struct work_struct *work)
 {
-	set_outputs(data);
+	struct interfacekit *kit =
+		container_of(work, struct interfacekit, do_resubmit.work);
+	set_outputs(kit);
 }
 
 #define show_set_output(value)		\
@@ -565,7 +568,7 @@
 
 	kit->dev_no = -1;
 	kit->ifkit = ifkit;
-	kit->data = usb_buffer_alloc(dev, URB_INT_SIZE, SLAB_ATOMIC, &kit->data_dma);
+	kit->data = usb_buffer_alloc(dev, URB_INT_SIZE, GFP_ATOMIC, &kit->data_dma);
 	if (!kit->data)
 		goto out;
 
@@ -575,8 +578,8 @@
 
 	kit->udev = usb_get_dev(dev);
 	kit->intf = intf;
-	INIT_WORK(&kit->do_notify, do_notify, kit);
-	INIT_WORK(&kit->do_resubmit, do_resubmit, kit);
+	INIT_DELAYED_WORK(&kit->do_notify, do_notify);
+	INIT_DELAYED_WORK(&kit->do_resubmit, do_resubmit);
 	usb_fill_int_urb(kit->irq, kit->udev, pipe, kit->data,
 			maxp > URB_INT_SIZE ? URB_INT_SIZE : maxp,
 			interfacekit_irq, kit, endpoint->bInterval);
diff --git a/drivers/usb/misc/phidgetmotorcontrol.c b/drivers/usb/misc/phidgetmotorcontrol.c
index c3469b0..5727e1e 100644
--- a/drivers/usb/misc/phidgetmotorcontrol.c
+++ b/drivers/usb/misc/phidgetmotorcontrol.c
@@ -41,7 +41,7 @@
 	unsigned char *data;
 	dma_addr_t data_dma;
 
-	struct work_struct do_notify;
+	struct delayed_work do_notify;
 	unsigned long input_events;
 	unsigned long speed_events;
 	unsigned long exceed_events;
@@ -148,10 +148,10 @@
 		set_bit(1, &mc->exceed_events);
 
 	if (mc->input_events || mc->exceed_events || mc->speed_events)
-		schedule_work(&mc->do_notify);
+		schedule_delayed_work(&mc->do_notify, 0);
 
 resubmit:
-	status = usb_submit_urb(urb, SLAB_ATOMIC);
+	status = usb_submit_urb(urb, GFP_ATOMIC);
 	if (status)
 		dev_err(&mc->intf->dev,
 			"can't resubmit intr, %s-%s/motorcontrol0, status %d",
@@ -159,9 +159,10 @@
 			mc->udev->devpath, status);
 }
 
-static void do_notify(void *data)
+static void do_notify(struct work_struct *work)
 {
-	struct motorcontrol *mc = data;
+	struct motorcontrol *mc =
+		container_of(work, struct motorcontrol, do_notify.work);
 	int i;
 	char sysfs_file[8];
 
@@ -337,7 +338,7 @@
 		goto out;
 
 	mc->dev_no = -1;
-	mc->data = usb_buffer_alloc(dev, URB_INT_SIZE, SLAB_ATOMIC, &mc->data_dma);
+	mc->data = usb_buffer_alloc(dev, URB_INT_SIZE, GFP_ATOMIC, &mc->data_dma);
 	if (!mc->data)
 		goto out;
 
@@ -348,7 +349,7 @@
 	mc->udev = usb_get_dev(dev);
 	mc->intf = intf;
 	mc->acceleration[0] = mc->acceleration[1] = 10;
-	INIT_WORK(&mc->do_notify, do_notify, mc);
+	INIT_DELAYED_WORK(&mc->do_notify, do_notify);
 	usb_fill_int_urb(mc->irq, mc->udev, pipe, mc->data,
 			maxp > URB_INT_SIZE ? URB_INT_SIZE : maxp,
 			motorcontrol_irq, mc, endpoint->bInterval);
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 194065d..fb32186 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -213,7 +213,7 @@
 
 	if (bytes < 0)
 		return NULL;
-	urb = usb_alloc_urb (0, SLAB_KERNEL);
+	urb = usb_alloc_urb (0, GFP_KERNEL);
 	if (!urb)
 		return urb;
 	usb_fill_bulk_urb (urb, udev, pipe, NULL, bytes, simple_callback, NULL);
@@ -223,7 +223,7 @@
 	urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
 	if (usb_pipein (pipe))
 		urb->transfer_flags |= URB_SHORT_NOT_OK;
-	urb->transfer_buffer = usb_buffer_alloc (udev, bytes, SLAB_KERNEL,
+	urb->transfer_buffer = usb_buffer_alloc (udev, bytes, GFP_KERNEL,
 			&urb->transfer_dma);
 	if (!urb->transfer_buffer) {
 		usb_free_urb (urb);
@@ -315,7 +315,7 @@
 		init_completion (&completion);
 		if (usb_pipeout (urb->pipe))
 			simple_fill_buf (urb);
-		if ((retval = usb_submit_urb (urb, SLAB_KERNEL)) != 0)
+		if ((retval = usb_submit_urb (urb, GFP_KERNEL)) != 0)
 			break;
 
 		/* NOTE:  no timeouts; can't be broken out of by interrupt */
@@ -374,7 +374,7 @@
 	unsigned		i;
 	unsigned		size = max;
 
-	sg = kmalloc (nents * sizeof *sg, SLAB_KERNEL);
+	sg = kmalloc (nents * sizeof *sg, GFP_KERNEL);
 	if (!sg)
 		return NULL;
 
@@ -382,7 +382,7 @@
 		char		*buf;
 		unsigned	j;
 
-		buf = kzalloc (size, SLAB_KERNEL);
+		buf = kzalloc (size, GFP_KERNEL);
 		if (!buf) {
 			free_sglist (sg, i);
 			return NULL;
@@ -428,7 +428,7 @@
 				(udev->speed == USB_SPEED_HIGH)
 					? (INTERRUPT_RATE << 3)
 					: INTERRUPT_RATE,
-				sg, nents, 0, SLAB_KERNEL);
+				sg, nents, 0, GFP_KERNEL);
 		
 		if (retval)
 			break;
@@ -819,7 +819,7 @@
 
 	/* resubmit if we need to, else mark this as done */
 	if ((status == 0) && (ctx->pending < ctx->count)) {
-		if ((status = usb_submit_urb (urb, SLAB_ATOMIC)) != 0) {
+		if ((status = usb_submit_urb (urb, GFP_ATOMIC)) != 0) {
 			dbg ("can't resubmit ctrl %02x.%02x, err %d",
 				reqp->bRequestType, reqp->bRequest, status);
 			urb->dev = NULL;
@@ -855,7 +855,7 @@
 	 * as with bulk/intr sglists, sglen is the queue depth; it also
 	 * controls which subtests run (more tests than sglen) or rerun.
 	 */
-	urb = kcalloc(param->sglen, sizeof(struct urb *), SLAB_KERNEL);
+	urb = kcalloc(param->sglen, sizeof(struct urb *), GFP_KERNEL);
 	if (!urb)
 		return -ENOMEM;
 	for (i = 0; i < param->sglen; i++) {
@@ -981,7 +981,7 @@
 		if (!u)
 			goto cleanup;
 
-		reqp = usb_buffer_alloc (udev, sizeof *reqp, SLAB_KERNEL,
+		reqp = usb_buffer_alloc (udev, sizeof *reqp, GFP_KERNEL,
 				&u->setup_dma);
 		if (!reqp)
 			goto cleanup;
@@ -999,7 +999,7 @@
 	context.urb = urb;
 	spin_lock_irq (&context.lock);
 	for (i = 0; i < param->sglen; i++) {
-		context.status = usb_submit_urb (urb [i], SLAB_ATOMIC);
+		context.status = usb_submit_urb (urb [i], GFP_ATOMIC);
 		if (context.status != 0) {
 			dbg ("can't submit urb[%d], status %d",
 					i, context.status);
@@ -1041,7 +1041,7 @@
 
 	// we "know" -EPIPE (stall) never happens
 	if (!status)
-		status = usb_submit_urb (urb, SLAB_ATOMIC);
+		status = usb_submit_urb (urb, GFP_ATOMIC);
 	if (status) {
 		urb->status = status;
 		complete ((struct completion *) urb->context);
@@ -1067,7 +1067,7 @@
 	 * FIXME want additional tests for when endpoint is STALLing
 	 * due to errors, or is just NAKing requests.
 	 */
-	if ((retval = usb_submit_urb (urb, SLAB_KERNEL)) != 0) {
+	if ((retval = usb_submit_urb (urb, GFP_KERNEL)) != 0) {
 		dev_dbg (&dev->intf->dev, "submit fail %d\n", retval);
 		return retval;
 	}
@@ -1251,7 +1251,7 @@
 	if (length < 1 || length > 0xffff || vary >= length)
 		return -EINVAL;
 
-	buf = kmalloc(length, SLAB_KERNEL);
+	buf = kmalloc(length, GFP_KERNEL);
 	if (!buf)
 		return -ENOMEM;
 
@@ -1403,7 +1403,7 @@
 	maxp *= 1 + (0x3 & (le16_to_cpu(desc->wMaxPacketSize) >> 11));
 	packets = (bytes + maxp - 1) / maxp;
 
-	urb = usb_alloc_urb (packets, SLAB_KERNEL);
+	urb = usb_alloc_urb (packets, GFP_KERNEL);
 	if (!urb)
 		return urb;
 	urb->dev = udev;
@@ -1411,7 +1411,7 @@
 
 	urb->number_of_packets = packets;
 	urb->transfer_buffer_length = bytes;
-	urb->transfer_buffer = usb_buffer_alloc (udev, bytes, SLAB_KERNEL,
+	urb->transfer_buffer = usb_buffer_alloc (udev, bytes, GFP_KERNEL,
 			&urb->transfer_dma);
 	if (!urb->transfer_buffer) {
 		usb_free_urb (urb);
@@ -1481,7 +1481,7 @@
 	spin_lock_irq (&context.lock);
 	for (i = 0; i < param->sglen; i++) {
 		++context.pending;
-		status = usb_submit_urb (urbs [i], SLAB_ATOMIC);
+		status = usb_submit_urb (urbs [i], GFP_ATOMIC);
 		if (status < 0) {
 			ERROR (dev, "submit iso[%d], error %d\n", i, status);
 			if (i == 0) {
@@ -1900,7 +1900,7 @@
 	}
 #endif
 
-	dev = kzalloc(sizeof(*dev), SLAB_KERNEL);
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 	if (!dev)
 		return -ENOMEM;
 	info = (struct usbtest_info *) id->driver_info;
@@ -1910,7 +1910,7 @@
 	dev->intf = intf;
 
 	/* cacheline-aligned scratch for i/o */
-	if ((dev->buf = kmalloc (TBUF_SIZE, SLAB_KERNEL)) == NULL) {
+	if ((dev->buf = kmalloc (TBUF_SIZE, GFP_KERNEL)) == NULL) {
 		kfree (dev);
 		return -ENOMEM;
 	}
diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c
index 7a2346c..05cf2c9 100644
--- a/drivers/usb/mon/mon_text.c
+++ b/drivers/usb/mon/mon_text.c
@@ -50,7 +50,7 @@
 
 #define SLAB_NAME_SZ  30
 struct mon_reader_text {
-	kmem_cache_t *e_slab;
+	struct kmem_cache *e_slab;
 	int nevents;
 	struct list_head e_list;
 	struct mon_reader r;	/* In C, parent class can be placed anywhere */
@@ -63,7 +63,7 @@
 	char slab_name[SLAB_NAME_SZ];
 };
 
-static void mon_text_ctor(void *, kmem_cache_t *, unsigned long);
+static void mon_text_ctor(void *, struct kmem_cache *, unsigned long);
 
 /*
  * mon_text_submit
@@ -147,7 +147,7 @@
 	stamp = mon_get_timestamp();
 
 	if (rp->nevents >= EVENT_MAX ||
-	    (ep = kmem_cache_alloc(rp->e_slab, SLAB_ATOMIC)) == NULL) {
+	    (ep = kmem_cache_alloc(rp->e_slab, GFP_ATOMIC)) == NULL) {
 		rp->r.m_bus->cnt_text_lost++;
 		return;
 	}
@@ -188,7 +188,7 @@
 	struct mon_event_text *ep;
 
 	if (rp->nevents >= EVENT_MAX ||
-	    (ep = kmem_cache_alloc(rp->e_slab, SLAB_ATOMIC)) == NULL) {
+	    (ep = kmem_cache_alloc(rp->e_slab, GFP_ATOMIC)) == NULL) {
 		rp->r.m_bus->cnt_text_lost++;
 		return;
 	}
@@ -450,7 +450,7 @@
 /*
  * Slab interface: constructor.
  */
-static void mon_text_ctor(void *mem, kmem_cache_t *slab, unsigned long sflags)
+static void mon_text_ctor(void *mem, struct kmem_cache *slab, unsigned long sflags)
 {
 	/*
 	 * Nothing to initialize. No, really!
diff --git a/drivers/usb/net/catc.c b/drivers/usb/net/catc.c
index 907b820..4852012 100644
--- a/drivers/usb/net/catc.c
+++ b/drivers/usb/net/catc.c
@@ -345,7 +345,7 @@
 		} 
 	}
 resubmit:
-	status = usb_submit_urb (urb, SLAB_ATOMIC);
+	status = usb_submit_urb (urb, GFP_ATOMIC);
 	if (status)
 		err ("can't resubmit intr, %s-%s, status %d",
 				catc->usbdev->bus->bus_name,
diff --git a/drivers/usb/net/kaweth.c b/drivers/usb/net/kaweth.c
index 7c906a4..fa78326 100644
--- a/drivers/usb/net/kaweth.c
+++ b/drivers/usb/net/kaweth.c
@@ -222,7 +222,7 @@
 	int suspend_lowmem_ctrl;
 	int linkstate;
 	int opened;
-	struct work_struct lowmem_work;
+	struct delayed_work lowmem_work;
 
 	struct usb_device *dev;
 	struct net_device *net;
@@ -530,9 +530,10 @@
 	kaweth_resubmit_int_urb(kaweth, GFP_ATOMIC);
 }
 
-static void kaweth_resubmit_tl(void *d)
+static void kaweth_resubmit_tl(struct work_struct *work)
 {
-	struct kaweth_device *kaweth = (struct kaweth_device *)d;
+	struct kaweth_device *kaweth =
+		container_of(work, struct kaweth_device, lowmem_work.work);
 
 	if (IS_BLOCKED(kaweth->status))
 		return;
@@ -1126,7 +1127,7 @@
 
 	/* kaweth is zeroed as part of alloc_netdev */
 
-	INIT_WORK(&kaweth->lowmem_work, kaweth_resubmit_tl, (void *)kaweth);
+	INIT_DELAYED_WORK(&kaweth->lowmem_work, kaweth_resubmit_tl);
 
 	SET_MODULE_OWNER(netdev);
 
diff --git a/drivers/usb/net/net1080.c b/drivers/usb/net/net1080.c
index a774105..4936359 100644
--- a/drivers/usb/net/net1080.c
+++ b/drivers/usb/net/net1080.c
@@ -383,7 +383,7 @@
 		int			status;
 
 		/* Send a flush */
-		urb = usb_alloc_urb(0, SLAB_ATOMIC);
+		urb = usb_alloc_urb(0, GFP_ATOMIC);
 		if (!urb)
 			return;
 
diff --git a/drivers/usb/net/pegasus.c b/drivers/usb/net/pegasus.c
index 69eb0db..d48c024 100644
--- a/drivers/usb/net/pegasus.c
+++ b/drivers/usb/net/pegasus.c
@@ -856,7 +856,7 @@
 		pegasus->stats.rx_missed_errors += ((d[3] & 0x7f) << 8) | d[4];
 	}
 
-	status = usb_submit_urb(urb, SLAB_ATOMIC);
+	status = usb_submit_urb(urb, GFP_ATOMIC);
 	if (status == -ENODEV)
 		netif_device_detach(pegasus->net);
 	if (status && netif_msg_timer(pegasus))
@@ -1281,9 +1281,9 @@
 static struct workqueue_struct *pegasus_workqueue = NULL;
 #define CARRIER_CHECK_DELAY (2 * HZ)
 
-static void check_carrier(void *data)
+static void check_carrier(struct work_struct *work)
 {
-	pegasus_t *pegasus = data;
+	pegasus_t *pegasus = container_of(work, pegasus_t, carrier_check.work);
 	set_carrier(pegasus->net);
 	if (!(pegasus->flags & PEGASUS_UNPLUG)) {
 		queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check,
@@ -1319,7 +1319,7 @@
 
 	tasklet_init(&pegasus->rx_tl, rx_fixup, (unsigned long) pegasus);
 
-	INIT_WORK(&pegasus->carrier_check, check_carrier, pegasus);
+	INIT_DELAYED_WORK(&pegasus->carrier_check, check_carrier);
 
 	pegasus->intf = intf;
 	pegasus->usb = dev;
diff --git a/drivers/usb/net/pegasus.h b/drivers/usb/net/pegasus.h
index 0064380..98f6898 100644
--- a/drivers/usb/net/pegasus.h
+++ b/drivers/usb/net/pegasus.h
@@ -95,7 +95,7 @@
 	int			dev_index;
 	int			intr_interval;
 	struct tasklet_struct	rx_tl;
-	struct work_struct	carrier_check;
+	struct delayed_work	carrier_check;
 	struct urb		*ctrl_urb, *rx_urb, *tx_urb, *intr_urb;
 	struct sk_buff		*rx_pool[RX_SKBS];
 	struct sk_buff		*rx_skb;
diff --git a/drivers/usb/net/rndis_host.c b/drivers/usb/net/rndis_host.c
index c2a28d8..99f26b3 100644
--- a/drivers/usb/net/rndis_host.c
+++ b/drivers/usb/net/rndis_host.c
@@ -469,7 +469,7 @@
 	struct rndis_halt	*halt;
 
 	/* try to clear any rndis state/activity (no i/o from stack!) */
-	halt = kcalloc(1, sizeof *halt, SLAB_KERNEL);
+	halt = kcalloc(1, sizeof *halt, GFP_KERNEL);
 	if (halt) {
 		halt->msg_type = RNDIS_MSG_HALT;
 		halt->msg_len = ccpu2(sizeof *halt);
diff --git a/drivers/usb/net/rtl8150.c b/drivers/usb/net/rtl8150.c
index 72171f9..c54235f 100644
--- a/drivers/usb/net/rtl8150.c
+++ b/drivers/usb/net/rtl8150.c
@@ -587,7 +587,7 @@
 	}
 
 resubmit:
-	status = usb_submit_urb (urb, SLAB_ATOMIC);
+	status = usb_submit_urb (urb, GFP_ATOMIC);
 	if (status == -ENODEV)
 		netif_device_detach(dev->netdev);
 	else if (status)
diff --git a/drivers/usb/net/usbnet.c b/drivers/usb/net/usbnet.c
index 7672e11..6e39e99 100644
--- a/drivers/usb/net/usbnet.c
+++ b/drivers/usb/net/usbnet.c
@@ -179,9 +179,9 @@
 	period = max ((int) dev->status->desc.bInterval,
 		(dev->udev->speed == USB_SPEED_HIGH) ? 7 : 3);
 
-	buf = kmalloc (maxp, SLAB_KERNEL);
+	buf = kmalloc (maxp, GFP_KERNEL);
 	if (buf) {
-		dev->interrupt = usb_alloc_urb (0, SLAB_KERNEL);
+		dev->interrupt = usb_alloc_urb (0, GFP_KERNEL);
 		if (!dev->interrupt) {
 			kfree (buf);
 			return -ENOMEM;
@@ -782,9 +782,10 @@
  * especially now that control transfers can be queued.
  */
 static void
-kevent (void *data)
+kevent (struct work_struct *work)
 {
-	struct usbnet		*dev = data;
+	struct usbnet		*dev =
+		container_of(work, struct usbnet, kevent);
 	int			status;
 
 	/* usb_clear_halt() needs a thread context */
@@ -1146,7 +1147,7 @@
 	skb_queue_head_init (&dev->done);
 	dev->bh.func = usbnet_bh;
 	dev->bh.data = (unsigned long) dev;
-	INIT_WORK (&dev->kevent, kevent, dev);
+	INIT_WORK (&dev->kevent, kevent);
 	dev->delay.function = usbnet_bh;
 	dev->delay.data = (unsigned long) dev;
 	init_timer (&dev->delay);
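
For non-delayed items such as usbnet's kevent the conversion is the same, except container_of() names the work_struct member directly (no ".work"). A minimal sketch with a hypothetical struct example_dev:

#include <linux/workqueue.h>

struct example_dev {
	struct work_struct kevent;
	unsigned long flags;
};

static void example_kevent(struct work_struct *work)
{
	struct example_dev *dev =
		container_of(work, struct example_dev, kevent);

	dev->flags = 0;		/* stand-in for the real deferred handling */
}

static void example_init(struct example_dev *dev)
{
	INIT_WORK(&dev->kevent, example_kevent);
}
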
diff --git a/drivers/usb/serial/aircable.c b/drivers/usb/serial/aircable.c
index b1b5707..86bcf63 100644
--- a/drivers/usb/serial/aircable.c
+++ b/drivers/usb/serial/aircable.c
@@ -92,6 +92,7 @@
 	struct circ_buf *rx_buf;	/* read buffer */
 	int rx_flags;			/* for throttilng */
 	struct work_struct rx_work;	/* work cue for the receiving line */
+	struct usb_serial_port *port;	/* USB port with which associated */
 };
 
 /* Private methods */
@@ -251,10 +252,11 @@
 	schedule_work(&port->work);
 }
 
-static void aircable_read(void *params)
+static void aircable_read(struct work_struct *work)
 {
-	struct usb_serial_port *port = params;
-	struct aircable_private *priv = usb_get_serial_port_data(port);
+	struct aircable_private *priv =
+		container_of(work, struct aircable_private, rx_work);
+	struct usb_serial_port *port = priv->port;
 	struct tty_struct *tty;
 	unsigned char *data;
 	int count;
@@ -349,7 +351,8 @@
 	}
 
 	priv->rx_flags &= ~(THROTTLED | ACTUALLY_THROTTLED);
-	INIT_WORK(&priv->rx_work, aircable_read, port);
+	priv->port = port;
+	INIT_WORK(&priv->rx_work, aircable_read);
 
 	usb_set_serial_port_data(serial->port[0], priv);
 
@@ -516,7 +519,7 @@
 					package_length - shift);
 			}
 		}
-		aircable_read(port);
+		aircable_read(&priv->rx_work);
 	}
 
 	/* Schedule the next read _if_ we are still open */
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 5e3ac28..83d0e21 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -430,13 +430,14 @@
 	int dp_in_close;			/* close in progress */
 	wait_queue_head_t dp_close_wait;	/* wait queue for close */
 	struct work_struct dp_wakeup_work;
+	struct usb_serial_port *dp_port;
 };
 
 
 /* Local Function Declarations */
 
 static void digi_wakeup_write( struct usb_serial_port *port );
-static void digi_wakeup_write_lock(void *);
+static void digi_wakeup_write_lock(struct work_struct *work);
 static int digi_write_oob_command( struct usb_serial_port *port,
 	unsigned char *buf, int count, int interruptible );
 static int digi_write_inb_command( struct usb_serial_port *port,
@@ -598,11 +599,12 @@
 *  on writes.
 */
 
-static void digi_wakeup_write_lock(void *arg)
+static void digi_wakeup_write_lock(struct work_struct *work)
 {
-	struct usb_serial_port *port = arg;
+	struct digi_port *priv =
+		container_of(work, struct digi_port, dp_wakeup_work);
+	struct usb_serial_port *port = priv->dp_port;
 	unsigned long flags;
-	struct digi_port *priv = usb_get_serial_port_data(port);
 
 
 	spin_lock_irqsave( &priv->dp_port_lock, flags );
@@ -1702,8 +1704,8 @@
 		init_waitqueue_head( &priv->dp_flush_wait );
 		priv->dp_in_close = 0;
 		init_waitqueue_head( &priv->dp_close_wait );
-		INIT_WORK(&priv->dp_wakeup_work,
-				digi_wakeup_write_lock, serial->port[i]);
+		INIT_WORK(&priv->dp_wakeup_work, digi_wakeup_write_lock);
+		priv->dp_port = serial->port[i];
 
 		/* initialize write wait queue for this port */
 		init_waitqueue_head( &serial->port[i]->write_wait );
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 89ce277..72e4d48 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -559,7 +559,8 @@
 	char prev_status, diff_status;        /* Used for TIOCMIWAIT */
 	__u8 rx_flags;		/* receive state flags (throttling) */
 	spinlock_t rx_lock;	/* spinlock for receive state */
-	struct work_struct rx_work;
+	struct delayed_work rx_work;
+	struct usb_serial_port *port;
 	int rx_processed;
 	unsigned long rx_bytes;
 
@@ -593,7 +594,7 @@
 static int  ftdi_chars_in_buffer	(struct usb_serial_port *port);
 static void ftdi_write_bulk_callback	(struct urb *urb);
 static void ftdi_read_bulk_callback	(struct urb *urb);
-static void ftdi_process_read		(void *param);
+static void ftdi_process_read		(struct work_struct *work);
 static void ftdi_set_termios		(struct usb_serial_port *port, struct termios * old);
 static int  ftdi_tiocmget               (struct usb_serial_port *port, struct file *file);
 static int  ftdi_tiocmset		(struct usb_serial_port *port, struct file * file, unsigned int set, unsigned int clear);
@@ -1201,7 +1202,8 @@
 		port->read_urb->transfer_buffer_length = BUFSZ;
 	}
 
-	INIT_WORK(&priv->rx_work, ftdi_process_read, port);
+	INIT_DELAYED_WORK(&priv->rx_work, ftdi_process_read);
+	priv->port = port;
 
 	/* Free port's existing write urb and transfer buffer. */
 	if (port->write_urb) {
@@ -1640,17 +1642,18 @@
 	priv->rx_bytes += countread;
 	spin_unlock_irqrestore(&priv->rx_lock, flags);
 
-	ftdi_process_read(port);
+	ftdi_process_read(&priv->rx_work.work);
 
 } /* ftdi_read_bulk_callback */
 
 
-static void ftdi_process_read (void *param)
+static void ftdi_process_read (struct work_struct *work)
 { /* ftdi_process_read */
-	struct usb_serial_port *port = (struct usb_serial_port*)param;
+	struct ftdi_private *priv =
+		container_of(work, struct ftdi_private, rx_work.work);
+	struct usb_serial_port *port = priv->port;
 	struct urb *urb;
 	struct tty_struct *tty;
-	struct ftdi_private *priv;
 	char error_flag;
 	unsigned char *data;
 
@@ -2179,7 +2182,7 @@
 	spin_unlock_irqrestore(&priv->rx_lock, flags);
 
 	if (actually_throttled)
-		schedule_work(&priv->rx_work);
+		schedule_delayed_work(&priv->rx_work, 0);
 }
 
 static int __init ftdi_init (void)
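Where the old handler was also (re)queued from elsewhere, as in ftdi_sio above, the field becomes a struct delayed_work: the callback unwraps it through the embedded .work member, and callers re-queue with a delay of 0 to mean "run as soon as possible". A minimal sketch under the same assumptions (hypothetical struct my_priv, my_rx_fn):

	#include <linux/kernel.h>
	#include <linux/workqueue.h>

	struct my_priv {
		struct delayed_work rx_work;	/* was a plain struct work_struct */
	};

	static void my_rx_fn(struct work_struct *work)
	{
		/* the work pointer is the .work member inside the delayed_work */
		struct my_priv *priv =
			container_of(work, struct my_priv, rx_work.work);

		/* ... process pending receive data for priv ... */
	}

	static void my_priv_init(struct my_priv *priv)
	{
		INIT_DELAYED_WORK(&priv->rx_work, my_rx_fn);
	}

	static void my_kick(struct my_priv *priv)
	{
		/* delay of 0 jiffies: run as soon as the workqueue gets to it */
		schedule_delayed_work(&priv->rx_work, 0);
	}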
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index 9090051..e09a0bf 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -120,6 +120,8 @@
 	int			tx_throttled;
 	struct work_struct			wakeup_work;
 	struct work_struct			unthrottle_work;
+	struct usb_serial	*serial;
+	struct usb_serial_port	*port;
 };
 
 
@@ -175,9 +177,11 @@
 };
 #endif
 
-static void keyspan_pda_wakeup_write( struct usb_serial_port *port )
+static void keyspan_pda_wakeup_write(struct work_struct *work)
 {
-
+	struct keyspan_pda_private *priv =
+		container_of(work, struct keyspan_pda_private, wakeup_work);
+	struct usb_serial_port *port = priv->port;
 	struct tty_struct *tty = port->tty;
 
 	/* wake up port processes */
@@ -187,8 +191,11 @@
 	tty_wakeup(tty);
 }
 
-static void keyspan_pda_request_unthrottle( struct usb_serial *serial )
+static void keyspan_pda_request_unthrottle(struct work_struct *work)
 {
+	struct keyspan_pda_private *priv =
+		container_of(work, struct keyspan_pda_private, unthrottle_work);
+	struct usb_serial *serial = priv->serial;
 	int result;
 
 	dbg(" request_unthrottle");
@@ -765,11 +772,10 @@
 		return (1); /* error */
 	usb_set_serial_port_data(serial->port[0], priv);
 	init_waitqueue_head(&serial->port[0]->write_wait);
-	INIT_WORK(&priv->wakeup_work, (void *)keyspan_pda_wakeup_write,
-			(void *)(serial->port[0]));
-	INIT_WORK(&priv->unthrottle_work,
-			(void *)keyspan_pda_request_unthrottle,
-			(void *)(serial));
+	INIT_WORK(&priv->wakeup_work, keyspan_pda_wakeup_write);
+	INIT_WORK(&priv->unthrottle_work, keyspan_pda_request_unthrottle);
+	priv->serial = serial;
+	priv->port = serial->port[0];
 	return (0);
 }
 
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 82cd15b..70f93b1 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -363,7 +363,7 @@
 
 	/* Initialising the write urb pool */
 	for (j = 0; j < NUM_URBS; ++j) {
-		urb = usb_alloc_urb(0,SLAB_ATOMIC);
+		urb = usb_alloc_urb(0,GFP_ATOMIC);
 		mos7720_port->write_urb_pool[j] = urb;
 
 		if (urb == NULL) {
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 02c89e1..5432c63 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -826,7 +826,7 @@
 
 	/* Initialising the write urb pool */
 	for (j = 0; j < NUM_URBS; ++j) {
-		urb = usb_alloc_urb(0, SLAB_ATOMIC);
+		urb = usb_alloc_urb(0, GFP_ATOMIC);
 		mos7840_port->write_urb_pool[j] = urb;
 
 		if (urb == NULL) {
@@ -2786,7 +2786,7 @@
 				    i + 1, status);
 
 		}
-		mos7840_port->control_urb = usb_alloc_urb(0, SLAB_ATOMIC);
+		mos7840_port->control_urb = usb_alloc_urb(0, GFP_ATOMIC);
 		mos7840_port->ctrl_buf = kmalloc(16, GFP_KERNEL);
 
 	}
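The SLAB_ATOMIC/SLAB_KERNEL/SLAB_NOFS/SLAB_NOIO/SLAB_USER constants replaced throughout these hunks were legacy aliases for the corresponding GFP_ flags, so the conversion is a one-for-one rename to the gfp_t names that usb_alloc_urb(), kmem_cache_alloc() and mempool_alloc() actually take. A hedged illustration with a hypothetical helper:

	#include <linux/usb.h>
	#include <linux/slab.h>

	/* one-for-one rename: SLAB_ATOMIC -> GFP_ATOMIC, SLAB_KERNEL -> GFP_KERNEL,
	 * SLAB_NOFS -> GFP_NOFS, SLAB_NOIO -> GFP_NOIO, SLAB_USER -> GFP_USER
	 */
	static struct urb *example_alloc_urb_atomic(void)
	{
		/* atomic context (IRQ handler, spinlock held): must not sleep */
		return usb_alloc_urb(0, GFP_ATOMIC);
	}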
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index c1257d5..3d5072f 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -533,9 +533,10 @@
 	schedule_work(&port->work);
 }
 
-static void usb_serial_port_work(void *private)
+static void usb_serial_port_work(struct work_struct *work)
 {
-	struct usb_serial_port *port = private;
+	struct usb_serial_port *port =
+		container_of(work, struct usb_serial_port, work);
 	struct tty_struct *tty;
 
 	dbg("%s - port %d", __FUNCTION__, port->number);
@@ -799,7 +800,7 @@
 		port->serial = serial;
 		spin_lock_init(&port->lock);
 		mutex_init(&port->mutex);
-		INIT_WORK(&port->work, usb_serial_port_work, port);
+		INIT_WORK(&port->work, usb_serial_port_work);
 		serial->port[i] = port;
 	}
 
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 4d1cd7a..154c7d2 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -227,6 +227,7 @@
 	struct list_head	rx_urbs_submitted;
 	struct list_head	rx_urb_q;
 	struct work_struct	rx_work;
+	struct usb_serial_port	*port;
 	struct list_head	tx_urbs_free;
 	struct list_head	tx_urbs_submitted;
 };
@@ -241,7 +242,7 @@
 static int start_port_read(struct usb_serial_port *port);
 static struct whiteheat_urb_wrap *urb_to_wrap(struct urb *urb, struct list_head *head);
 static struct list_head *list_first(struct list_head *head);
-static void rx_data_softint(void *private);
+static void rx_data_softint(struct work_struct *work);
 
 static int firm_send_command(struct usb_serial_port *port, __u8 command, __u8 *data, __u8 datasize);
 static int firm_open(struct usb_serial_port *port);
@@ -424,7 +425,8 @@
 		spin_lock_init(&info->lock);
 		info->flags = 0;
 		info->mcr = 0;
-		INIT_WORK(&info->rx_work, rx_data_softint, port);
+		INIT_WORK(&info->rx_work, rx_data_softint);
+		info->port = port;
 
 		INIT_LIST_HEAD(&info->rx_urbs_free);
 		INIT_LIST_HEAD(&info->rx_urbs_submitted);
@@ -949,7 +951,7 @@
 	spin_unlock_irqrestore(&info->lock, flags);
 
 	if (actually_throttled)
-		rx_data_softint(port);
+		rx_data_softint(&info->rx_work);
 
 	return;
 }
@@ -1400,10 +1402,11 @@
 }
 
 
-static void rx_data_softint(void *private)
+static void rx_data_softint(struct work_struct *work)
 {
-	struct usb_serial_port *port = (struct usb_serial_port *)private;
-	struct whiteheat_private *info = usb_get_serial_port_data(port);
+	struct whiteheat_private *info =
+		container_of(work, struct whiteheat_private, rx_work);
+	struct usb_serial_port *port = info->port;
 	struct tty_struct *tty = port->tty;
 	struct whiteheat_urb_wrap *wrap;
 	struct urb *urb;
diff --git a/drivers/usb/storage/onetouch.c b/drivers/usb/storage/onetouch.c
index 3a158d5..e565d3d 100644
--- a/drivers/usb/storage/onetouch.c
+++ b/drivers/usb/storage/onetouch.c
@@ -76,7 +76,7 @@
 	input_sync(dev);
 
 resubmit:
-	status = usb_submit_urb (urb, SLAB_ATOMIC);
+	status = usb_submit_urb (urb, GFP_ATOMIC);
 	if (status)
 		err ("can't resubmit intr, %s-%s/input0, status %d",
 			onetouch->udev->bus->bus_name,
@@ -154,7 +154,7 @@
 		goto fail1;
 
 	onetouch->data = usb_buffer_alloc(udev, ONETOUCH_PKT_LEN,
-					  SLAB_ATOMIC, &onetouch->data_dma);
+					  GFP_ATOMIC, &onetouch->data_dma);
 	if (!onetouch->data)
 		goto fail1;
 
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index 47644b5..323293a 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -427,7 +427,7 @@
 	US_DEBUGP("%s: xfer %u bytes, %d entries\n", __FUNCTION__,
 			length, num_sg);
 	result = usb_sg_init(&us->current_sg, us->pusb_dev, pipe, 0,
-			sg, num_sg, length, SLAB_NOIO);
+			sg, num_sg, length, GFP_NOIO);
 	if (result) {
 		US_DEBUGP("usb_sg_init returned %d\n", result);
 		return USB_STOR_XFER_ERROR;
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index b401084..7064450 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -49,7 +49,7 @@
 
 #include <linux/sched.h>
 #include <linux/errno.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
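The include change here (and in drivers/w1 and fs/afs below) reflects try_to_freeze() and the other freezer helpers moving out of <linux/suspend.h> into the new <linux/freezer.h>, so kernel threads that cooperate with the freezer include that header explicitly. A minimal sketch of such a thread loop, with a hypothetical thread function:

	#include <linux/kthread.h>
	#include <linux/freezer.h>
	#include <linux/delay.h>

	static int example_kthread(void *data)
	{
		while (!kthread_should_stop()) {
			try_to_freeze();		/* park here across suspend */
			/* ... do the thread's periodic work ... */
			msleep_interruptible(1000);
		}
		return 0;
	}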
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 302174b..31f476a 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -383,9 +383,9 @@
 		softback_top = 0;
 }
 
-static void fb_flashcursor(void *private)
+static void fb_flashcursor(struct work_struct *work)
 {
-	struct fb_info *info = private;
+	struct fb_info *info = container_of(work, struct fb_info, queue);
 	struct fbcon_ops *ops = info->fbcon_par;
 	struct display *p;
 	struct vc_data *vc = NULL;
@@ -442,7 +442,7 @@
 	if ((!info->queue.func || info->queue.func == fb_flashcursor) &&
 	    !(ops->flags & FBCON_FLAGS_CURSOR_TIMER)) {
 		if (!info->queue.func)
-			INIT_WORK(&info->queue, fb_flashcursor, info);
+			INIT_WORK(&info->queue, fb_flashcursor);
 
 		init_timer(&ops->cursor_timer);
 		ops->cursor_timer.function = cursor_timer_handler;
diff --git a/drivers/video/geode/gxfb_core.c b/drivers/video/geode/gxfb_core.c
index 0d3643f..a454dcb 100644
--- a/drivers/video/geode/gxfb_core.c
+++ b/drivers/video/geode/gxfb_core.c
@@ -380,7 +380,7 @@
 }
 
 static struct pci_device_id gxfb_id_table[] = {
-	{ PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_VIDEO,
+	{ PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_GX_VIDEO,
 	  PCI_ANY_ID, PCI_ANY_ID, PCI_BASE_CLASS_DISPLAY << 16,
 	  0xff0000, 0 },
 	{ 0, }
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index 8a8ae55..38eb0b6 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -964,9 +964,10 @@
  * Our LCD controller task (which is called when we blank or unblank)
  * via keventd.
  */
-static void pxafb_task(void *dummy)
+static void pxafb_task(struct work_struct *work)
 {
-	struct pxafb_info *fbi = dummy;
+	struct pxafb_info *fbi =
+		container_of(work, struct pxafb_info, task);
 	u_int state = xchg(&fbi->task_state, -1);
 
 	set_ctrlr_state(fbi, state);
@@ -1159,7 +1160,7 @@
 	}
 
 	init_waitqueue_head(&fbi->ctrlr_wait);
-	INIT_WORK(&fbi->task, pxafb_task, fbi);
+	INIT_WORK(&fbi->task, pxafb_task);
 	init_MUTEX(&fbi->ctrlr_sem);
 
 	return fbi;
diff --git a/drivers/w1/Makefile b/drivers/w1/Makefile
index 93845a2..6bb0b54 100644
--- a/drivers/w1/Makefile
+++ b/drivers/w1/Makefile
@@ -2,10 +2,6 @@
 # Makefile for the Dallas's 1-wire bus.
 #
 
-ifeq ($(CONFIG_W1_DS2433_CRC), y)
-EXTRA_CFLAGS	+= -DCONFIG_W1_F23_CRC
-endif
-
 obj-$(CONFIG_W1)	+= wire.o
 wire-objs		:= w1.o w1_int.o w1_family.o w1_netlink.o w1_io.o
 
diff --git a/drivers/w1/slaves/Makefile b/drivers/w1/slaves/Makefile
index 70e21e2..725dcfd 100644
--- a/drivers/w1/slaves/Makefile
+++ b/drivers/w1/slaves/Makefile
@@ -2,10 +2,6 @@
 # Makefile for the Dallas's 1-wire slaves.
 #
 
-ifeq ($(CONFIG_W1_SLAVE_DS2433_CRC), y)
-EXTRA_CFLAGS += -DCONFIG_W1_F23_CRC
-endif
-
 obj-$(CONFIG_W1_SLAVE_THERM)	+= w1_therm.o
 obj-$(CONFIG_W1_SLAVE_SMEM)	+= w1_smem.o
 obj-$(CONFIG_W1_SLAVE_DS2433)	+= w1_ds2433.o
diff --git a/drivers/w1/slaves/w1_ds2433.c b/drivers/w1/slaves/w1_ds2433.c
index 2ac238f..8ea17a5 100644
--- a/drivers/w1/slaves/w1_ds2433.c
+++ b/drivers/w1/slaves/w1_ds2433.c
@@ -13,7 +13,7 @@
 #include <linux/device.h>
 #include <linux/types.h>
 #include <linux/delay.h>
-#ifdef CONFIG_W1_F23_CRC
+#ifdef CONFIG_W1_SLAVE_DS2433_CRC
 #include <linux/crc16.h>
 
 #define CRC16_INIT		0
@@ -62,7 +62,7 @@
 	return count;
 }
 
-#ifdef CONFIG_W1_F23_CRC
+#ifdef CONFIG_W1_SLAVE_DS2433_CRC
 static int w1_f23_refresh_block(struct w1_slave *sl, struct w1_f23_data *data,
 				int block)
 {
@@ -89,13 +89,13 @@
 
 	return 0;
 }
-#endif	/* CONFIG_W1_F23_CRC */
+#endif	/* CONFIG_W1_SLAVE_DS2433_CRC */
 
 static ssize_t w1_f23_read_bin(struct kobject *kobj, char *buf, loff_t off,
 			       size_t count)
 {
 	struct w1_slave *sl = kobj_to_w1_slave(kobj);
-#ifdef CONFIG_W1_F23_CRC
+#ifdef CONFIG_W1_SLAVE_DS2433_CRC
 	struct w1_f23_data *data = sl->family_data;
 	int i, min_page, max_page;
 #else
@@ -107,7 +107,7 @@
 
 	mutex_lock(&sl->master->mutex);
 
-#ifdef CONFIG_W1_F23_CRC
+#ifdef CONFIG_W1_SLAVE_DS2433_CRC
 
 	min_page = (off >> W1_PAGE_BITS);
 	max_page = (off + count - 1) >> W1_PAGE_BITS;
@@ -119,7 +119,7 @@
 	}
 	memcpy(buf, &data->memory[off], count);
 
-#else 	/* CONFIG_W1_F23_CRC */
+#else 	/* CONFIG_W1_SLAVE_DS2433_CRC */
 
 	/* read directly from the EEPROM */
 	if (w1_reset_select_slave(sl)) {
@@ -133,7 +133,7 @@
 	w1_write_block(sl->master, wrbuf, 3);
 	w1_read_block(sl->master, buf, count);
 
-#endif	/* CONFIG_W1_F23_CRC */
+#endif	/* CONFIG_W1_SLAVE_DS2433_CRC */
 
 out_up:
 	mutex_unlock(&sl->master->mutex);
@@ -208,7 +208,7 @@
 	if ((count = w1_f23_fix_count(off, count, W1_EEPROM_SIZE)) == 0)
 		return 0;
 
-#ifdef CONFIG_W1_F23_CRC
+#ifdef CONFIG_W1_SLAVE_DS2433_CRC
 	/* can only write full blocks in cached mode */
 	if ((off & W1_PAGE_MASK) || (count & W1_PAGE_MASK)) {
 		dev_err(&sl->dev, "invalid offset/count off=%d cnt=%zd\n",
@@ -223,7 +223,7 @@
 			return -EINVAL;
 		}
 	}
-#endif	/* CONFIG_W1_F23_CRC */
+#endif	/* CONFIG_W1_SLAVE_DS2433_CRC */
 
 	mutex_lock(&sl->master->mutex);
 
@@ -262,7 +262,7 @@
 static int w1_f23_add_slave(struct w1_slave *sl)
 {
 	int err;
-#ifdef CONFIG_W1_F23_CRC
+#ifdef CONFIG_W1_SLAVE_DS2433_CRC
 	struct w1_f23_data *data;
 
 	data = kmalloc(sizeof(struct w1_f23_data), GFP_KERNEL);
@@ -271,24 +271,24 @@
 	memset(data, 0, sizeof(struct w1_f23_data));
 	sl->family_data = data;
 
-#endif	/* CONFIG_W1_F23_CRC */
+#endif	/* CONFIG_W1_SLAVE_DS2433_CRC */
 
 	err = sysfs_create_bin_file(&sl->dev.kobj, &w1_f23_bin_attr);
 
-#ifdef CONFIG_W1_F23_CRC
+#ifdef CONFIG_W1_SLAVE_DS2433_CRC
 	if (err)
 		kfree(data);
-#endif	/* CONFIG_W1_F23_CRC */
+#endif	/* CONFIG_W1_SLAVE_DS2433_CRC */
 
 	return err;
 }
 
 static void w1_f23_remove_slave(struct w1_slave *sl)
 {
-#ifdef CONFIG_W1_F23_CRC
+#ifdef CONFIG_W1_SLAVE_DS2433_CRC
 	kfree(sl->family_data);
 	sl->family_data = NULL;
-#endif	/* CONFIG_W1_F23_CRC */
+#endif	/* CONFIG_W1_SLAVE_DS2433_CRC */
 	sysfs_remove_bin_file(&sl->dev.kobj, &w1_f23_bin_attr);
 }
 
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index de3e979..63c0724 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -31,6 +31,7 @@
 #include <linux/slab.h>
 #include <linux/sched.h>
 #include <linux/kthread.h>
+#include <linux/freezer.h>
 
 #include <asm/atomic.h>
 
diff --git a/fs/9p/mux.c b/fs/9p/mux.c
index 90a79c7..944273c 100644
--- a/fs/9p/mux.c
+++ b/fs/9p/mux.c
@@ -110,8 +110,8 @@
 };
 
 static int v9fs_poll_proc(void *);
-static void v9fs_read_work(void *);
-static void v9fs_write_work(void *);
+static void v9fs_read_work(struct work_struct *work);
+static void v9fs_write_work(struct work_struct *work);
 static void v9fs_pollwait(struct file *filp, wait_queue_head_t * wait_address,
 			  poll_table * p);
 static u16 v9fs_mux_get_tag(struct v9fs_mux_data *);
@@ -297,8 +297,8 @@
 	m->rbuf = NULL;
 	m->wpos = m->wsize = 0;
 	m->wbuf = NULL;
-	INIT_WORK(&m->rq, v9fs_read_work, m);
-	INIT_WORK(&m->wq, v9fs_write_work, m);
+	INIT_WORK(&m->rq, v9fs_read_work);
+	INIT_WORK(&m->wq, v9fs_write_work);
 	m->wsched = 0;
 	memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
 	m->poll_task = NULL;
@@ -458,13 +458,13 @@
 /**
  * v9fs_write_work - called when a transport can send some data
  */
-static void v9fs_write_work(void *a)
+static void v9fs_write_work(struct work_struct *work)
 {
 	int n, err;
 	struct v9fs_mux_data *m;
 	struct v9fs_req *req;
 
-	m = a;
+	m = container_of(work, struct v9fs_mux_data, wq);
 
 	if (m->err < 0) {
 		clear_bit(Wworksched, &m->wsched);
@@ -564,7 +564,7 @@
 /**
  * v9fs_read_work - called when there is some data to be read from a transport
  */
-static void v9fs_read_work(void *a)
+static void v9fs_read_work(struct work_struct *work)
 {
 	int n, err;
 	struct v9fs_mux_data *m;
@@ -572,7 +572,7 @@
 	struct v9fs_fcall *rcall;
 	char *rbuf;
 
-	m = a;
+	m = container_of(work, struct v9fs_mux_data, rq);
 
 	if (m->err < 0)
 		return;
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 5241c60..18f26cd 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -256,7 +256,7 @@
 v9fs_create(struct v9fs_session_info *v9ses, u32 pfid, char *name, u32 perm,
 	u8 mode, char *extension, u32 *fidp, struct v9fs_qid *qid, u32 *iounit)
 {
-	u32 fid;
+	int fid;
 	int err;
 	struct v9fs_fcall *fcall;
 
@@ -310,7 +310,7 @@
 v9fs_clone_walk(struct v9fs_session_info *v9ses, u32 fid, struct dentry *dentry)
 {
 	int err;
-	u32 nfid;
+	int nfid;
 	struct v9fs_fid *ret;
 	struct v9fs_fcall *fcall;
 
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index 9ade139..5023351 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -36,7 +36,7 @@
 	va_list args;
 
 	va_start(args, fmt);
-	vsprintf(error_buf, fmt, args);
+	vsnprintf(error_buf, sizeof(error_buf), fmt, args);
 	va_end(args);
 
 	printk(KERN_CRIT "ADFS-fs error (device %s)%s%s: %s\n",
@@ -212,12 +212,12 @@
 	return 0;
 }
 
-static kmem_cache_t *adfs_inode_cachep;
+static struct kmem_cache *adfs_inode_cachep;
 
 static struct inode *adfs_alloc_inode(struct super_block *sb)
 {
 	struct adfs_inode_info *ei;
-	ei = (struct adfs_inode_info *)kmem_cache_alloc(adfs_inode_cachep, SLAB_KERNEL);
+	ei = (struct adfs_inode_info *)kmem_cache_alloc(adfs_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	return &ei->vfs_inode;
@@ -228,7 +228,7 @@
 	kmem_cache_free(adfs_inode_cachep, ADFS_I(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct adfs_inode_info *ei = (struct adfs_inode_info *) foo;
 
diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
index ccd624e..f4de4b9 100644
--- a/fs/affs/amigaffs.c
+++ b/fs/affs/amigaffs.c
@@ -445,7 +445,7 @@
 	va_list	 args;
 
 	va_start(args,fmt);
-	vsprintf(ErrorBuffer,fmt,args);
+	vsnprintf(ErrorBuffer,sizeof(ErrorBuffer),fmt,args);
 	va_end(args);
 
 	printk(KERN_CRIT "AFFS error (device %s): %s(): %s\n", sb->s_id,
@@ -461,7 +461,7 @@
 	va_list	 args;
 
 	va_start(args,fmt);
-	vsprintf(ErrorBuffer,fmt,args);
+	vsnprintf(ErrorBuffer,sizeof(ErrorBuffer),fmt,args);
 	va_end(args);
 
 	printk(KERN_WARNING "AFFS warning (device %s): %s(): %s\n", sb->s_id,
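The adfs and affs hunks above bound the write into a fixed static buffer with vsnprintf(), so an over-long formatted message is truncated instead of overrunning the buffer. A minimal sketch of the bounded helper, with a hypothetical buffer and function name:

	#include <linux/kernel.h>

	static char example_buf[256];		/* hypothetical fixed-size buffer */

	static void example_warn(const char *fmt, ...)
	{
		va_list args;

		va_start(args, fmt);
		/* bounded: truncates rather than writing past example_buf */
		vsnprintf(example_buf, sizeof(example_buf), fmt, args);
		va_end(args);
		printk(KERN_WARNING "example: %s\n", example_buf);
	}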
diff --git a/fs/affs/bitmap.c b/fs/affs/bitmap.c
index b0b9536..b330009 100644
--- a/fs/affs/bitmap.c
+++ b/fs/affs/bitmap.c
@@ -289,12 +289,11 @@
 	sbi->s_bmap_count = (sbi->s_partition_size - sbi->s_reserved +
 				 sbi->s_bmap_bits - 1) / sbi->s_bmap_bits;
 	size = sbi->s_bmap_count * sizeof(*bm);
-	bm = sbi->s_bitmap = kmalloc(size, GFP_KERNEL);
+	bm = sbi->s_bitmap = kzalloc(size, GFP_KERNEL);
 	if (!sbi->s_bitmap) {
 		printk(KERN_ERR "AFFS: Bitmap allocation failed\n");
 		return -ENOMEM;
 	}
-	memset(sbi->s_bitmap, 0, size);
 
 	bmap_blk = (__be32 *)sbi->s_root_bh->b_data;
 	blk = sb->s_blocksize / 4 - 49;
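Several hunks in this series (affs above, afs below) collapse a kmalloc() followed by memset(..., 0, size) into a single kzalloc(), which allocates and zeroes in one call. A small sketch with hypothetical names (struct example_sbi, example_alloc_bitmap):

	#include <linux/slab.h>
	#include <linux/errno.h>

	struct example_sbi {
		void *bitmap;			/* hypothetical field */
	};

	static int example_alloc_bitmap(struct example_sbi *sbi, size_t size)
	{
		/* was: kmalloc(size, GFP_KERNEL) followed by memset(p, 0, size) */
		sbi->bitmap = kzalloc(size, GFP_KERNEL);
		if (!sbi->bitmap)
			return -ENOMEM;
		return 0;
	}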
diff --git a/fs/affs/super.c b/fs/affs/super.c
index 5ea72c3..3de93e7 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -66,12 +66,12 @@
 	pr_debug("AFFS: write_super() at %lu, clean=%d\n", get_seconds(), clean);
 }
 
-static kmem_cache_t * affs_inode_cachep;
+static struct kmem_cache * affs_inode_cachep;
 
 static struct inode *affs_alloc_inode(struct super_block *sb)
 {
 	struct affs_inode_info *ei;
-	ei = (struct affs_inode_info *)kmem_cache_alloc(affs_inode_cachep, SLAB_KERNEL);
+	ei = (struct affs_inode_info *)kmem_cache_alloc(affs_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	ei->vfs_inode.i_version = 1;
@@ -83,7 +83,7 @@
 	kmem_cache_free(affs_inode_cachep, AFFS_I(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct affs_inode_info *ei = (struct affs_inode_info *) foo;
 
diff --git a/fs/afs/kafsasyncd.c b/fs/afs/kafsasyncd.c
index f09a794..615df24 100644
--- a/fs/afs/kafsasyncd.c
+++ b/fs/afs/kafsasyncd.c
@@ -20,6 +20,7 @@
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/completion.h>
+#include <linux/freezer.h>
 #include "cell.h"
 #include "server.h"
 #include "volume.h"
diff --git a/fs/afs/kafstimod.c b/fs/afs/kafstimod.c
index 65bc05a..694344e 100644
--- a/fs/afs/kafstimod.c
+++ b/fs/afs/kafstimod.c
@@ -13,6 +13,7 @@
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/completion.h>
+#include <linux/freezer.h>
 #include "cell.h"
 #include "volume.h"
 #include "kafstimod.h"
diff --git a/fs/afs/server.c b/fs/afs/server.c
index 22afaae..44aff81 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -55,13 +55,12 @@
 	_enter("%p,%08x,", cell, ntohl(addr->s_addr));
 
 	/* allocate and initialise a server record */
-	server = kmalloc(sizeof(struct afs_server), GFP_KERNEL);
+	server = kzalloc(sizeof(struct afs_server), GFP_KERNEL);
 	if (!server) {
 		_leave(" = -ENOMEM");
 		return -ENOMEM;
 	}
 
-	memset(server, 0, sizeof(struct afs_server));
 	atomic_set(&server->usage, 1);
 
 	INIT_LIST_HEAD(&server->link);
diff --git a/fs/afs/super.c b/fs/afs/super.c
index 67d1f5c..18d9b77 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -35,7 +35,7 @@
 	struct afs_volume	*volume;
 };
 
-static void afs_i_init_once(void *foo, kmem_cache_t *cachep,
+static void afs_i_init_once(void *foo, struct kmem_cache *cachep,
 			    unsigned long flags);
 
 static int afs_get_sb(struct file_system_type *fs_type,
@@ -65,7 +65,7 @@
 	.put_super	= afs_put_super,
 };
 
-static kmem_cache_t *afs_inode_cachep;
+static struct kmem_cache *afs_inode_cachep;
 static atomic_t afs_count_active_inodes;
 
 /*****************************************************************************/
@@ -242,14 +242,12 @@
 	kenter("");
 
 	/* allocate a superblock info record */
-	as = kmalloc(sizeof(struct afs_super_info), GFP_KERNEL);
+	as = kzalloc(sizeof(struct afs_super_info), GFP_KERNEL);
 	if (!as) {
 		_leave(" = -ENOMEM");
 		return -ENOMEM;
 	}
 
-	memset(as, 0, sizeof(struct afs_super_info));
-
 	afs_get_volume(params->volume);
 	as->volume = params->volume;
 
@@ -384,7 +382,7 @@
 /*
  * initialise an inode cache slab element prior to any use
  */
-static void afs_i_init_once(void *_vnode, kmem_cache_t *cachep,
+static void afs_i_init_once(void *_vnode, struct kmem_cache *cachep,
 			    unsigned long flags)
 {
 	struct afs_vnode *vnode = (struct afs_vnode *) _vnode;
@@ -412,7 +410,7 @@
 	struct afs_vnode *vnode;
 
 	vnode = (struct afs_vnode *)
-		kmem_cache_alloc(afs_inode_cachep, SLAB_KERNEL);
+		kmem_cache_alloc(afs_inode_cachep, GFP_KERNEL);
 	if (!vnode)
 		return NULL;
 
diff --git a/fs/aio.c b/fs/aio.c
index 277a5f2..d3a6ec2 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -47,19 +47,19 @@
 unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
 /*----end sysctl variables---*/
 
-static kmem_cache_t	*kiocb_cachep;
-static kmem_cache_t	*kioctx_cachep;
+static struct kmem_cache	*kiocb_cachep;
+static struct kmem_cache	*kioctx_cachep;
 
 static struct workqueue_struct *aio_wq;
 
 /* Used for rare fput completion. */
-static void aio_fput_routine(void *);
-static DECLARE_WORK(fput_work, aio_fput_routine, NULL);
+static void aio_fput_routine(struct work_struct *);
+static DECLARE_WORK(fput_work, aio_fput_routine);
 
 static DEFINE_SPINLOCK(fput_lock);
 static LIST_HEAD(fput_head);
 
-static void aio_kick_handler(void *);
+static void aio_kick_handler(struct work_struct *);
 static void aio_queue_work(struct kioctx *);
 
 /* aio_setup
@@ -227,7 +227,7 @@
 
 	INIT_LIST_HEAD(&ctx->active_reqs);
 	INIT_LIST_HEAD(&ctx->run_list);
-	INIT_WORK(&ctx->wq, aio_kick_handler, ctx);
+	INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler);
 
 	if (aio_setup_ring(ctx) < 0)
 		goto out_freectx;
@@ -469,7 +469,7 @@
 		wake_up(&ctx->wait);
 }
 
-static void aio_fput_routine(void *data)
+static void aio_fput_routine(struct work_struct *data)
 {
 	spin_lock_irq(&fput_lock);
 	while (likely(!list_empty(&fput_head))) {
@@ -666,17 +666,6 @@
 	ssize_t (*retry)(struct kiocb *);
 	ssize_t ret;
 
-	if (iocb->ki_retried++ > 1024*1024) {
-		printk("Maximal retry count.  Bytes done %Zd\n",
-			iocb->ki_nbytes - iocb->ki_left);
-		return -EAGAIN;
-	}
-
-	if (!(iocb->ki_retried & 0xff)) {
-		pr_debug("%ld retry: %zd of %zd\n", iocb->ki_retried,
-			iocb->ki_nbytes - iocb->ki_left, iocb->ki_nbytes);
-	}
-
 	if (!(retry = iocb->ki_retry)) {
 		printk("aio_run_iocb: iocb->ki_retry = NULL\n");
 		return 0;
@@ -857,9 +846,9 @@
  *      space.
  * Run on aiod's context.
  */
-static void aio_kick_handler(void *data)
+static void aio_kick_handler(struct work_struct *work)
 {
-	struct kioctx *ctx = data;
+	struct kioctx *ctx = container_of(work, struct kioctx, wq.work);
 	mm_segment_t oldfs = get_fs();
 	int requeue;
 
@@ -874,7 +863,7 @@
 	 * we're in a worker thread already, don't use queue_delayed_work,
 	 */
 	if (requeue)
-		queue_work(aio_wq, &ctx->wq);
+		queue_delayed_work(aio_wq, &ctx->wq, 0);
 }
 
 
@@ -1005,9 +994,6 @@
 	kunmap_atomic(ring, KM_IRQ1);
 
 	pr_debug("added to ring %p at [%lu]\n", iocb, tail);
-
-	pr_debug("%ld retries: %zd of %zd\n", iocb->ki_retried,
-		iocb->ki_nbytes - iocb->ki_left, iocb->ki_nbytes);
 put_rq:
 	/* everything turned out well, dispose of the aiocb. */
 	ret = __aio_put_req(ctx, iocb);
@@ -1413,7 +1399,6 @@
 	kiocb->ki_iovec->iov_len = kiocb->ki_left;
 	kiocb->ki_nr_segs = 1;
 	kiocb->ki_cur_seg = 0;
-	kiocb->ki_nbytes = kiocb->ki_left;
 	return 0;
 }
 
@@ -1591,7 +1576,6 @@
 	req->ki_opcode = iocb->aio_lio_opcode;
 	init_waitqueue_func_entry(&req->ki_wait, aio_wake_function);
 	INIT_LIST_HEAD(&req->ki_wait.task_list);
-	req->ki_retried = 0;
 
 	ret = aio_setup_iocb(req);
 
diff --git a/fs/autofs/inode.c b/fs/autofs/inode.c
index 38ede5c..f968d13 100644
--- a/fs/autofs/inode.c
+++ b/fs/autofs/inode.c
@@ -28,10 +28,11 @@
 	/*
 	 * In the event of a failure in get_sb_nodev the superblock
 	 * info is not present so nothing else has been setup, so
-	 * just exit when we are called from deactivate_super.
+	 * just call kill_anon_super when we are called from
+	 * deactivate_super.
 	 */
 	if (!sbi)
-		return;
+		goto out_kill_sb;
 
 	if ( !sbi->catatonic )
 		autofs_catatonic_mode(sbi); /* Free wait queues, close pipe */
@@ -44,6 +45,7 @@
 
 	kfree(sb->s_fs_info);
 
+out_kill_sb:
 	DPRINTK(("autofs: shutting down\n"));
 	kill_anon_super(sb);
 }
@@ -209,7 +211,6 @@
 fail_free:
 	kfree(sbi);
 	s->s_fs_info = NULL;
-	kill_anon_super(s);
 fail_unlock:
 	return -EINVAL;
 }
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index ce7c0f1..9c48250 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -152,10 +152,11 @@
 	/*
 	 * In the event of a failure in get_sb_nodev the superblock
 	 * info is not present so nothing else has been setup, so
-	 * just exit when we are called from deactivate_super.
+	 * just call kill_anon_super when we are called from
+	 * deactivate_super.
 	 */
 	if (!sbi)
-		return;
+		goto out_kill_sb;
 
 	sb->s_fs_info = NULL;
 
@@ -167,6 +168,7 @@
 
 	kfree(sbi);
 
+out_kill_sb:
 	DPRINTK("shutting down");
 	kill_anon_super(sb);
 }
@@ -426,7 +428,6 @@
 fail_free:
 	kfree(sbi);
 	s->s_fs_info = NULL;
-	kill_anon_super(s);
 fail_unlock:
 	return -EINVAL;
 }
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index 07f7144..bce402e 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -61,7 +61,7 @@
 };
 
 /* slab cache for befs_inode_info objects */
-static kmem_cache_t *befs_inode_cachep;
+static struct kmem_cache *befs_inode_cachep;
 
 static const struct file_operations befs_dir_operations = {
 	.read		= generic_read_dir,
@@ -277,7 +277,7 @@
 {
         struct befs_inode_info *bi;
         bi = (struct befs_inode_info *)kmem_cache_alloc(befs_inode_cachep,
-							SLAB_KERNEL);
+							GFP_KERNEL);
         if (!bi)
                 return NULL;
         return &bi->vfs_inode;
@@ -289,7 +289,7 @@
         kmem_cache_free(befs_inode_cachep, BEFS_I(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
         struct befs_inode_info *bi = (struct befs_inode_info *) foo;
 	
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index ed27ffb..eac175e 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -228,12 +228,12 @@
 	unlock_kernel();
 }
 
-static kmem_cache_t * bfs_inode_cachep;
+static struct kmem_cache * bfs_inode_cachep;
 
 static struct inode *bfs_alloc_inode(struct super_block *sb)
 {
 	struct bfs_inode_info *bi;
-	bi = kmem_cache_alloc(bfs_inode_cachep, SLAB_KERNEL);
+	bi = kmem_cache_alloc(bfs_inode_cachep, GFP_KERNEL);
 	if (!bi)
 		return NULL;
 	return &bi->vfs_inode;
@@ -244,7 +244,7 @@
 	kmem_cache_free(bfs_inode_cachep, BFS_I(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct bfs_inode_info *bi = foo;
 
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index cc72bb4..be5869d 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -47,10 +47,6 @@
 static int load_elf_library(struct file *);
 static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int);
 
-#ifndef elf_addr_t
-#define elf_addr_t unsigned long
-#endif
-
 /*
  * If we don't support core dumping, then supply a NULL so we
  * don't even try.
@@ -243,8 +239,9 @@
 	if (interp_aout) {
 		argv = sp + 2;
 		envp = argv + argc + 1;
-		__put_user((elf_addr_t)(unsigned long)argv, sp++);
-		__put_user((elf_addr_t)(unsigned long)envp, sp++);
+		if (__put_user((elf_addr_t)(unsigned long)argv, sp++) ||
+		    __put_user((elf_addr_t)(unsigned long)envp, sp++))
+			return -EFAULT;
 	} else {
 		argv = sp;
 		envp = argv + argc + 1;
@@ -254,7 +251,8 @@
 	p = current->mm->arg_end = current->mm->arg_start;
 	while (argc-- > 0) {
 		size_t len;
-		__put_user((elf_addr_t)p, argv++);
+		if (__put_user((elf_addr_t)p, argv++))
+			return -EFAULT;
 		len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
 		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
 			return 0;
@@ -265,7 +263,8 @@
 	current->mm->arg_end = current->mm->env_start = p;
 	while (envc-- > 0) {
 		size_t len;
-		__put_user((elf_addr_t)p, envp++);
+		if (__put_user((elf_addr_t)p, envp++))
+			return -EFAULT;
 		len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
 		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
 			return 0;
@@ -545,7 +544,7 @@
 	unsigned long reloc_func_desc = 0;
 	char passed_fileno[6];
 	struct files_struct *files;
-	int have_pt_gnu_stack, executable_stack = EXSTACK_DEFAULT;
+	int executable_stack = EXSTACK_DEFAULT;
 	unsigned long def_flags = 0;
 	struct {
 		struct elfhdr elf_ex;
@@ -708,7 +707,6 @@
 				executable_stack = EXSTACK_DISABLE_X;
 			break;
 		}
-	have_pt_gnu_stack = (i < loc->elf_ex.e_phnum);
 
 	/* Some simple consistency checks for the interpreter */
 	if (elf_interpreter) {
@@ -856,7 +854,13 @@
 			 * default mmap base, as well as whatever program they
 			 * might try to exec.  This is because the brk will
 			 * follow the loader, and is not movable.  */
-			load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
+			if (current->flags & PF_RANDOMIZE)
+				load_bias = randomize_range(0x10000,
+							    ELF_ET_DYN_BASE,
+							    0);
+			else
+				load_bias = ELF_ET_DYN_BASE;
+			load_bias = ELF_PAGESTART(load_bias - vaddr);
 		}
 
 		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
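The binfmt_elf hunks above (and the fs/compat changes below) stop ignoring the return value of __put_user()/__get_user(), which can fail if the user page is unmapped or write-protected, and propagate -EFAULT instead. A minimal sketch of the checking pattern, with hypothetical names; the caller is assumed to have already verified the range with access_ok():

	#include <asm/uaccess.h>
	#include <linux/errno.h>

	static int example_put_pair(unsigned long __user *up,
				    unsigned long a, unsigned long b)
	{
		if (__put_user(a, up) || __put_user(b, up + 1))
			return -EFAULT;		/* fault while writing to userspace */
		return 0;
	}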
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index f86d5c9..ed9a61c 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -40,9 +40,6 @@
 #include <asm/pgalloc.h>
 
 typedef char *elf_caddr_t;
-#ifndef elf_addr_t
-#define elf_addr_t unsigned long
-#endif
 
 #if 0
 #define kdebug(fmt, ...) printk("FDPIC "fmt"\n" ,##__VA_ARGS__ )
diff --git a/fs/bio.c b/fs/bio.c
index aa4d09b..7ec737e 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -30,7 +30,7 @@
 
 #define BIO_POOL_SIZE 256
 
-static kmem_cache_t *bio_slab __read_mostly;
+static struct kmem_cache *bio_slab __read_mostly;
 
 #define BIOVEC_NR_POOLS 6
 
@@ -44,7 +44,7 @@
 struct biovec_slab {
 	int nr_vecs;
 	char *name; 
-	kmem_cache_t *slab;
+	struct kmem_cache *slab;
 };
 
 /*
@@ -940,16 +940,16 @@
  * run one bio_put() against the BIO.
  */
 
-static void bio_dirty_fn(void *data);
+static void bio_dirty_fn(struct work_struct *work);
 
-static DECLARE_WORK(bio_dirty_work, bio_dirty_fn, NULL);
+static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
 static DEFINE_SPINLOCK(bio_dirty_lock);
 static struct bio *bio_dirty_list;
 
 /*
  * This runs in process context
  */
-static void bio_dirty_fn(void *data)
+static void bio_dirty_fn(struct work_struct *work)
 {
 	unsigned long flags;
 	struct bio *bio;
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 36c0e7a..13816b4d 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -235,11 +235,11 @@
  */
 
 static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
-static kmem_cache_t * bdev_cachep __read_mostly;
+static struct kmem_cache * bdev_cachep __read_mostly;
 
 static struct inode *bdev_alloc_inode(struct super_block *sb)
 {
-	struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, SLAB_KERNEL);
+	struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	return &ei->vfs_inode;
@@ -253,7 +253,7 @@
 	kmem_cache_free(bdev_cachep, bdi);
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct bdev_inode *ei = (struct bdev_inode *) foo;
 	struct block_device *bdev = &ei->bdev;
diff --git a/fs/buffer.c b/fs/buffer.c
index 35527dc..517860f 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2908,7 +2908,7 @@
 /*
  * Buffer-head allocation
  */
-static kmem_cache_t *bh_cachep;
+static struct kmem_cache *bh_cachep;
 
 /*
  * Once the number of bh's in the machine exceeds this level, we start
@@ -2961,7 +2961,7 @@
 EXPORT_SYMBOL(free_buffer_head);
 
 static void
-init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
+init_buffer_head(void *data, struct kmem_cache *cachep, unsigned long flags)
 {
 	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
 			    SLAB_CTOR_CONSTRUCTOR) {
@@ -2972,7 +2972,6 @@
 	}
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static void buffer_exit_cpu(int cpu)
 {
 	int i;
@@ -2994,7 +2993,6 @@
 		buffer_exit_cpu((unsigned long)hcpu);
 	return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */
 
 void __init buffer_init(void)
 {
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 84976cd..71bc87a 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -34,6 +34,7 @@
 #include <linux/mempool.h>
 #include <linux/delay.h>
 #include <linux/kthread.h>
+#include <linux/freezer.h>
 #include "cifsfs.h"
 #include "cifspdu.h"
 #define DECLARE_GLOBALS_HERE
@@ -81,7 +82,7 @@
 extern mempool_t *cifs_req_poolp;
 extern mempool_t *cifs_mid_poolp;
 
-extern kmem_cache_t *cifs_oplock_cachep;
+extern struct kmem_cache *cifs_oplock_cachep;
 
 static int
 cifs_read_super(struct super_block *sb, void *data,
@@ -232,11 +233,11 @@
 		return generic_permission(inode, mask, NULL);
 }
 
-static kmem_cache_t *cifs_inode_cachep;
-static kmem_cache_t *cifs_req_cachep;
-static kmem_cache_t *cifs_mid_cachep;
-kmem_cache_t *cifs_oplock_cachep;
-static kmem_cache_t *cifs_sm_req_cachep;
+static struct kmem_cache *cifs_inode_cachep;
+static struct kmem_cache *cifs_req_cachep;
+static struct kmem_cache *cifs_mid_cachep;
+struct kmem_cache *cifs_oplock_cachep;
+static struct kmem_cache *cifs_sm_req_cachep;
 mempool_t *cifs_sm_req_poolp;
 mempool_t *cifs_req_poolp;
 mempool_t *cifs_mid_poolp;
@@ -245,7 +246,7 @@
 cifs_alloc_inode(struct super_block *sb)
 {
 	struct cifsInodeInfo *cifs_inode;
-	cifs_inode = kmem_cache_alloc(cifs_inode_cachep, SLAB_KERNEL);
+	cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL);
 	if (!cifs_inode)
 		return NULL;
 	cifs_inode->cifsAttrs = 0x20;	/* default */
@@ -668,7 +669,7 @@
 };
 
 static void
-cifs_init_once(void *inode, kmem_cache_t * cachep, unsigned long flags)
+cifs_init_once(void *inode, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct cifsInodeInfo *cifsi = inode;
 
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 71f7791..2caca06 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -31,6 +31,7 @@
 #include <linux/delay.h>
 #include <linux/completion.h>
 #include <linux/pagevec.h>
+#include <linux/freezer.h>
 #include <asm/uaccess.h>
 #include <asm/processor.h>
 #include "cifspdu.h"
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index bbc9cd3..aedf683 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -153,7 +153,7 @@
    albeit slightly larger than necessary and maxbuffersize 
    defaults to this and can not be bigger */
 	ret_buf =
-	    (struct smb_hdr *) mempool_alloc(cifs_req_poolp, SLAB_KERNEL | SLAB_NOFS);
+	    (struct smb_hdr *) mempool_alloc(cifs_req_poolp, GFP_KERNEL | GFP_NOFS);
 
 	/* clear the first few header bytes */
 	/* for most paths, more is cleared in header_assemble */
@@ -192,7 +192,7 @@
    albeit slightly larger than necessary and maxbuffersize 
    defaults to this and can not be bigger */
 	ret_buf =
-	    (struct smb_hdr *) mempool_alloc(cifs_sm_req_poolp, SLAB_KERNEL | SLAB_NOFS);
+	    (struct smb_hdr *) mempool_alloc(cifs_sm_req_poolp, GFP_KERNEL | GFP_NOFS);
 	if (ret_buf) {
 	/* No need to clear memory here, cleared in header assemble */
 	/*	memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 48d47b4..f80007e 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -34,7 +34,7 @@
 #include "cifs_debug.h"
   
 extern mempool_t *cifs_mid_poolp;
-extern kmem_cache_t *cifs_oplock_cachep;
+extern struct kmem_cache *cifs_oplock_cachep;
 
 static struct mid_q_entry *
 AllocMidQEntry(const struct smb_hdr *smb_buffer, struct cifsSesInfo *ses)
@@ -51,7 +51,7 @@
 	}
 	
 	temp = (struct mid_q_entry *) mempool_alloc(cifs_mid_poolp,
-						    SLAB_KERNEL | SLAB_NOFS);
+						    GFP_KERNEL | GFP_NOFS);
 	if (temp == NULL)
 		return temp;
 	else {
@@ -118,7 +118,7 @@
 		return NULL;
 	}
 	temp = (struct oplock_q_entry *) kmem_cache_alloc(cifs_oplock_cachep,
-						       SLAB_KERNEL);
+						       GFP_KERNEL);
 	if (temp == NULL)
 		return temp;
 	else {
diff --git a/fs/coda/inode.c b/fs/coda/inode.c
index 88d1233..b64659f 100644
--- a/fs/coda/inode.c
+++ b/fs/coda/inode.c
@@ -38,12 +38,12 @@
 static void coda_put_super(struct super_block *);
 static int coda_statfs(struct dentry *dentry, struct kstatfs *buf);
 
-static kmem_cache_t * coda_inode_cachep;
+static struct kmem_cache * coda_inode_cachep;
 
 static struct inode *coda_alloc_inode(struct super_block *sb)
 {
 	struct coda_inode_info *ei;
-	ei = (struct coda_inode_info *)kmem_cache_alloc(coda_inode_cachep, SLAB_KERNEL);
+	ei = (struct coda_inode_info *)kmem_cache_alloc(coda_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	memset(&ei->c_fid, 0, sizeof(struct CodaFid));
@@ -58,7 +58,7 @@
 	kmem_cache_free(coda_inode_cachep, ITOC(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct coda_inode_info *ei = (struct coda_inode_info *) foo;
 
diff --git a/fs/compat.c b/fs/compat.c
index 06dad66..a7e3f16 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -871,7 +871,7 @@
 
 	retval = -EINVAL;
 
-	if (type_page) {
+	if (type_page && data_page) {
 		if (!strcmp((char *)type_page, SMBFS_NAME)) {
 			do_smb_super_data_conv((void *)data_page);
 		} else if (!strcmp((char *)type_page, NCPFS_NAME)) {
@@ -1144,7 +1144,9 @@
 	lastdirent = buf.previous;
 	if (lastdirent) {
 		typeof(lastdirent->d_off) d_off = file->f_pos;
-		__put_user_unaligned(d_off, &lastdirent->d_off);
+		error = -EFAULT;
+		if (__put_user_unaligned(d_off, &lastdirent->d_off))
+			goto out_putf;
 		error = count - buf.count;
 	}
 
@@ -1611,14 +1613,14 @@
 		nr &= ~1UL;
 		while (nr) {
 			unsigned long h, l;
-			__get_user(l, ufdset);
-			__get_user(h, ufdset+1);
+			if (__get_user(l, ufdset) || __get_user(h, ufdset+1))
+				return -EFAULT;
 			ufdset += 2;
 			*fdset++ = h << 32 | l;
 			nr -= 2;
 		}
-		if (odd)
-			__get_user(*fdset, ufdset);
+		if (odd && __get_user(*fdset, ufdset))
+			return -EFAULT;
 	} else {
 		/* Tricky, must clear full unsigned long in the
 		 * kernel fdset at the end, this makes sure that
@@ -1630,14 +1632,14 @@
 }
 
 static
-void compat_set_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
-			unsigned long *fdset)
+int compat_set_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
+		      unsigned long *fdset)
 {
 	unsigned long odd;
 	nr = ROUND_UP(nr, __COMPAT_NFDBITS);
 
 	if (!ufdset)
-		return;
+		return 0;
 
 	odd = nr & 1UL;
 	nr &= ~1UL;
@@ -1645,13 +1647,14 @@
 		unsigned long h, l;
 		l = *fdset++;
 		h = l >> 32;
-		__put_user(l, ufdset);
-		__put_user(h, ufdset+1);
+		if (__put_user(l, ufdset) || __put_user(h, ufdset+1))
+			return -EFAULT;
 		ufdset += 2;
 		nr -= 2;
 	}
-	if (odd)
-		__put_user(*fdset, ufdset);
+	if (odd && __put_user(*fdset, ufdset))
+		return -EFAULT;
+	return 0;
 }
 
 
@@ -1726,10 +1729,10 @@
 		ret = 0;
 	}
 
-	compat_set_fd_set(n, inp, fds.res_in);
-	compat_set_fd_set(n, outp, fds.res_out);
-	compat_set_fd_set(n, exp, fds.res_ex);
-
+	if (compat_set_fd_set(n, inp, fds.res_in) ||
+	    compat_set_fd_set(n, outp, fds.res_out) ||
+	    compat_set_fd_set(n, exp, fds.res_ex))
+		ret = -EFAULT;
 out:
 	kfree(bits);
 out_nofds:
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 47bb78d..bcc3caf 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -211,8 +211,10 @@
 	up_native =
 		compat_alloc_user_space(sizeof(struct video_still_picture));
 
-	put_user(compat_ptr(fp), &up_native->iFrame);
-	put_user(size, &up_native->size);
+	err =  put_user(compat_ptr(fp), &up_native->iFrame);
+	err |= put_user(size, &up_native->size);
+	if (err)
+		return -EFAULT;
 
 	err = sys_ioctl(fd, cmd, (unsigned long) up_native);
 
@@ -236,8 +238,10 @@
 	err |= get_user(length, &up->length);
 
 	up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
-	put_user(compat_ptr(palp), &up_native->palette);
-	put_user(length, &up_native->length);
+	err  = put_user(compat_ptr(palp), &up_native->palette);
+	err |= put_user(length, &up_native->length);
+	if (err)
+		return -EFAULT;
 
 	err = sys_ioctl(fd, cmd, (unsigned long) up_native);
 
@@ -2043,16 +2047,19 @@
         struct serial_struct ss;
         mm_segment_t oldseg = get_fs();
         __u32 udata;
+	unsigned int base;
 
         if (cmd == TIOCSSERIAL) {
                 if (!access_ok(VERIFY_READ, ss32, sizeof(SS32)))
                         return -EFAULT;
                 if (__copy_from_user(&ss, ss32, offsetof(SS32, iomem_base)))
 			return -EFAULT;
-                __get_user(udata, &ss32->iomem_base);
+                if (__get_user(udata, &ss32->iomem_base))
+			return -EFAULT;
                 ss.iomem_base = compat_ptr(udata);
-                __get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift);
-                __get_user(ss.port_high, &ss32->port_high);
+                if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
+		    __get_user(ss.port_high, &ss32->port_high))
+			return -EFAULT;
                 ss.iomap_base = 0UL;
         }
         set_fs(KERNEL_DS);
@@ -2063,12 +2070,12 @@
                         return -EFAULT;
                 if (__copy_to_user(ss32,&ss,offsetof(SS32,iomem_base)))
 			return -EFAULT;
-                __put_user((unsigned long)ss.iomem_base  >> 32 ?
-                            0xffffffff : (unsigned)(unsigned long)ss.iomem_base,
-                            &ss32->iomem_base);
-                __put_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift);
-                __put_user(ss.port_high, &ss32->port_high);
-
+		base = (unsigned long)ss.iomem_base  >> 32 ?
+			0xffffffff : (unsigned)(unsigned long)ss.iomem_base;
+		if (__put_user(base, &ss32->iomem_base) ||
+		    __put_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
+		    __put_user(ss.port_high, &ss32->port_high))
+			return -EFAULT;
         }
         return err;
 }
diff --git a/fs/configfs/configfs_internal.h b/fs/configfs/configfs_internal.h
index 3f4ff7a..f92cd30 100644
--- a/fs/configfs/configfs_internal.h
+++ b/fs/configfs/configfs_internal.h
@@ -49,7 +49,7 @@
 #define CONFIGFS_NOT_PINNED	(CONFIGFS_ITEM_ATTR)
 
 extern struct vfsmount * configfs_mount;
-extern kmem_cache_t *configfs_dir_cachep;
+extern struct kmem_cache *configfs_dir_cachep;
 
 extern int configfs_is_root(struct config_item *item);
 
diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c
index 68bd5c9..ed67852 100644
--- a/fs/configfs/mount.c
+++ b/fs/configfs/mount.c
@@ -38,7 +38,7 @@
 
 struct vfsmount * configfs_mount = NULL;
 struct super_block * configfs_sb = NULL;
-kmem_cache_t *configfs_dir_cachep;
+struct kmem_cache *configfs_dir_cachep;
 static int configfs_mnt_count = 0;
 
 static struct super_operations configfs_ops = {
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index a624c3e..0509ced 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -481,6 +481,8 @@
 		pgdata = kmap(page);
 		if (compr_len == 0)
 			; /* hole */
+		else if (compr_len > (PAGE_CACHE_SIZE << 1))
+			printk(KERN_ERR "cramfs: bad compressed blocksize %u\n", compr_len);
 		else {
 			mutex_lock(&read_mutex);
 			bytes_filled = cramfs_uncompress_block(pgdata,
diff --git a/fs/dcache.c b/fs/dcache.c
index fd4a428..d68631f 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -43,7 +43,7 @@
 
 EXPORT_SYMBOL(dcache_lock);
 
-static kmem_cache_t *dentry_cache __read_mostly;
+static struct kmem_cache *dentry_cache __read_mostly;
 
 #define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
 
@@ -68,15 +68,19 @@
 	.age_limit = 45,
 };
 
-static void d_callback(struct rcu_head *head)
+static void __d_free(struct dentry *dentry)
 {
-	struct dentry * dentry = container_of(head, struct dentry, d_u.d_rcu);
-
 	if (dname_external(dentry))
 		kfree(dentry->d_name.name);
 	kmem_cache_free(dentry_cache, dentry); 
 }
 
+static void d_callback(struct rcu_head *head)
+{
+	struct dentry * dentry = container_of(head, struct dentry, d_u.d_rcu);
+	__d_free(dentry);
+}
+
 /*
  * no dcache_lock, please.  The caller must decrement dentry_stat.nr_dentry
  * inside dcache_lock.
@@ -85,7 +89,11 @@
 {
 	if (dentry->d_op && dentry->d_op->d_release)
 		dentry->d_op->d_release(dentry);
- 	call_rcu(&dentry->d_u.d_rcu, d_callback);
+	/* if dentry was never inserted into hash, immediate free is OK */
+	if (dentry->d_hash.pprev == NULL)
+		__d_free(dentry);
+	else
+		call_rcu(&dentry->d_u.d_rcu, d_callback);
 }
 
 /*
@@ -2072,10 +2080,10 @@
 }
 
 /* SLAB cache for __getname() consumers */
-kmem_cache_t *names_cachep __read_mostly;
+struct kmem_cache *names_cachep __read_mostly;
 
 /* SLAB cache for file structures */
-kmem_cache_t *filp_cachep __read_mostly;
+struct kmem_cache *filp_cachep __read_mostly;
 
 EXPORT_SYMBOL(d_genocide);
 
diff --git a/fs/dcookies.c b/fs/dcookies.c
index 0c4b067..21af162 100644
--- a/fs/dcookies.c
+++ b/fs/dcookies.c
@@ -37,7 +37,7 @@
 
 static LIST_HEAD(dcookie_users);
 static DEFINE_MUTEX(dcookie_mutex);
-static kmem_cache_t *dcookie_cache __read_mostly;
+static struct kmem_cache *dcookie_cache __read_mostly;
 static struct list_head *dcookie_hashtable __read_mostly;
 static size_t hash_size __read_mostly;
 
diff --git a/fs/dlm/memory.c b/fs/dlm/memory.c
index 989b608..5352b03 100644
--- a/fs/dlm/memory.c
+++ b/fs/dlm/memory.c
@@ -15,7 +15,7 @@
 #include "config.h"
 #include "memory.h"
 
-static kmem_cache_t *lkb_cache;
+static struct kmem_cache *lkb_cache;
 
 
 int dlm_memory_init(void)
diff --git a/fs/dnotify.c b/fs/dnotify.c
index 2b0442d..1f26a2b 100644
--- a/fs/dnotify.c
+++ b/fs/dnotify.c
@@ -23,7 +23,7 @@
 
 int dir_notify_enable __read_mostly = 1;
 
-static kmem_cache_t *dn_cache __read_mostly;
+static struct kmem_cache *dn_cache __read_mostly;
 
 static void redo_inode_mask(struct inode *inode)
 {
@@ -77,7 +77,7 @@
 	inode = filp->f_dentry->d_inode;
 	if (!S_ISDIR(inode->i_mode))
 		return -ENOTDIR;
-	dn = kmem_cache_alloc(dn_cache, SLAB_KERNEL);
+	dn = kmem_cache_alloc(dn_cache, GFP_KERNEL);
 	if (dn == NULL)
 		return -ENOMEM;
 	spin_lock(&inode->i_lock);
diff --git a/fs/dquot.c b/fs/dquot.c
index 9af7895..f9cd5e2 100644
--- a/fs/dquot.c
+++ b/fs/dquot.c
@@ -131,7 +131,7 @@
 static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;
 
 /* SLAB cache for dquot structures */
-static kmem_cache_t *dquot_cachep;
+static struct kmem_cache *dquot_cachep;
 
 int register_quota_format(struct quota_format_type *fmt)
 {
@@ -600,7 +600,7 @@
 {
 	struct dquot *dquot;
 
-	dquot = kmem_cache_alloc(dquot_cachep, SLAB_NOFS);
+	dquot = kmem_cache_alloc(dquot_cachep, GFP_NOFS);
 	if(!dquot)
 		return NODQUOT;
 
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index f63a775..7196f50 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -628,7 +628,7 @@
 	num_extents_per_page = PAGE_CACHE_SIZE / crypt_stat->extent_size;
 	base_extent = (page->index * num_extents_per_page);
 	lower_page_virt = kmem_cache_alloc(ecryptfs_lower_page_cache,
-					   SLAB_KERNEL);
+					   GFP_KERNEL);
 	if (!lower_page_virt) {
 		rc = -ENOMEM;
 		ecryptfs_printk(KERN_ERR, "Error getting page for encrypted "
@@ -1334,7 +1334,7 @@
 		goto out;
 	}
 	/* Released in this function */
-	page_virt = kmem_cache_alloc(ecryptfs_header_cache_0, SLAB_USER);
+	page_virt = kmem_cache_alloc(ecryptfs_header_cache_0, GFP_USER);
 	if (!page_virt) {
 		ecryptfs_printk(KERN_ERR, "Out of memory\n");
 		rc = -ENOMEM;
@@ -1493,7 +1493,7 @@
 	    &ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->crypt_stat;
 
 	/* Read the first page from the underlying file */
-	page_virt = kmem_cache_alloc(ecryptfs_header_cache_1, SLAB_USER);
+	page_virt = kmem_cache_alloc(ecryptfs_header_cache_1, GFP_USER);
 	if (!page_virt) {
 		rc = -ENOMEM;
 		ecryptfs_printk(KERN_ERR, "Unable to allocate page_virt\n");
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index a92ef05..42099e7 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -250,7 +250,7 @@
 	int lower_flags;
 
 	/* Released in ecryptfs_release or end of function if failure */
-	file_info = kmem_cache_alloc(ecryptfs_file_info_cache, SLAB_KERNEL);
+	file_info = kmem_cache_alloc(ecryptfs_file_info_cache, GFP_KERNEL);
 	ecryptfs_set_file_private(file, file_info);
 	if (!file_info) {
 		ecryptfs_printk(KERN_ERR,
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index dfcc684..8a1945a 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -369,7 +369,7 @@
 	BUG_ON(!atomic_read(&lower_dentry->d_count));
 	ecryptfs_set_dentry_private(dentry,
 				    kmem_cache_alloc(ecryptfs_dentry_info_cache,
-						     SLAB_KERNEL));
+						     GFP_KERNEL));
 	if (!ecryptfs_dentry_to_private(dentry)) {
 		rc = -ENOMEM;
 		ecryptfs_printk(KERN_ERR, "Out of memory whilst attempting "
@@ -404,7 +404,7 @@
 	/* Released in this function */
 	page_virt =
 	    (char *)kmem_cache_alloc(ecryptfs_header_cache_2,
-				     SLAB_USER);
+				     GFP_USER);
 	if (!page_virt) {
 		rc = -ENOMEM;
 		ecryptfs_printk(KERN_ERR,
@@ -795,7 +795,7 @@
 	/* Released at out_free: label */
 	ecryptfs_set_file_private(&fake_ecryptfs_file,
 				  kmem_cache_alloc(ecryptfs_file_info_cache,
-						   SLAB_KERNEL));
+						   GFP_KERNEL));
 	if (unlikely(!ecryptfs_file_to_private(&fake_ecryptfs_file))) {
 		rc = -ENOMEM;
 		goto out;
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index c3746f5..745c0f1 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -207,7 +207,7 @@
 	/* Released: wipe_auth_tok_list called in ecryptfs_parse_packet_set or
 	 * at end of function upon failure */
 	auth_tok_list_item =
-	    kmem_cache_alloc(ecryptfs_auth_tok_list_item_cache, SLAB_KERNEL);
+	    kmem_cache_alloc(ecryptfs_auth_tok_list_item_cache, GFP_KERNEL);
 	if (!auth_tok_list_item) {
 		ecryptfs_printk(KERN_ERR, "Unable to allocate memory\n");
 		rc = -ENOMEM;
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index a78d87d..3ede12b 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -378,7 +378,7 @@
 	/* Released in ecryptfs_put_super() */
 	ecryptfs_set_superblock_private(sb,
 					kmem_cache_alloc(ecryptfs_sb_info_cache,
-							 SLAB_KERNEL));
+							 GFP_KERNEL));
 	if (!ecryptfs_superblock_to_private(sb)) {
 		ecryptfs_printk(KERN_WARNING, "Out of memory\n");
 		rc = -ENOMEM;
@@ -402,7 +402,7 @@
 	/* through deactivate_super(sb) from get_sb_nodev() */
 	ecryptfs_set_dentry_private(sb->s_root,
 				    kmem_cache_alloc(ecryptfs_dentry_info_cache,
-						     SLAB_KERNEL));
+						     GFP_KERNEL));
 	if (!ecryptfs_dentry_to_private(sb->s_root)) {
 		ecryptfs_printk(KERN_ERR,
 				"dentry_info_cache alloc failed\n");
@@ -546,7 +546,7 @@
 }
 
 static struct ecryptfs_cache_info {
-	kmem_cache_t **cache;
+	struct kmem_cache **cache;
 	const char *name;
 	size_t size;
 	void (*ctor)(void*, struct kmem_cache *, unsigned long);
@@ -691,7 +691,7 @@
 
 static struct ecryptfs_attribute sysfs_attr_version = __ATTR_RO(version);
 
-struct ecryptfs_version_str_map_elem {
+static struct ecryptfs_version_str_map_elem {
 	u32 flag;
 	char *str;
 } ecryptfs_version_str_map[] = {
diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c
index 825757a..eaa5daa 100644
--- a/fs/ecryptfs/super.c
+++ b/fs/ecryptfs/super.c
@@ -50,7 +50,7 @@
 	struct inode *inode = NULL;
 
 	ecryptfs_inode = kmem_cache_alloc(ecryptfs_inode_info_cache,
-					  SLAB_KERNEL);
+					  GFP_KERNEL);
 	if (unlikely(!ecryptfs_inode))
 		goto out;
 	ecryptfs_init_crypt_stat(&ecryptfs_inode->crypt_stat);
diff --git a/fs/efs/super.c b/fs/efs/super.c
index b3f5065..dfebf21 100644
--- a/fs/efs/super.c
+++ b/fs/efs/super.c
@@ -52,12 +52,12 @@
 };
 
 
-static kmem_cache_t * efs_inode_cachep;
+static struct kmem_cache * efs_inode_cachep;
 
 static struct inode *efs_alloc_inode(struct super_block *sb)
 {
 	struct efs_inode_info *ei;
-	ei = (struct efs_inode_info *)kmem_cache_alloc(efs_inode_cachep, SLAB_KERNEL);
+	ei = (struct efs_inode_info *)kmem_cache_alloc(efs_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	return &ei->vfs_inode;
@@ -68,7 +68,7 @@
 	kmem_cache_free(efs_inode_cachep, INODE_INFO(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct efs_inode_info *ei = (struct efs_inode_info *) foo;
 
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index ae228ec..88a6f8d 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -283,10 +283,10 @@
 static struct poll_safewake psw;
 
 /* Slab cache used to allocate "struct epitem" */
-static kmem_cache_t *epi_cache __read_mostly;
+static struct kmem_cache *epi_cache __read_mostly;
 
 /* Slab cache used to allocate "struct eppoll_entry" */
-static kmem_cache_t *pwq_cache __read_mostly;
+static struct kmem_cache *pwq_cache __read_mostly;
 
 /* Virtual fs used to allocate inodes for eventpoll files */
 static struct vfsmount *eventpoll_mnt __read_mostly;
@@ -961,7 +961,7 @@
 	struct epitem *epi = ep_item_from_epqueue(pt);
 	struct eppoll_entry *pwq;
 
-	if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, SLAB_KERNEL))) {
+	if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL))) {
 		init_waitqueue_func_entry(&pwq->wait, ep_poll_callback);
 		pwq->whead = whead;
 		pwq->base = epi;
@@ -1004,7 +1004,7 @@
 	struct ep_pqueue epq;
 
 	error = -ENOMEM;
-	if (!(epi = kmem_cache_alloc(epi_cache, SLAB_KERNEL)))
+	if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL)))
 		goto eexit_1;
 
 	/* Item initialization follow here ... */
diff --git a/fs/exec.c b/fs/exec.c
index d993ea1..add0e03 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -404,7 +404,7 @@
 		bprm->loader += stack_base;
 	bprm->exec += stack_base;
 
-	mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (!mpnt)
 		return -ENOMEM;
 
@@ -1515,7 +1515,8 @@
 		ispipe = 1;
  	} else
  		file = filp_open(corename,
-				 O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE, 0600);
+				 O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
+				 0600);
 	if (IS_ERR(file))
 		goto fail_unlock;
 	inode = file->f_dentry->d_inode;
diff --git a/fs/ext2/ioctl.c b/fs/ext2/ioctl.c
index 1dfba77..e3cf8c8 100644
--- a/fs/ext2/ioctl.c
+++ b/fs/ext2/ioctl.c
@@ -44,6 +44,7 @@
 		if (!S_ISDIR(inode->i_mode))
 			flags &= ~EXT2_DIRSYNC_FL;
 
+		mutex_lock(&inode->i_mutex);
 		oldflags = ei->i_flags;
 
 		/*
@@ -53,13 +54,16 @@
 		 * This test looks nicer. Thanks to Pauline Middelink
 		 */
 		if ((flags ^ oldflags) & (EXT2_APPEND_FL | EXT2_IMMUTABLE_FL)) {
-			if (!capable(CAP_LINUX_IMMUTABLE))
+			if (!capable(CAP_LINUX_IMMUTABLE)) {
+				mutex_unlock(&inode->i_mutex);
 				return -EPERM;
+			}
 		}
 
 		flags = flags & EXT2_FL_USER_MODIFIABLE;
 		flags |= oldflags & ~EXT2_FL_USER_MODIFIABLE;
 		ei->i_flags = flags;
+		mutex_unlock(&inode->i_mutex);
 
 		ext2_set_inode_flags(inode);
 		inode->i_ctime = CURRENT_TIME_SEC;
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index d8b9abd..255cef5 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -135,12 +135,12 @@
 	return;
 }
 
-static kmem_cache_t * ext2_inode_cachep;
+static struct kmem_cache * ext2_inode_cachep;
 
 static struct inode *ext2_alloc_inode(struct super_block *sb)
 {
 	struct ext2_inode_info *ei;
-	ei = (struct ext2_inode_info *)kmem_cache_alloc(ext2_inode_cachep, SLAB_KERNEL);
+	ei = (struct ext2_inode_info *)kmem_cache_alloc(ext2_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 #ifdef CONFIG_EXT2_FS_POSIX_ACL
@@ -156,7 +156,7 @@
 	kmem_cache_free(ext2_inode_cachep, EXT2_I(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct ext2_inode_info *ei = (struct ext2_inode_info *) foo;
 
@@ -1090,8 +1090,10 @@
 {
 	struct super_block *sb = dentry->d_sb;
 	struct ext2_sb_info *sbi = EXT2_SB(sb);
+	struct ext2_super_block *es = sbi->s_es;
 	unsigned long overhead;
 	int i;
+	u64 fsid;
 
 	if (test_opt (sb, MINIX_DF))
 		overhead = 0;
@@ -1104,7 +1106,7 @@
 		 * All of the blocks before first_data_block are
 		 * overhead
 		 */
-		overhead = le32_to_cpu(sbi->s_es->s_first_data_block);
+		overhead = le32_to_cpu(es->s_first_data_block);
 
 		/*
 		 * Add the overhead attributed to the superblock and
@@ -1125,14 +1127,18 @@
 
 	buf->f_type = EXT2_SUPER_MAGIC;
 	buf->f_bsize = sb->s_blocksize;
-	buf->f_blocks = le32_to_cpu(sbi->s_es->s_blocks_count) - overhead;
+	buf->f_blocks = le32_to_cpu(es->s_blocks_count) - overhead;
 	buf->f_bfree = ext2_count_free_blocks(sb);
-	buf->f_bavail = buf->f_bfree - le32_to_cpu(sbi->s_es->s_r_blocks_count);
-	if (buf->f_bfree < le32_to_cpu(sbi->s_es->s_r_blocks_count))
+	buf->f_bavail = buf->f_bfree - le32_to_cpu(es->s_r_blocks_count);
+	if (buf->f_bfree < le32_to_cpu(es->s_r_blocks_count))
 		buf->f_bavail = 0;
-	buf->f_files = le32_to_cpu(sbi->s_es->s_inodes_count);
-	buf->f_ffree = ext2_count_free_inodes (sb);
+	buf->f_files = le32_to_cpu(es->s_inodes_count);
+	buf->f_ffree = ext2_count_free_inodes(sb);
 	buf->f_namelen = EXT2_NAME_LEN;
+	fsid = le64_to_cpup((void *)es->s_uuid) ^
+	       le64_to_cpup((void *)es->s_uuid + sizeof(u64));
+	buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
+	buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;
 	return 0;
 }
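The new f_fsid fields above are filled by folding the filesystem's 128-bit UUID down to 64 bits and splitting the result into two 32-bit words. A minimal sketch of that arithmetic, with a hypothetical helper name and the UUID passed in directly (kernel byteorder helpers assumed):

/*
 * Illustrative only: fold a 16-byte UUID into the two 32-bit f_fsid
 * words the way the statfs changes above do -- XOR the two
 * little-endian 64-bit halves, then split the result high/low.
 */
static void example_fsid_from_uuid(const __u8 uuid[16], __u32 val[2])
{
	u64 fsid;

	fsid = le64_to_cpup((__le64 *)uuid) ^
	       le64_to_cpup((__le64 *)(uuid + sizeof(u64)));
	val[0] = fsid & 0xFFFFFFFFUL;
	val[1] = (fsid >> 32) & 0xFFFFFFFFUL;
}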
 
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index af52a7f..247efd0 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -342,12 +342,9 @@
 	if (EXT2_HAS_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR))
 		return;
 
-	lock_super(sb);
-	EXT2_SB(sb)->s_es->s_feature_compat |=
-		cpu_to_le32(EXT2_FEATURE_COMPAT_EXT_ATTR);
+	EXT2_SET_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR);
 	sb->s_dirt = 1;
 	mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
-	unlock_super(sb);
 }
 
 /*
diff --git a/fs/ext3/Makefile b/fs/ext3/Makefile
index 704cd44..e77766a 100644
--- a/fs/ext3/Makefile
+++ b/fs/ext3/Makefile
@@ -5,7 +5,7 @@
 obj-$(CONFIG_EXT3_FS) += ext3.o
 
 ext3-y	:= balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \
-	   ioctl.o namei.o super.o symlink.o hash.o resize.o
+	   ioctl.o namei.o super.o symlink.o hash.o resize.o ext3_jbd.o
 
 ext3-$(CONFIG_EXT3_FS_XATTR)	 += xattr.o xattr_user.o xattr_trusted.o
 ext3-$(CONFIG_EXT3_FS_POSIX_ACL) += acl.o
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index b41a7d7..2216174 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -144,7 +144,7 @@
 
 	printk("Block Allocation Reservation Windows Map (%s):\n", fn);
 	while (n) {
-		rsv = list_entry(n, struct ext3_reserve_window_node, rsv_node);
+		rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);
 		if (verbose)
 			printk("reservation window 0x%p "
 			       "start:  %lu, end:  %lu\n",
@@ -730,7 +730,7 @@
 		here = 0;
 
 	p = ((char *)bh->b_data) + (here >> 3);
-	r = memscan(p, 0, (maxblocks - here + 7) >> 3);
+	r = memscan(p, 0, ((maxblocks + 7) >> 3) - (here >> 3));
 	next = (r - ((char *)bh->b_data)) << 3;
 
 	if (next < maxblocks && next >= start && ext3_test_allocatable(next, bh))
@@ -949,7 +949,7 @@
 
 		prev = rsv;
 		next = rb_next(&rsv->rsv_node);
-		rsv = list_entry(next,struct ext3_reserve_window_node,rsv_node);
+		rsv = rb_entry(next,struct ext3_reserve_window_node,rsv_node);
 
 		/*
 		 * Reached the last reservation, we can just append to the
@@ -1148,7 +1148,7 @@
 	 * check if the first free block is within the
 	 * free space we just reserved
 	 */
-	if (start_block >= my_rsv->rsv_start && start_block < my_rsv->rsv_end)
+	if (start_block >= my_rsv->rsv_start && start_block <= my_rsv->rsv_end)
 		return 0;		/* success */
 	/*
 	 * if the first free bit we found is out of the reservable space
@@ -1193,7 +1193,7 @@
 	if (!next)
 		my_rsv->rsv_end += size;
 	else {
-		next_rsv = list_entry(next, struct ext3_reserve_window_node, rsv_node);
+		next_rsv = rb_entry(next, struct ext3_reserve_window_node, rsv_node);
 
 		if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size)
 			my_rsv->rsv_end += size;
@@ -1271,7 +1271,7 @@
 	}
 	/*
 	 * grp_goal is a group relative block number (if there is a goal)
-	 * 0 < grp_goal < EXT3_BLOCKS_PER_GROUP(sb)
+	 * 0 <= grp_goal < EXT3_BLOCKS_PER_GROUP(sb)
 	 * first block is a filesystem wide block number
 	 * first block is the block number of the first block in this group
 	 */
@@ -1307,10 +1307,14 @@
 			if (!goal_in_my_reservation(&my_rsv->rsv_window,
 							grp_goal, group, sb))
 				grp_goal = -1;
-		} else if (grp_goal > 0 &&
-			  (my_rsv->rsv_end-grp_goal+1) < *count)
-			try_to_extend_reservation(my_rsv, sb,
-					*count-my_rsv->rsv_end + grp_goal - 1);
+		} else if (grp_goal >= 0) {
+			int curr = my_rsv->rsv_end -
+					(grp_goal + group_first_block) + 1;
+
+			if (curr < *count)
+				try_to_extend_reservation(my_rsv, sb,
+							*count - curr);
+		}
 
 		if ((my_rsv->rsv_start > group_last_block) ||
 				(my_rsv->rsv_end < group_first_block)) {
@@ -1511,10 +1515,8 @@
 		if (group_no >= ngroups)
 			group_no = 0;
 		gdp = ext3_get_group_desc(sb, group_no, &gdp_bh);
-		if (!gdp) {
-			*errp = -EIO;
-			goto out;
-		}
+		if (!gdp)
+			goto io_error;
 		free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
 		/*
 		 * skip this group if the number of
@@ -1548,6 +1550,7 @@
 	 */
 	if (my_rsv) {
 		my_rsv = NULL;
+		windowsz = 0;
 		group_no = goal_group;
 		goto retry_alloc;
 	}
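The memscan() length change above is easier to read as byte arithmetic: the scan starts at the byte containing bit 'here', and the new expression passes the number of bytes from that byte to the end of a bitmap covering 'maxblocks' bits. A minimal sketch of that computation (hypothetical helper, not from the patch):

/*
 * Illustrative only: bitmap bytes left to scan when the scan starts at
 * the byte that contains bit 'here' and the bitmap covers 'maxblocks'
 * bits in total.
 */
static unsigned int bitmap_bytes_remaining(unsigned int maxblocks,
					   unsigned int here)
{
	unsigned int total_bytes = (maxblocks + 7) >> 3;	/* ceil(maxblocks / 8) */
	unsigned int start_byte = here >> 3;			/* byte holding bit 'here' */

	return total_bytes - start_byte;
}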
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
index d0b54f3..5a9313e 100644
--- a/fs/ext3/dir.c
+++ b/fs/ext3/dir.c
@@ -154,6 +154,9 @@
 			ext3_error (sb, "ext3_readdir",
 				"directory #%lu contains a hole at offset %lu",
 				inode->i_ino, (unsigned long)filp->f_pos);
+			/* corrupt size?  Maybe no more blocks to read */
+			if (filp->f_pos > inode->i_blocks << 9)
+				break;
 			filp->f_pos += sb->s_blocksize - offset;
 			continue;
 		}
diff --git a/fs/ext3/ext3_jbd.c b/fs/ext3/ext3_jbd.c
new file mode 100644
index 0000000..e1f91fd
--- /dev/null
+++ b/fs/ext3/ext3_jbd.c
@@ -0,0 +1,59 @@
+/*
+ * Interface between ext3 and JBD
+ */
+
+#include <linux/ext3_jbd.h>
+
+int __ext3_journal_get_undo_access(const char *where, handle_t *handle,
+				struct buffer_head *bh)
+{
+	int err = journal_get_undo_access(handle, bh);
+	if (err)
+		ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+	return err;
+}
+
+int __ext3_journal_get_write_access(const char *where, handle_t *handle,
+				struct buffer_head *bh)
+{
+	int err = journal_get_write_access(handle, bh);
+	if (err)
+		ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+	return err;
+}
+
+int __ext3_journal_forget(const char *where, handle_t *handle,
+				struct buffer_head *bh)
+{
+	int err = journal_forget(handle, bh);
+	if (err)
+		ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+	return err;
+}
+
+int __ext3_journal_revoke(const char *where, handle_t *handle,
+				unsigned long blocknr, struct buffer_head *bh)
+{
+	int err = journal_revoke(handle, blocknr, bh);
+	if (err)
+		ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+	return err;
+}
+
+int __ext3_journal_get_create_access(const char *where,
+				handle_t *handle, struct buffer_head *bh)
+{
+	int err = journal_get_create_access(handle, bh);
+	if (err)
+		ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+	return err;
+}
+
+int __ext3_journal_dirty_metadata(const char *where,
+				handle_t *handle, struct buffer_head *bh)
+{
+	int err = journal_dirty_metadata(handle, bh);
+	if (err)
+		ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+	return err;
+}
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 03ba5bc..beaf25f 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -1148,37 +1148,102 @@
 	return ext3_journal_get_write_access(handle, bh);
 }
 
+/*
+ * The idea behind this helper function is the following:
+ * if prepare_write has allocated some blocks, but not all of them, the
+ * transaction must include the content of the newly allocated blocks.
+ * This content is expected to be set to zeroes by block_prepare_write().
+ * 2006/10/14  SAW
+ */
+static int ext3_prepare_failure(struct file *file, struct page *page,
+				unsigned from, unsigned to)
+{
+	struct address_space *mapping;
+	struct buffer_head *bh, *head, *next;
+	unsigned block_start, block_end;
+	unsigned blocksize;
+	int ret;
+	handle_t *handle = ext3_journal_current_handle();
+
+	mapping = page->mapping;
+	if (ext3_should_writeback_data(mapping->host)) {
+		/* optimization: no constraints about data */
+skip:
+		return ext3_journal_stop(handle);
+	}
+
+	head = page_buffers(page);
+	blocksize = head->b_size;
+	for (bh = head, block_start = 0;
+	     bh != head || !block_start;
+	     block_start = block_end, bh = next)
+	{
+		next = bh->b_this_page;
+		block_end = block_start + blocksize;
+		if (block_end <= from)
+			continue;
+		if (block_start >= to) {
+			block_start = to;
+			break;
+		}
+		if (!buffer_mapped(bh))
+			/* prepare_write failed on this bh */
+			break;
+		if (ext3_should_journal_data(mapping->host)) {
+			ret = do_journal_get_write_access(handle, bh);
+			if (ret) {
+				ext3_journal_stop(handle);
+				return ret;
+			}
+		}
+	/*
+	 * block_start here becomes the first block where the current iteration
+	 * of prepare_write failed.
+	 */
+	}
+	if (block_start <= from)
+		goto skip;
+
+	/* commit allocated and zeroed buffers */
+	return mapping->a_ops->commit_write(file, page, from, block_start);
+}
+
 static int ext3_prepare_write(struct file *file, struct page *page,
 			      unsigned from, unsigned to)
 {
 	struct inode *inode = page->mapping->host;
-	int ret, needed_blocks = ext3_writepage_trans_blocks(inode);
+	int ret, ret2;
+	int needed_blocks = ext3_writepage_trans_blocks(inode);
 	handle_t *handle;
 	int retries = 0;
 
 retry:
 	handle = ext3_journal_start(inode, needed_blocks);
-	if (IS_ERR(handle)) {
-		ret = PTR_ERR(handle);
-		goto out;
-	}
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
 	if (test_opt(inode->i_sb, NOBH) && ext3_should_writeback_data(inode))
 		ret = nobh_prepare_write(page, from, to, ext3_get_block);
 	else
 		ret = block_prepare_write(page, from, to, ext3_get_block);
 	if (ret)
-		goto prepare_write_failed;
+		goto failure;
 
 	if (ext3_should_journal_data(inode)) {
 		ret = walk_page_buffers(handle, page_buffers(page),
 				from, to, NULL, do_journal_get_write_access);
+		if (ret)
+			/* fatal error, just put the handle and return */
+			journal_stop(handle);
 	}
-prepare_write_failed:
-	if (ret)
-		ext3_journal_stop(handle);
+	return ret;
+
+failure:
+	ret2 = ext3_prepare_failure(file, page, from, to);
+	if (ret2 < 0)
+		return ret2;
 	if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
 		goto retry;
-out:
+	/* retry number exceeded, or other error like -EDQUOT */
 	return ret;
 }
 
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index 906731a..60d2f9d 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -552,6 +552,15 @@
 					   dir->i_sb->s_blocksize -
 					   EXT3_DIR_REC_LEN(0));
 	for (; de < top; de = ext3_next_entry(de)) {
+		if (!ext3_check_dir_entry("htree_dirblock_to_tree", dir, de, bh,
+					(block<<EXT3_BLOCK_SIZE_BITS(dir->i_sb))
+						+((char *)de - bh->b_data))) {
+			/* On error, skip the f_pos to the next block. */
+			dir_file->f_pos = (dir_file->f_pos |
+					(dir->i_sb->s_blocksize - 1)) + 1;
+			brelse (bh);
+			return count;
+		}
 		ext3fs_dirhash(de->name, de->name_len, hinfo);
 		if ((hinfo->hash < start_hash) ||
 		    ((hinfo->hash == start_hash) &&
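The error path added above skips f_pos to the start of the next directory block. OR-ing the position with (blocksize - 1) sets every low-order bit, so adding one rounds up to the next multiple of the block size. A minimal sketch of the arithmetic with a hypothetical helper:

/*
 * Illustrative only: round a directory position up to the start of the
 * next block.  With a 4096-byte block size, pos 5000 becomes
 * (5000 | 4095) + 1 = 8192.  Requires blocksize to be a power of two,
 * as ext3 block sizes are.
 */
static loff_t next_block_pos(loff_t pos, unsigned long blocksize)
{
	return (pos | (blocksize - 1)) + 1;
}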
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index afc2d4f..580b8a6 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -436,7 +436,7 @@
 	return;
 }
 
-static kmem_cache_t *ext3_inode_cachep;
+static struct kmem_cache *ext3_inode_cachep;
 
 /*
  * Called inside transaction, so use GFP_NOFS
@@ -445,7 +445,7 @@
 {
 	struct ext3_inode_info *ei;
 
-	ei = kmem_cache_alloc(ext3_inode_cachep, SLAB_NOFS);
+	ei = kmem_cache_alloc(ext3_inode_cachep, GFP_NOFS);
 	if (!ei)
 		return NULL;
 #ifdef CONFIG_EXT3_FS_POSIX_ACL
@@ -462,7 +462,7 @@
 	kmem_cache_free(ext3_inode_cachep, EXT3_I(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct ext3_inode_info *ei = (struct ext3_inode_info *) foo;
 
@@ -1264,6 +1264,12 @@
 		return;
 	}
 
+	if (bdev_read_only(sb->s_bdev)) {
+		printk(KERN_ERR "EXT3-fs: write access "
+			"unavailable, skipping orphan cleanup.\n");
+		return;
+	}
+
 	if (EXT3_SB(sb)->s_mount_state & EXT3_ERROR_FS) {
 		if (es->s_last_orphan)
 			jbd_debug(1, "Errors on filesystem, "
@@ -2387,6 +2393,7 @@
 	struct ext3_super_block *es = sbi->s_es;
 	ext3_fsblk_t overhead;
 	int i;
+	u64 fsid;
 
 	if (test_opt (sb, MINIX_DF))
 		overhead = 0;
@@ -2433,6 +2440,10 @@
 	buf->f_files = le32_to_cpu(es->s_inodes_count);
 	buf->f_ffree = percpu_counter_sum(&sbi->s_freeinodes_counter);
 	buf->f_namelen = EXT3_NAME_LEN;
+	fsid = le64_to_cpup((void *)es->s_uuid) ^
+	       le64_to_cpup((void *)es->s_uuid + sizeof(u64));
+	buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
+	buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;
 	return 0;
 }
 
diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
index f86f248..99857a40 100644
--- a/fs/ext3/xattr.c
+++ b/fs/ext3/xattr.c
@@ -459,14 +459,11 @@
 	if (EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_EXT_ATTR))
 		return;
 
-	lock_super(sb);
 	if (ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh) == 0) {
-		EXT3_SB(sb)->s_es->s_feature_compat |=
-			cpu_to_le32(EXT3_FEATURE_COMPAT_EXT_ATTR);
+		EXT3_SET_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_EXT_ATTR);
 		sb->s_dirt = 1;
 		ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
 	}
-	unlock_super(sb);
 }
 
 /*
diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile
index a6acb96..ae6e7e5 100644
--- a/fs/ext4/Makefile
+++ b/fs/ext4/Makefile
@@ -5,7 +5,8 @@
 obj-$(CONFIG_EXT4DEV_FS) += ext4dev.o
 
 ext4dev-y	:= balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \
-	   ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o
+		   ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
+		   ext4_jbd2.o
 
 ext4dev-$(CONFIG_EXT4DEV_FS_XATTR)	+= xattr.o xattr_user.o xattr_trusted.o
 ext4dev-$(CONFIG_EXT4DEV_FS_POSIX_ACL)	+= acl.o
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 5d45582..c4dd110 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -165,7 +165,7 @@
 
 	printk("Block Allocation Reservation Windows Map (%s):\n", fn);
 	while (n) {
-		rsv = list_entry(n, struct ext4_reserve_window_node, rsv_node);
+		rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node);
 		if (verbose)
 			printk("reservation window 0x%p "
 			       "start:  %llu, end:  %llu\n",
@@ -747,7 +747,7 @@
 		here = 0;
 
 	p = ((char *)bh->b_data) + (here >> 3);
-	r = memscan(p, 0, (maxblocks - here + 7) >> 3);
+	r = memscan(p, 0, ((maxblocks + 7) >> 3) - (here >> 3));
 	next = (r - ((char *)bh->b_data)) << 3;
 
 	if (next < maxblocks && next >= start && ext4_test_allocatable(next, bh))
@@ -966,7 +966,7 @@
 
 		prev = rsv;
 		next = rb_next(&rsv->rsv_node);
-		rsv = list_entry(next,struct ext4_reserve_window_node,rsv_node);
+		rsv = rb_entry(next,struct ext4_reserve_window_node,rsv_node);
 
 		/*
 		 * Reached the last reservation, we can just append to the
@@ -1165,7 +1165,7 @@
 	 * check if the first free block is within the
 	 * free space we just reserved
 	 */
-	if (start_block >= my_rsv->rsv_start && start_block < my_rsv->rsv_end)
+	if (start_block >= my_rsv->rsv_start && start_block <= my_rsv->rsv_end)
 		return 0;		/* success */
 	/*
 	 * if the first free bit we found is out of the reservable space
@@ -1210,7 +1210,7 @@
 	if (!next)
 		my_rsv->rsv_end += size;
 	else {
-		next_rsv = list_entry(next, struct ext4_reserve_window_node, rsv_node);
+		next_rsv = rb_entry(next, struct ext4_reserve_window_node, rsv_node);
 
 		if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size)
 			my_rsv->rsv_end += size;
@@ -1288,7 +1288,7 @@
 	}
 	/*
 	 * grp_goal is a group relative block number (if there is a goal)
-	 * 0 < grp_goal < EXT4_BLOCKS_PER_GROUP(sb)
+	 * 0 <= grp_goal < EXT4_BLOCKS_PER_GROUP(sb)
 	 * first block is a filesystem wide block number
 	 * first block is the block number of the first block in this group
 	 */
@@ -1324,10 +1324,14 @@
 			if (!goal_in_my_reservation(&my_rsv->rsv_window,
 							grp_goal, group, sb))
 				grp_goal = -1;
-		} else if (grp_goal > 0 &&
-			  (my_rsv->rsv_end-grp_goal+1) < *count)
-			try_to_extend_reservation(my_rsv, sb,
-					*count-my_rsv->rsv_end + grp_goal - 1);
+		} else if (grp_goal >= 0) {
+			int curr = my_rsv->rsv_end -
+					(grp_goal + group_first_block) + 1;
+
+			if (curr < *count)
+				try_to_extend_reservation(my_rsv, sb,
+							*count - curr);
+		}
 
 		if ((my_rsv->rsv_start > group_last_block) ||
 				(my_rsv->rsv_end < group_first_block)) {
@@ -1525,10 +1529,8 @@
 		if (group_no >= ngroups)
 			group_no = 0;
 		gdp = ext4_get_group_desc(sb, group_no, &gdp_bh);
-		if (!gdp) {
-			*errp = -EIO;
-			goto out;
-		}
+		if (!gdp)
+			goto io_error;
 		free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
 		/*
 		 * skip this group if the number of
@@ -1562,6 +1564,7 @@
 	 */
 	if (my_rsv) {
 		my_rsv = NULL;
+		windowsz = 0;
 		group_no = goal_group;
 		goto retry_alloc;
 	}
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index f859578..f2ed3e7 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -153,6 +153,9 @@
 			ext4_error (sb, "ext4_readdir",
 				"directory #%lu contains a hole at offset %lu",
 				inode->i_ino, (unsigned long)filp->f_pos);
+			/* corrupt size?  Maybe no more blocks to read */
+			if (filp->f_pos > inode->i_blocks << 9)
+				break;
 			filp->f_pos += sb->s_blocksize - offset;
 			continue;
 		}
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
new file mode 100644
index 0000000..d6afe4e
--- /dev/null
+++ b/fs/ext4/ext4_jbd2.c
@@ -0,0 +1,59 @@
+/*
+ * Interface between ext4 and JBD
+ */
+
+#include <linux/ext4_jbd2.h>
+
+int __ext4_journal_get_undo_access(const char *where, handle_t *handle,
+				struct buffer_head *bh)
+{
+	int err = jbd2_journal_get_undo_access(handle, bh);
+	if (err)
+		ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+	return err;
+}
+
+int __ext4_journal_get_write_access(const char *where, handle_t *handle,
+				struct buffer_head *bh)
+{
+	int err = jbd2_journal_get_write_access(handle, bh);
+	if (err)
+		ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+	return err;
+}
+
+int __ext4_journal_forget(const char *where, handle_t *handle,
+				struct buffer_head *bh)
+{
+	int err = jbd2_journal_forget(handle, bh);
+	if (err)
+		ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+	return err;
+}
+
+int __ext4_journal_revoke(const char *where, handle_t *handle,
+				ext4_fsblk_t blocknr, struct buffer_head *bh)
+{
+	int err = jbd2_journal_revoke(handle, blocknr, bh);
+	if (err)
+		ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+	return err;
+}
+
+int __ext4_journal_get_create_access(const char *where,
+				handle_t *handle, struct buffer_head *bh)
+{
+	int err = jbd2_journal_get_create_access(handle, bh);
+	if (err)
+		ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+	return err;
+}
+
+int __ext4_journal_dirty_metadata(const char *where,
+				handle_t *handle, struct buffer_head *bh)
+{
+	int err = jbd2_journal_dirty_metadata(handle, bh);
+	if (err)
+		ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+	return err;
+}
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 2608dce..dc2724f 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -48,7 +48,7 @@
  * ext_pblock:
  * combine low and high parts of physical block number into ext4_fsblk_t
  */
-static inline ext4_fsblk_t ext_pblock(struct ext4_extent *ex)
+static ext4_fsblk_t ext_pblock(struct ext4_extent *ex)
 {
 	ext4_fsblk_t block;
 
@@ -61,7 +61,7 @@
  * idx_pblock:
  * combine low and high parts of a leaf physical block number into ext4_fsblk_t
  */
-static inline ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix)
+static ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix)
 {
 	ext4_fsblk_t block;
 
@@ -75,7 +75,7 @@
  * stores a large physical block number into an extent struct,
  * breaking it into parts
  */
-static inline void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb)
+static void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb)
 {
 	ex->ee_start = cpu_to_le32((unsigned long) (pb & 0xffffffff));
 	ex->ee_start_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
@@ -86,7 +86,7 @@
  * stores a large physical block number into an index struct,
  * breaking it into parts
  */
-static inline void ext4_idx_store_pblock(struct ext4_extent_idx *ix, ext4_fsblk_t pb)
+static void ext4_idx_store_pblock(struct ext4_extent_idx *ix, ext4_fsblk_t pb)
 {
 	ix->ei_leaf = cpu_to_le32((unsigned long) (pb & 0xffffffff));
 	ix->ei_leaf_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
@@ -186,7 +186,8 @@
 		depth = path->p_depth;
 
 		/* try to predict block placement */
-		if ((ex = path[depth].p_ext))
+		ex = path[depth].p_ext;
+		if (ex)
 			return ext_pblock(ex)+(block-le32_to_cpu(ex->ee_block));
 
 		/* it looks like index is empty;
@@ -215,7 +216,7 @@
 	return newblock;
 }
 
-static inline int ext4_ext_space_block(struct inode *inode)
+static int ext4_ext_space_block(struct inode *inode)
 {
 	int size;
 
@@ -228,7 +229,7 @@
 	return size;
 }
 
-static inline int ext4_ext_space_block_idx(struct inode *inode)
+static int ext4_ext_space_block_idx(struct inode *inode)
 {
 	int size;
 
@@ -241,7 +242,7 @@
 	return size;
 }
 
-static inline int ext4_ext_space_root(struct inode *inode)
+static int ext4_ext_space_root(struct inode *inode)
 {
 	int size;
 
@@ -255,7 +256,7 @@
 	return size;
 }
 
-static inline int ext4_ext_space_root_idx(struct inode *inode)
+static int ext4_ext_space_root_idx(struct inode *inode)
 {
 	int size;
 
@@ -476,13 +477,12 @@
 
 	/* account possible depth increase */
 	if (!path) {
-		path = kmalloc(sizeof(struct ext4_ext_path) * (depth + 2),
+		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
 				GFP_NOFS);
 		if (!path)
 			return ERR_PTR(-ENOMEM);
 		alloc = 1;
 	}
-	memset(path, 0, sizeof(struct ext4_ext_path) * (depth + 1));
 	path[0].p_hdr = eh;
 
 	/* walk through the tree */
@@ -543,7 +543,8 @@
 	struct ext4_extent_idx *ix;
 	int len, err;
 
-	if ((err = ext4_ext_get_access(handle, inode, curp)))
+	err = ext4_ext_get_access(handle, inode, curp);
+	if (err)
 		return err;
 
 	BUG_ON(logical == le32_to_cpu(curp->p_idx->ei_block));
@@ -641,10 +642,9 @@
 	 * We need this to handle errors and free blocks
 	 * upon them.
 	 */
-	ablocks = kmalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
+	ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
 	if (!ablocks)
 		return -ENOMEM;
-	memset(ablocks, 0, sizeof(ext4_fsblk_t) * depth);
 
 	/* allocate all needed blocks */
 	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
@@ -665,7 +665,8 @@
 	}
 	lock_buffer(bh);
 
-	if ((err = ext4_journal_get_create_access(handle, bh)))
+	err = ext4_journal_get_create_access(handle, bh);
+	if (err)
 		goto cleanup;
 
 	neh = ext_block_hdr(bh);
@@ -702,18 +703,21 @@
 	set_buffer_uptodate(bh);
 	unlock_buffer(bh);
 
-	if ((err = ext4_journal_dirty_metadata(handle, bh)))
+	err = ext4_journal_dirty_metadata(handle, bh);
+	if (err)
 		goto cleanup;
 	brelse(bh);
 	bh = NULL;
 
 	/* correct old leaf */
 	if (m) {
-		if ((err = ext4_ext_get_access(handle, inode, path + depth)))
+		err = ext4_ext_get_access(handle, inode, path + depth);
+		if (err)
 			goto cleanup;
 		path[depth].p_hdr->eh_entries =
 		     cpu_to_le16(le16_to_cpu(path[depth].p_hdr->eh_entries)-m);
-		if ((err = ext4_ext_dirty(handle, inode, path + depth)))
+		err = ext4_ext_dirty(handle, inode, path + depth);
+		if (err)
 			goto cleanup;
 
 	}
@@ -736,7 +740,8 @@
 		}
 		lock_buffer(bh);
 
-		if ((err = ext4_journal_get_create_access(handle, bh)))
+		err = ext4_journal_get_create_access(handle, bh);
+		if (err)
 			goto cleanup;
 
 		neh = ext_block_hdr(bh);
@@ -780,7 +785,8 @@
 		set_buffer_uptodate(bh);
 		unlock_buffer(bh);
 
-		if ((err = ext4_journal_dirty_metadata(handle, bh)))
+		err = ext4_journal_dirty_metadata(handle, bh);
+		if (err)
 			goto cleanup;
 		brelse(bh);
 		bh = NULL;
@@ -800,9 +806,6 @@
 	}
 
 	/* insert new index */
-	if (err)
-		goto cleanup;
-
 	err = ext4_ext_insert_index(handle, inode, path + at,
 				    le32_to_cpu(border), newblock);
 
@@ -857,7 +860,8 @@
 	}
 	lock_buffer(bh);
 
-	if ((err = ext4_journal_get_create_access(handle, bh))) {
+	err = ext4_journal_get_create_access(handle, bh);
+	if (err) {
 		unlock_buffer(bh);
 		goto out;
 	}
@@ -877,11 +881,13 @@
 	set_buffer_uptodate(bh);
 	unlock_buffer(bh);
 
-	if ((err = ext4_journal_dirty_metadata(handle, bh)))
+	err = ext4_journal_dirty_metadata(handle, bh);
+	if (err)
 		goto out;
 
 	/* create index in new top-level index: num,max,pointer */
-	if ((err = ext4_ext_get_access(handle, inode, curp)))
+	err = ext4_ext_get_access(handle, inode, curp);
+	if (err)
 		goto out;
 
 	curp->p_hdr->eh_magic = EXT4_EXT_MAGIC;
@@ -1073,27 +1079,31 @@
 	 */
 	k = depth - 1;
 	border = path[depth].p_ext->ee_block;
-	if ((err = ext4_ext_get_access(handle, inode, path + k)))
+	err = ext4_ext_get_access(handle, inode, path + k);
+	if (err)
 		return err;
 	path[k].p_idx->ei_block = border;
-	if ((err = ext4_ext_dirty(handle, inode, path + k)))
+	err = ext4_ext_dirty(handle, inode, path + k);
+	if (err)
 		return err;
 
 	while (k--) {
 		/* change all left-side indexes */
 		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
 			break;
-		if ((err = ext4_ext_get_access(handle, inode, path + k)))
+		err = ext4_ext_get_access(handle, inode, path + k);
+		if (err)
 			break;
 		path[k].p_idx->ei_block = border;
-		if ((err = ext4_ext_dirty(handle, inode, path + k)))
+		err = ext4_ext_dirty(handle, inode, path + k);
+		if (err)
 			break;
 	}
 
 	return err;
 }
 
-static int inline
+static int
 ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
 				struct ext4_extent *ex2)
 {
@@ -1145,7 +1155,8 @@
 				le16_to_cpu(newext->ee_len),
 				le32_to_cpu(ex->ee_block),
 				le16_to_cpu(ex->ee_len), ext_pblock(ex));
-		if ((err = ext4_ext_get_access(handle, inode, path + depth)))
+		err = ext4_ext_get_access(handle, inode, path + depth);
+		if (err)
 			return err;
 		ex->ee_len = cpu_to_le16(le16_to_cpu(ex->ee_len)
 					 + le16_to_cpu(newext->ee_len));
@@ -1195,7 +1206,8 @@
 has_space:
 	nearex = path[depth].p_ext;
 
-	if ((err = ext4_ext_get_access(handle, inode, path + depth)))
+	err = ext4_ext_get_access(handle, inode, path + depth);
+	if (err)
 		goto cleanup;
 
 	if (!nearex) {
@@ -1383,7 +1395,7 @@
 	return err;
 }
 
-static inline void
+static void
 ext4_ext_put_in_cache(struct inode *inode, __u32 block,
 			__u32 len, __u32 start, int type)
 {
@@ -1401,7 +1413,7 @@
  * calculate boundaries of the gap that the requested block fits into
  * and cache this gap
  */
-static inline void
+static void
 ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
 				unsigned long block)
 {
@@ -1442,7 +1454,7 @@
 	ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP);
 }
 
-static inline int
+static int
 ext4_ext_in_cache(struct inode *inode, unsigned long block,
 			struct ext4_extent *ex)
 {
@@ -1489,10 +1501,12 @@
 	path--;
 	leaf = idx_pblock(path->p_idx);
 	BUG_ON(path->p_hdr->eh_entries == 0);
-	if ((err = ext4_ext_get_access(handle, inode, path)))
+	err = ext4_ext_get_access(handle, inode, path);
+	if (err)
 		return err;
 	path->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path->p_hdr->eh_entries)-1);
-	if ((err = ext4_ext_dirty(handle, inode, path)))
+	err = ext4_ext_dirty(handle, inode, path);
+	if (err)
 		return err;
 	ext_debug("index is empty, remove it, free block %llu\n", leaf);
 	bh = sb_find_get_block(inode->i_sb, leaf);
@@ -1509,7 +1523,7 @@
  * the caller should calculate credits under truncate_mutex and
  * pass the actual path.
  */
-int inline ext4_ext_calc_credits_for_insert(struct inode *inode,
+int ext4_ext_calc_credits_for_insert(struct inode *inode,
 						struct ext4_ext_path *path)
 {
 	int depth, needed;
@@ -1534,16 +1548,17 @@
 
 	/*
 	 * tree can be full, so it would need to grow in depth:
-	 * allocation + old root + new root
+	 * we need one credit to modify the old root; credits for the
+	 * new root will be added in the split accounting
 	 */
-	needed += 2 + 1 + 1;
+	needed += 1;
 
 	/*
 	 * Index split can happen, we would need:
 	 *    allocate intermediate indexes (bitmap + group)
 	 *  + change two blocks at each level, but root (already included)
 	 */
-	needed = (depth * 2) + (depth * 2);
+	needed += (depth * 2) + (depth * 2);
 
 	/* any allocation modifies superblock */
 	needed += 1;
@@ -1718,7 +1733,7 @@
  * ext4_ext_more_to_rm:
  * returns 1 if current index has to be freed (even partial)
  */
-static int inline
+static int
 ext4_ext_more_to_rm(struct ext4_ext_path *path)
 {
 	BUG_ON(path->p_idx == NULL);
@@ -1756,12 +1771,11 @@
 	 * We start scanning from right side, freeing all the blocks
 	 * after i_size and walking into the tree depth-wise.
 	 */
-	path = kmalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_KERNEL);
+	path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_KERNEL);
 	if (path == NULL) {
 		ext4_journal_stop(handle);
 		return -ENOMEM;
 	}
-	memset(path, 0, sizeof(struct ext4_ext_path) * (depth + 1));
 	path[0].p_hdr = ext_inode_hdr(inode);
 	if (ext4_ext_check_header(__FUNCTION__, inode, path[0].p_hdr)) {
 		err = -EIO;
@@ -1932,7 +1946,8 @@
 	mutex_lock(&EXT4_I(inode)->truncate_mutex);
 
 	/* check in cache */
-	if ((goal = ext4_ext_in_cache(inode, iblock, &newex))) {
+	goal = ext4_ext_in_cache(inode, iblock, &newex);
+	if (goal) {
 		if (goal == EXT4_EXT_CACHE_GAP) {
 			if (!create) {
 				/* block isn't allocated yet and
@@ -1971,7 +1986,8 @@
 	 */
 	BUG_ON(path[depth].p_ext == NULL && depth != 0);
 
-	if ((ex = path[depth].p_ext)) {
+	ex = path[depth].p_ext;
+	if (ex) {
 	        unsigned long ee_block = le32_to_cpu(ex->ee_block);
 		ext4_fsblk_t ee_start = ext_pblock(ex);
 		unsigned short ee_len  = le16_to_cpu(ex->ee_len);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 0a60ec5..1d85d4e 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1147,37 +1147,102 @@
 	return ext4_journal_get_write_access(handle, bh);
 }
 
+/*
+ * The idea behind this helper function is the following:
+ * if prepare_write has allocated some blocks, but not all of them, the
+ * transaction must include the content of the newly allocated blocks.
+ * This content is expected to be set to zeroes by block_prepare_write().
+ * 2006/10/14  SAW
+ */
+static int ext4_prepare_failure(struct file *file, struct page *page,
+				unsigned from, unsigned to)
+{
+	struct address_space *mapping;
+	struct buffer_head *bh, *head, *next;
+	unsigned block_start, block_end;
+	unsigned blocksize;
+	int ret;
+	handle_t *handle = ext4_journal_current_handle();
+
+	mapping = page->mapping;
+	if (ext4_should_writeback_data(mapping->host)) {
+		/* optimization: no constraints about data */
+skip:
+		return ext4_journal_stop(handle);
+	}
+
+	head = page_buffers(page);
+	blocksize = head->b_size;
+	for (bh = head, block_start = 0;
+	     bh != head || !block_start;
+	     block_start = block_end, bh = next)
+	{
+		next = bh->b_this_page;
+		block_end = block_start + blocksize;
+		if (block_end <= from)
+			continue;
+		if (block_start >= to) {
+			block_start = to;
+			break;
+		}
+		if (!buffer_mapped(bh))
+			/* prepare_write failed on this bh */
+			break;
+		if (ext4_should_journal_data(mapping->host)) {
+			ret = do_journal_get_write_access(handle, bh);
+			if (ret) {
+				ext4_journal_stop(handle);
+				return ret;
+			}
+		}
+	/*
+	 * block_start here becomes the first block where the current iteration
+	 * of prepare_write failed.
+	 */
+	}
+	if (block_start <= from)
+		goto skip;
+
+	/* commit allocated and zeroed buffers */
+	return mapping->a_ops->commit_write(file, page, from, block_start);
+}
+
 static int ext4_prepare_write(struct file *file, struct page *page,
 			      unsigned from, unsigned to)
 {
 	struct inode *inode = page->mapping->host;
-	int ret, needed_blocks = ext4_writepage_trans_blocks(inode);
+	int ret, ret2;
+	int needed_blocks = ext4_writepage_trans_blocks(inode);
 	handle_t *handle;
 	int retries = 0;
 
 retry:
 	handle = ext4_journal_start(inode, needed_blocks);
-	if (IS_ERR(handle)) {
-		ret = PTR_ERR(handle);
-		goto out;
-	}
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
 	if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
 		ret = nobh_prepare_write(page, from, to, ext4_get_block);
 	else
 		ret = block_prepare_write(page, from, to, ext4_get_block);
 	if (ret)
-		goto prepare_write_failed;
+		goto failure;
 
 	if (ext4_should_journal_data(inode)) {
 		ret = walk_page_buffers(handle, page_buffers(page),
 				from, to, NULL, do_journal_get_write_access);
+		if (ret)
+			/* fatal error, just put the handle and return */
+			journal_stop(handle);
 	}
-prepare_write_failed:
-	if (ret)
-		ext4_journal_stop(handle);
+	return ret;
+
+failure:
+	ret2 = ext4_prepare_failure(file, page, from, to);
+	if (ret2 < 0)
+		return ret2;
 	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
 		goto retry;
-out:
+	/* retry number exceeded, or other error like -EDQUOT */
 	return ret;
 }
 
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 8b1bd03..859990e 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -552,6 +552,15 @@
 					   dir->i_sb->s_blocksize -
 					   EXT4_DIR_REC_LEN(0));
 	for (; de < top; de = ext4_next_entry(de)) {
+		if (!ext4_check_dir_entry("htree_dirblock_to_tree", dir, de, bh,
+					(block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb))
+						+((char *)de - bh->b_data))) {
+			/* On error, skip the f_pos to the next block. */
+			dir_file->f_pos = (dir_file->f_pos |
+					(dir->i_sb->s_blocksize - 1)) + 1;
+			brelse (bh);
+			return count;
+		}
 		ext4fs_dirhash(de->name, de->name_len, hinfo);
 		if ((hinfo->hash < start_hash) ||
 		    ((hinfo->hash == start_hash) &&
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index b4b022a..486a641 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -486,7 +486,7 @@
 	return;
 }
 
-static kmem_cache_t *ext4_inode_cachep;
+static struct kmem_cache *ext4_inode_cachep;
 
 /*
  * Called inside transaction, so use GFP_NOFS
@@ -495,7 +495,7 @@
 {
 	struct ext4_inode_info *ei;
 
-	ei = kmem_cache_alloc(ext4_inode_cachep, SLAB_NOFS);
+	ei = kmem_cache_alloc(ext4_inode_cachep, GFP_NOFS);
 	if (!ei)
 		return NULL;
 #ifdef CONFIG_EXT4DEV_FS_POSIX_ACL
@@ -513,7 +513,7 @@
 	kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct ext4_inode_info *ei = (struct ext4_inode_info *) foo;
 
@@ -1321,6 +1321,12 @@
 		return;
 	}
 
+	if (bdev_read_only(sb->s_bdev)) {
+		printk(KERN_ERR "EXT4-fs: write access "
+			"unavailable, skipping orphan cleanup.\n");
+		return;
+	}
+
 	if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
 		if (es->s_last_orphan)
 			jbd_debug(1, "Errors on filesystem, "
@@ -2460,6 +2466,7 @@
 	struct ext4_super_block *es = sbi->s_es;
 	ext4_fsblk_t overhead;
 	int i;
+	u64 fsid;
 
 	if (test_opt (sb, MINIX_DF))
 		overhead = 0;
@@ -2506,6 +2513,10 @@
 	buf->f_files = le32_to_cpu(es->s_inodes_count);
 	buf->f_ffree = percpu_counter_sum(&sbi->s_freeinodes_counter);
 	buf->f_namelen = EXT4_NAME_LEN;
+	fsid = le64_to_cpup((void *)es->s_uuid) ^
+	       le64_to_cpup((void *)es->s_uuid + sizeof(u64));
+	buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
+	buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;
 	return 0;
 }
 
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 63233cd..dc969c3 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -459,14 +459,11 @@
 	if (EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_EXT_ATTR))
 		return;
 
-	lock_super(sb);
 	if (ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh) == 0) {
-		EXT4_SB(sb)->s_es->s_feature_compat |=
-			cpu_to_le32(EXT4_FEATURE_COMPAT_EXT_ATTR);
+		EXT4_SET_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_EXT_ATTR);
 		sb->s_dirt = 1;
 		ext4_journal_dirty_metadata(handle, EXT4_SB(sb)->s_sbh);
 	}
-	unlock_super(sb);
 }
 
 /*
diff --git a/fs/fat/cache.c b/fs/fat/cache.c
index 82cc4f59..05c2941 100644
--- a/fs/fat/cache.c
+++ b/fs/fat/cache.c
@@ -34,9 +34,9 @@
 	return FAT_MAX_CACHE;
 }
 
-static kmem_cache_t *fat_cache_cachep;
+static struct kmem_cache *fat_cache_cachep;
 
-static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
+static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
 {
 	struct fat_cache *cache = (struct fat_cache *)foo;
 
@@ -63,7 +63,7 @@
 
 static inline struct fat_cache *fat_cache_alloc(struct inode *inode)
 {
-	return kmem_cache_alloc(fat_cache_cachep, SLAB_KERNEL);
+	return kmem_cache_alloc(fat_cache_cachep, GFP_KERNEL);
 }
 
 static inline void fat_cache_free(struct fat_cache *cache)
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 78945b5..a9e4688 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -477,12 +477,12 @@
 	kfree(sbi);
 }
 
-static kmem_cache_t *fat_inode_cachep;
+static struct kmem_cache *fat_inode_cachep;
 
 static struct inode *fat_alloc_inode(struct super_block *sb)
 {
 	struct msdos_inode_info *ei;
-	ei = kmem_cache_alloc(fat_inode_cachep, SLAB_KERNEL);
+	ei = kmem_cache_alloc(fat_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	return &ei->vfs_inode;
@@ -493,7 +493,7 @@
 	kmem_cache_free(fat_inode_cachep, MSDOS_I(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct msdos_inode_info *ei = (struct msdos_inode_info *)foo;
 
diff --git a/fs/fcntl.c b/fs/fcntl.c
index e4f2616..4740d35 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -553,7 +553,7 @@
 }
 
 static DEFINE_RWLOCK(fasync_lock);
-static kmem_cache_t *fasync_cache __read_mostly;
+static struct kmem_cache *fasync_cache __read_mostly;
 
 /*
  * fasync_helper() is used by some character device drivers (mainly mice)
@@ -567,7 +567,7 @@
 	int result = 0;
 
 	if (on) {
-		new = kmem_cache_alloc(fasync_cache, SLAB_KERNEL);
+		new = kmem_cache_alloc(fasync_cache, GFP_KERNEL);
 		if (!new)
 			return -ENOMEM;
 	}
diff --git a/fs/file.c b/fs/file.c
index 8e81775..51aef67 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -21,7 +21,6 @@
 struct fdtable_defer {
 	spinlock_t lock;
 	struct work_struct wq;
-	struct timer_list timer;
 	struct fdtable *next;
 };
 
@@ -75,24 +74,10 @@
 	kfree(fdt);
 }
 
-static void fdtable_timer(unsigned long data)
+static void free_fdtable_work(struct work_struct *work)
 {
-	struct fdtable_defer *fddef = (struct fdtable_defer *)data;
-
-	spin_lock(&fddef->lock);
-	/*
-	 * If someone already emptied the queue return.
-	 */
-	if (!fddef->next)
-		goto out;
-	if (!schedule_work(&fddef->wq))
-		mod_timer(&fddef->timer, 5);
-out:
-	spin_unlock(&fddef->lock);
-}
-
-static void free_fdtable_work(struct fdtable_defer *f)
-{
+	struct fdtable_defer *f =
+		container_of(work, struct fdtable_defer, wq);
 	struct fdtable *fdt;
 
 	spin_lock_bh(&f->lock);
@@ -142,13 +127,8 @@
 		spin_lock(&fddef->lock);
 		fdt->next = fddef->next;
 		fddef->next = fdt;
-		/*
-		 * vmallocs are handled from the workqueue context.
-		 * If the per-cpu workqueue is running, then we
-		 * defer work scheduling through a timer.
-		 */
-		if (!schedule_work(&fddef->wq))
-			mod_timer(&fddef->timer, 5);
+		/* vmallocs are handled from the workqueue context */
+		schedule_work(&fddef->wq);
 		spin_unlock(&fddef->lock);
 		put_cpu_var(fdtable_defer_list);
 	}
@@ -351,10 +331,7 @@
 {
 	struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu);
 	spin_lock_init(&fddef->lock);
-	INIT_WORK(&fddef->wq, (void (*)(void *))free_fdtable_work, fddef);
-	init_timer(&fddef->timer);
-	fddef->timer.data = (unsigned long)fddef;
-	fddef->timer.function = fdtable_timer;
+	INIT_WORK(&fddef->wq, free_fdtable_work);
 	fddef->next = NULL;
 }
 
diff --git a/fs/freevxfs/vxfs_inode.c b/fs/freevxfs/vxfs_inode.c
index 4786d51..0b7ae89 100644
--- a/fs/freevxfs/vxfs_inode.c
+++ b/fs/freevxfs/vxfs_inode.c
@@ -46,7 +46,7 @@
 
 extern struct inode_operations vxfs_immed_symlink_iops;
 
-kmem_cache_t		*vxfs_inode_cachep;
+struct kmem_cache		*vxfs_inode_cachep;
 
 
 #ifdef DIAGNOSTIC
@@ -103,7 +103,7 @@
 		struct vxfs_inode_info	*vip;
 		struct vxfs_dinode	*dip;
 
-		if (!(vip = kmem_cache_alloc(vxfs_inode_cachep, SLAB_KERNEL)))
+		if (!(vip = kmem_cache_alloc(vxfs_inode_cachep, GFP_KERNEL)))
 			goto fail;
 		dip = (struct vxfs_dinode *)(bp->b_data + offset);
 		memcpy(vip, dip, sizeof(*vip));
@@ -145,7 +145,7 @@
 		struct vxfs_dinode	*dip;
 		caddr_t			kaddr = (char *)page_address(pp);
 
-		if (!(vip = kmem_cache_alloc(vxfs_inode_cachep, SLAB_KERNEL)))
+		if (!(vip = kmem_cache_alloc(vxfs_inode_cachep, GFP_KERNEL)))
 			goto fail;
 		dip = (struct vxfs_dinode *)(kaddr + offset);
 		memcpy(vip, dip, sizeof(*vip));
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 66571ea..357764d 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -19,7 +19,7 @@
 
 MODULE_ALIAS_MISCDEV(FUSE_MINOR);
 
-static kmem_cache_t *fuse_req_cachep;
+static struct kmem_cache *fuse_req_cachep;
 
 static struct fuse_conn *fuse_get_conn(struct file *file)
 {
@@ -41,7 +41,7 @@
 
 struct fuse_req *fuse_request_alloc(void)
 {
-	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, SLAB_KERNEL);
+	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_KERNEL);
 	if (req)
 		fuse_request_init(req);
 	return req;
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index c71a6c0..1cabdb2 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -141,9 +141,6 @@
 		struct fuse_req *forget_req;
 		struct dentry *parent;
 
-		/* Doesn't hurt to "reset" the validity timeout */
-		fuse_invalidate_entry_cache(entry);
-
 		/* For negative dentries, always do a fresh lookup */
 		if (!inode)
 			return 0;
@@ -1027,6 +1024,8 @@
 	if (attr->ia_valid & ATTR_SIZE) {
 		unsigned long limit;
 		is_truncate = 1;
+		if (IS_SWAPFILE(inode))
+			return -ETXTBSY;
 		limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
 		if (limit != RLIM_INFINITY && attr->ia_size > (loff_t) limit) {
 			send_sig(SIGXFSZ, current, 0);
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 763a50d..128f79c 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -754,6 +754,42 @@
 	return err;
 }
 
+static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
+{
+	struct inode *inode = mapping->host;
+	struct fuse_conn *fc = get_fuse_conn(inode);
+	struct fuse_req *req;
+	struct fuse_bmap_in inarg;
+	struct fuse_bmap_out outarg;
+	int err;
+
+	if (!inode->i_sb->s_bdev || fc->no_bmap)
+		return 0;
+
+	req = fuse_get_req(fc);
+	if (IS_ERR(req))
+		return 0;
+
+	memset(&inarg, 0, sizeof(inarg));
+	inarg.block = block;
+	inarg.blocksize = inode->i_sb->s_blocksize;
+	req->in.h.opcode = FUSE_BMAP;
+	req->in.h.nodeid = get_node_id(inode);
+	req->in.numargs = 1;
+	req->in.args[0].size = sizeof(inarg);
+	req->in.args[0].value = &inarg;
+	req->out.numargs = 1;
+	req->out.args[0].size = sizeof(outarg);
+	req->out.args[0].value = &outarg;
+	request_send(fc, req);
+	err = req->out.h.error;
+	fuse_put_request(fc, req);
+	if (err == -ENOSYS)
+		fc->no_bmap = 1;
+
+	return err ? 0 : outarg.block;
+}
+
 static const struct file_operations fuse_file_operations = {
 	.llseek		= generic_file_llseek,
 	.read		= do_sync_read,
@@ -787,6 +823,7 @@
 	.commit_write	= fuse_commit_write,
 	.readpages	= fuse_readpages,
 	.set_page_dirty	= fuse_set_page_dirty,
+	.bmap		= fuse_bmap,
 };
 
 void fuse_init_file_inode(struct inode *inode)
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 91edb89..b98b20d 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -298,6 +298,9 @@
 	    reply, before any other request, and never cleared */
 	unsigned conn_error : 1;
 
+	/** Connection successful.  Only set in INIT */
+	unsigned conn_init : 1;
+
 	/** Do readpages asynchronously?  Only set in INIT */
 	unsigned async_read : 1;
 
@@ -339,6 +342,9 @@
 	/** Is interrupt not implemented by fs? */
 	unsigned no_interrupt : 1;
 
+	/** Is bmap not implemented by fs? */
+	unsigned no_bmap : 1;
+
 	/** The number of requests waiting for completion */
 	atomic_t num_waiting;
 
@@ -365,6 +371,9 @@
 
 	/** Key for lock owner ID scrambling */
 	u32 scramble_key[4];
+
+	/** Reserved request for the DESTROY message */
+	struct fuse_req *destroy_req;
 };
 
 static inline struct fuse_conn *get_fuse_conn_super(struct super_block *sb)
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index fc42035..12450d2 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -22,7 +22,7 @@
 MODULE_DESCRIPTION("Filesystem in Userspace");
 MODULE_LICENSE("GPL");
 
-static kmem_cache_t *fuse_inode_cachep;
+static struct kmem_cache *fuse_inode_cachep;
 struct list_head fuse_conn_list;
 DEFINE_MUTEX(fuse_mutex);
 
@@ -39,6 +39,7 @@
 	unsigned group_id_present : 1;
 	unsigned flags;
 	unsigned max_read;
+	unsigned blksize;
 };
 
 static struct inode *fuse_alloc_inode(struct super_block *sb)
@@ -46,7 +47,7 @@
 	struct inode *inode;
 	struct fuse_inode *fi;
 
-	inode = kmem_cache_alloc(fuse_inode_cachep, SLAB_KERNEL);
+	inode = kmem_cache_alloc(fuse_inode_cachep, GFP_KERNEL);
 	if (!inode)
 		return NULL;
 
@@ -205,10 +206,23 @@
 		fuse_abort_conn(get_fuse_conn_super(vfsmnt->mnt_sb));
 }
 
+static void fuse_send_destroy(struct fuse_conn *fc)
+{
+	struct fuse_req *req = fc->destroy_req;
+	if (req && fc->conn_init) {
+		fc->destroy_req = NULL;
+		req->in.h.opcode = FUSE_DESTROY;
+		req->force = 1;
+		request_send(fc, req);
+		fuse_put_request(fc, req);
+	}
+}
+
 static void fuse_put_super(struct super_block *sb)
 {
 	struct fuse_conn *fc = get_fuse_conn_super(sb);
 
+	fuse_send_destroy(fc);
 	spin_lock(&fc->lock);
 	fc->connected = 0;
 	fc->blocked = 0;
@@ -274,6 +288,7 @@
 	OPT_DEFAULT_PERMISSIONS,
 	OPT_ALLOW_OTHER,
 	OPT_MAX_READ,
+	OPT_BLKSIZE,
 	OPT_ERR
 };
 
@@ -285,14 +300,16 @@
 	{OPT_DEFAULT_PERMISSIONS,	"default_permissions"},
 	{OPT_ALLOW_OTHER,		"allow_other"},
 	{OPT_MAX_READ,			"max_read=%u"},
+	{OPT_BLKSIZE,			"blksize=%u"},
 	{OPT_ERR,			NULL}
 };
 
-static int parse_fuse_opt(char *opt, struct fuse_mount_data *d)
+static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
 {
 	char *p;
 	memset(d, 0, sizeof(struct fuse_mount_data));
 	d->max_read = ~0;
+	d->blksize = 512;
 
 	while ((p = strsep(&opt, ",")) != NULL) {
 		int token;
@@ -345,6 +362,12 @@
 			d->max_read = value;
 			break;
 
+		case OPT_BLKSIZE:
+			if (!is_bdev || match_int(&args[0], &value))
+				return 0;
+			d->blksize = value;
+			break;
+
 		default:
 			return 0;
 		}
@@ -400,6 +423,8 @@
 void fuse_conn_put(struct fuse_conn *fc)
 {
 	if (atomic_dec_and_test(&fc->count)) {
+		if (fc->destroy_req)
+			fuse_request_free(fc->destroy_req);
 		mutex_destroy(&fc->inst_mutex);
 		kfree(fc);
 	}
@@ -456,6 +481,7 @@
 		fc->bdi.ra_pages = min(fc->bdi.ra_pages, ra_pages);
 		fc->minor = arg->minor;
 		fc->max_write = arg->minor < 5 ? 4096 : arg->max_write;
+		fc->conn_init = 1;
 	}
 	fuse_put_request(fc, req);
 	fc->blocked = 0;
@@ -500,15 +526,23 @@
 	struct dentry *root_dentry;
 	struct fuse_req *init_req;
 	int err;
+	int is_bdev = sb->s_bdev != NULL;
 
 	if (sb->s_flags & MS_MANDLOCK)
 		return -EINVAL;
 
-	if (!parse_fuse_opt((char *) data, &d))
+	if (!parse_fuse_opt((char *) data, &d, is_bdev))
 		return -EINVAL;
 
-	sb->s_blocksize = PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	if (is_bdev) {
+#ifdef CONFIG_BLOCK
+		if (!sb_set_blocksize(sb, d.blksize))
+			return -EINVAL;
+#endif
+	} else {
+		sb->s_blocksize = PAGE_CACHE_SIZE;
+		sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	}
 	sb->s_magic = FUSE_SUPER_MAGIC;
 	sb->s_op = &fuse_super_operations;
 	sb->s_maxbytes = MAX_LFS_FILESIZE;
@@ -547,6 +581,12 @@
 	if (!init_req)
 		goto err_put_root;
 
+	if (is_bdev) {
+		fc->destroy_req = fuse_request_alloc();
+		if (!fc->destroy_req)
+			goto err_put_root;
+	}
+
 	mutex_lock(&fuse_mutex);
 	err = -EINVAL;
 	if (file->private_data)
@@ -598,10 +638,47 @@
 	.kill_sb	= kill_anon_super,
 };
 
+#ifdef CONFIG_BLOCK
+static int fuse_get_sb_blk(struct file_system_type *fs_type,
+			   int flags, const char *dev_name,
+			   void *raw_data, struct vfsmount *mnt)
+{
+	return get_sb_bdev(fs_type, flags, dev_name, raw_data, fuse_fill_super,
+			   mnt);
+}
+
+static struct file_system_type fuseblk_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "fuseblk",
+	.get_sb		= fuse_get_sb_blk,
+	.kill_sb	= kill_block_super,
+	.fs_flags	= FS_REQUIRES_DEV,
+};
+
+static inline int register_fuseblk(void)
+{
+	return register_filesystem(&fuseblk_fs_type);
+}
+
+static inline void unregister_fuseblk(void)
+{
+	unregister_filesystem(&fuseblk_fs_type);
+}
+#else
+static inline int register_fuseblk(void)
+{
+	return 0;
+}
+
+static inline void unregister_fuseblk(void)
+{
+}
+#endif
+
 static decl_subsys(fuse, NULL, NULL);
 static decl_subsys(connections, NULL, NULL);
 
-static void fuse_inode_init_once(void *foo, kmem_cache_t *cachep,
+static void fuse_inode_init_once(void *foo, struct kmem_cache *cachep,
 				 unsigned long flags)
 {
 	struct inode * inode = foo;
@@ -617,24 +694,34 @@
 
 	err = register_filesystem(&fuse_fs_type);
 	if (err)
-		printk("fuse: failed to register filesystem\n");
-	else {
-		fuse_inode_cachep = kmem_cache_create("fuse_inode",
-						      sizeof(struct fuse_inode),
-						      0, SLAB_HWCACHE_ALIGN,
-						      fuse_inode_init_once, NULL);
-		if (!fuse_inode_cachep) {
-			unregister_filesystem(&fuse_fs_type);
-			err = -ENOMEM;
-		}
-	}
+		goto out;
 
+	err = register_fuseblk();
+	if (err)
+		goto out_unreg;
+
+	fuse_inode_cachep = kmem_cache_create("fuse_inode",
+					      sizeof(struct fuse_inode),
+					      0, SLAB_HWCACHE_ALIGN,
+					      fuse_inode_init_once, NULL);
+	err = -ENOMEM;
+	if (!fuse_inode_cachep)
+		goto out_unreg2;
+
+	return 0;
+
+ out_unreg2:
+	unregister_fuseblk();
+ out_unreg:
+	unregister_filesystem(&fuse_fs_type);
+ out:
 	return err;
 }
 
 static void fuse_fs_cleanup(void)
 {
 	unregister_filesystem(&fuse_fs_type);
+	unregister_fuseblk();
 	kmem_cache_destroy(fuse_inode_cachep);
 }
 
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 78fe0fa..55f5333 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -35,7 +35,7 @@
 
 struct greedy {
 	struct gfs2_holder gr_gh;
-	struct work_struct gr_work;
+	struct delayed_work gr_work;
 };
 
 struct gfs2_gl_hash_bucket {
@@ -1368,9 +1368,9 @@
 	glops->go_xmote_th(gl, state, flags);
 }
 
-static void greedy_work(void *data)
+static void greedy_work(struct work_struct *work)
 {
-	struct greedy *gr = data;
+	struct greedy *gr = container_of(work, struct greedy, gr_work.work);
 	struct gfs2_holder *gh = &gr->gr_gh;
 	struct gfs2_glock *gl = gh->gh_gl;
 	const struct gfs2_glock_operations *glops = gl->gl_ops;
@@ -1422,7 +1422,7 @@
 
 	gfs2_holder_init(gl, 0, 0, gh);
 	set_bit(HIF_GREEDY, &gh->gh_iflags);
-	INIT_WORK(&gr->gr_work, greedy_work, gr);
+	INIT_DELAYED_WORK(&gr->gr_work, greedy_work);
 
 	set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);
 	schedule_delayed_work(&gr->gr_work, time);
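This conversion (like the fs/file.c one above) follows the reworked workqueue API: the handler now receives the struct work_struct pointer itself and recovers its enclosing object with container_of(); for delayed work, the embedded work_struct is the .work member of struct delayed_work. A minimal sketch under those assumptions, with hypothetical names:

#include <linux/workqueue.h>

struct my_ctx {
	struct delayed_work dwork;
	int payload;
};

/* The handler walks back from the work_struct to its container via the
 * delayed_work's embedded .work member. */
static void my_work_fn(struct work_struct *work)
{
	struct my_ctx *ctx = container_of(work, struct my_ctx, dwork.work);

	ctx->payload++;		/* stand-in for the real work */
}

/*
 * Setup and scheduling:
 *	INIT_DELAYED_WORK(&ctx->dwork, my_work_fn);
 *	schedule_delayed_work(&ctx->dwork, HZ);
 */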
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 9889c1e..7c1a9e2 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -25,7 +25,7 @@
 #include "util.h"
 #include "glock.h"
 
-static void gfs2_init_inode_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
+static void gfs2_init_inode_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
 {
 	struct gfs2_inode *ip = foo;
 	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
@@ -37,7 +37,7 @@
 	}
 }
 
-static void gfs2_init_glock_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
+static void gfs2_init_glock_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
 {
 	struct gfs2_glock *gl = foo;
 	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
index 196c604..e5707a9 100644
--- a/fs/gfs2/util.c
+++ b/fs/gfs2/util.c
@@ -23,9 +23,9 @@
 #include "lm.h"
 #include "util.h"
 
-kmem_cache_t *gfs2_glock_cachep __read_mostly;
-kmem_cache_t *gfs2_inode_cachep __read_mostly;
-kmem_cache_t *gfs2_bufdata_cachep __read_mostly;
+struct kmem_cache *gfs2_glock_cachep __read_mostly;
+struct kmem_cache *gfs2_inode_cachep __read_mostly;
+struct kmem_cache *gfs2_bufdata_cachep __read_mostly;
 
 void gfs2_assert_i(struct gfs2_sbd *sdp)
 {
diff --git a/fs/gfs2/util.h b/fs/gfs2/util.h
index 76a5089..7984dcf 100644
--- a/fs/gfs2/util.h
+++ b/fs/gfs2/util.h
@@ -146,9 +146,9 @@
 gfs2_io_error_bh_i((sdp), (bh), __FUNCTION__, __FILE__, __LINE__);
 
 
-extern kmem_cache_t *gfs2_glock_cachep;
-extern kmem_cache_t *gfs2_inode_cachep;
-extern kmem_cache_t *gfs2_bufdata_cachep;
+extern struct kmem_cache *gfs2_glock_cachep;
+extern struct kmem_cache *gfs2_inode_cachep;
+extern struct kmem_cache *gfs2_bufdata_cachep;
 
 static inline unsigned int gfs2_tune_get_i(struct gfs2_tune *gt,
 					   unsigned int *p)
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index 85b17b3..a369879 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -24,7 +24,7 @@
 #include "hfs_fs.h"
 #include "btree.h"
 
-static kmem_cache_t *hfs_inode_cachep;
+static struct kmem_cache *hfs_inode_cachep;
 
 MODULE_LICENSE("GPL");
 
@@ -145,7 +145,7 @@
 {
 	struct hfs_inode_info *i;
 
-	i = kmem_cache_alloc(hfs_inode_cachep, SLAB_KERNEL);
+	i = kmem_cache_alloc(hfs_inode_cachep, GFP_KERNEL);
 	return i ? &i->vfs_inode : NULL;
 }
 
@@ -430,7 +430,7 @@
 	.fs_flags	= FS_REQUIRES_DEV,
 };
 
-static void hfs_init_once(void *p, kmem_cache_t *cachep, unsigned long flags)
+static void hfs_init_once(void *p, struct kmem_cache *cachep, unsigned long flags)
 {
 	struct hfs_inode_info *i = p;
 
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 194eede..0f513c6 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -434,13 +434,13 @@
 MODULE_DESCRIPTION("Extended Macintosh Filesystem");
 MODULE_LICENSE("GPL");
 
-static kmem_cache_t *hfsplus_inode_cachep;
+static struct kmem_cache *hfsplus_inode_cachep;
 
 static struct inode *hfsplus_alloc_inode(struct super_block *sb)
 {
 	struct hfsplus_inode_info *i;
 
-	i = kmem_cache_alloc(hfsplus_inode_cachep, SLAB_KERNEL);
+	i = kmem_cache_alloc(hfsplus_inode_cachep, GFP_KERNEL);
 	return i ? &i->vfs_inode : NULL;
 }
 
@@ -467,7 +467,7 @@
 	.fs_flags	= FS_REQUIRES_DEV,
 };
 
-static void hfsplus_init_once(void *p, kmem_cache_t *cachep, unsigned long flags)
+static void hfsplus_init_once(void *p, struct kmem_cache *cachep, unsigned long flags)
 {
 	struct hfsplus_inode_info *i = p;
 
diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c
index ecc9180..594f9c4 100644
--- a/fs/hpfs/dir.c
+++ b/fs/hpfs/dir.c
@@ -84,7 +84,8 @@
 		}
 		if (!fno->dirflag) {
 			e = 1;
-			hpfs_error(inode->i_sb, "not a directory, fnode %08x",inode->i_ino);
+			hpfs_error(inode->i_sb, "not a directory, fnode %08lx",
+					(unsigned long)inode->i_ino);
 		}
 		if (hpfs_inode->i_dno != fno->u.external[0].disk_secno) {
 			e = 1;
@@ -144,8 +145,11 @@
 		}
 		if (de->first || de->last) {
 			if (hpfs_sb(inode->i_sb)->sb_chk) {
-				if (de->first && !de->last && (de->namelen != 2 || de ->name[0] != 1 || de->name[1] != 1)) hpfs_error(inode->i_sb, "hpfs_readdir: bad ^A^A entry; pos = %08x", old_pos);
-				if (de->last && (de->namelen != 1 || de ->name[0] != 255)) hpfs_error(inode->i_sb, "hpfs_readdir: bad \\377 entry; pos = %08x", old_pos);
+				if (de->first && !de->last && (de->namelen != 2
+				    || de ->name[0] != 1 || de->name[1] != 1))
+					hpfs_error(inode->i_sb, "hpfs_readdir: bad ^A^A entry; pos = %08lx", old_pos);
+				if (de->last && (de->namelen != 1 || de ->name[0] != 255))
+					hpfs_error(inode->i_sb, "hpfs_readdir: bad \\377 entry; pos = %08lx", old_pos);
 			}
 			hpfs_brelse4(&qbh);
 			goto again;
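
The hpfs hunks are largely printk-format fixes: ino_t and the directory positions are not plain ints on every architecture, so they are printed through %08lx (or %08llx) after an explicit cast rather than %08x. The pattern in miniature, with hpfs_error() being the real helper and the inode purely illustrative:

    /* Cast to the type the format actually expects; the width of
     * ino_t differs between architectures. */
    hpfs_error(inode->i_sb, "not a directory, fnode %08lx",
               (unsigned long)inode->i_ino);
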
diff --git a/fs/hpfs/dnode.c b/fs/hpfs/dnode.c
index 229ff2f..fe83c2b 100644
--- a/fs/hpfs/dnode.c
+++ b/fs/hpfs/dnode.c
@@ -533,10 +533,13 @@
 			struct buffer_head *bh;
 			struct dnode *d1;
 			struct quad_buffer_head qbh1;
-			if (hpfs_sb(i->i_sb)->sb_chk) if (up != i->i_ino) {
-				hpfs_error(i->i_sb, "bad pointer to fnode, dnode %08x, pointing to %08x, should be %08x", dno, up, i->i_ino);
+			if (hpfs_sb(i->i_sb)->sb_chk)
+			    if (up != i->i_ino) {
+				hpfs_error(i->i_sb,
+					"bad pointer to fnode, dnode %08x, pointing to %08x, should be %08lx",
+					dno, up, (unsigned long)i->i_ino);
 				return;
-			}
+			    }
 			if ((d1 = hpfs_map_dnode(i->i_sb, down, &qbh1))) {
 				d1->up = up;
 				d1->root_dnode = 1;
@@ -851,7 +854,9 @@
 	/* Going to the next dirent */
 	if ((d = de_next_de(de)) < dnode_end_de(dnode)) {
 		if (!(++*posp & 077)) {
-			hpfs_error(inode->i_sb, "map_pos_dirent: pos crossed dnode boundary; pos = %08x", *posp);
+			hpfs_error(inode->i_sb,
+				"map_pos_dirent: pos crossed dnode boundary; pos = %08llx",
+				(unsigned long long)*posp);
 			goto bail;
 		}
 		/* We're going down the tree */
diff --git a/fs/hpfs/ea.c b/fs/hpfs/ea.c
index 66339dc..547a838 100644
--- a/fs/hpfs/ea.c
+++ b/fs/hpfs/ea.c
@@ -243,8 +243,9 @@
 		fnode->ea_offs = 0xc4;
 	}
 	if (fnode->ea_offs < 0xc4 || fnode->ea_offs + fnode->acl_size_s + fnode->ea_size_s > 0x200) {
-		hpfs_error(s, "fnode %08x: ea_offs == %03x, ea_size_s == %03x",
-			inode->i_ino, fnode->ea_offs, fnode->ea_size_s);
+		hpfs_error(s, "fnode %08lx: ea_offs == %03x, ea_size_s == %03x",
+			(unsigned long)inode->i_ino,
+			fnode->ea_offs, fnode->ea_size_s);
 		return;
 	}
 	if ((fnode->ea_size_s || !fnode->ea_size_l) &&
diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h
index 32ab51e..1c07aa8 100644
--- a/fs/hpfs/hpfs_fn.h
+++ b/fs/hpfs/hpfs_fn.h
@@ -317,7 +317,8 @@
 
 /* super.c */
 
-void hpfs_error(struct super_block *, char *, ...);
+void hpfs_error(struct super_block *, const char *, ...)
+	__attribute__((format (printf, 2, 3)));
 int hpfs_stop_cycles(struct super_block *, int, int *, int *, char *);
 unsigned hpfs_count_one_bitmap(struct super_block *, secno);
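
The prototype change above is what makes those format fixes enforceable: with the printf attribute, gcc's -Wformat checks every hpfs_error() call against its format string. A sketch of the annotation on a hypothetical logger; the two numbers are the 1-based positions of the format string and of the first checked argument:

    void foo_error(struct super_block *sb, const char *fmt, ...)
            __attribute__((format(printf, 2, 3)));

    /* Mismatches are now caught at compile time, e.g. passing an
     * ino_t to %08x without a cast triggers a format warning. */
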
 
diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c
index 7faef85..85d3e1d 100644
--- a/fs/hpfs/inode.c
+++ b/fs/hpfs/inode.c
@@ -251,7 +251,10 @@
 			de->file_size = 0;
 			hpfs_mark_4buffers_dirty(&qbh);
 			hpfs_brelse4(&qbh);
-		} else hpfs_error(i->i_sb, "directory %08x doesn't have '.' entry", i->i_ino);
+		} else
+			hpfs_error(i->i_sb,
+				"directory %08lx doesn't have '.' entry",
+				(unsigned long)i->i_ino);
 	}
 	mark_buffer_dirty(bh);
 	brelse(bh);
diff --git a/fs/hpfs/map.c b/fs/hpfs/map.c
index 0fecdac..c472458 100644
--- a/fs/hpfs/map.c
+++ b/fs/hpfs/map.c
@@ -126,32 +126,40 @@
 			struct extended_attribute *ea;
 			struct extended_attribute *ea_end;
 			if (fnode->magic != FNODE_MAGIC) {
-				hpfs_error(s, "bad magic on fnode %08x", ino);
+				hpfs_error(s, "bad magic on fnode %08lx",
+					(unsigned long)ino);
 				goto bail;
 			}
 			if (!fnode->dirflag) {
 				if ((unsigned)fnode->btree.n_used_nodes + (unsigned)fnode->btree.n_free_nodes !=
 				    (fnode->btree.internal ? 12 : 8)) {
-					hpfs_error(s, "bad number of nodes in fnode %08x", ino);
+					hpfs_error(s,
+					   "bad number of nodes in fnode %08lx",
+					    (unsigned long)ino);
 					goto bail;
 				}
 				if (fnode->btree.first_free !=
 				    8 + fnode->btree.n_used_nodes * (fnode->btree.internal ? 8 : 12)) {
-					hpfs_error(s, "bad first_free pointer in fnode %08x", ino);
+					hpfs_error(s,
+					    "bad first_free pointer in fnode %08lx",
+					    (unsigned long)ino);
 					goto bail;
 				}
 			}
 			if (fnode->ea_size_s && ((signed int)fnode->ea_offs < 0xc4 ||
 			   (signed int)fnode->ea_offs + fnode->acl_size_s + fnode->ea_size_s > 0x200)) {
-				hpfs_error(s, "bad EA info in fnode %08x: ea_offs == %04x ea_size_s == %04x",
-					ino, fnode->ea_offs, fnode->ea_size_s);
+				hpfs_error(s,
+					"bad EA info in fnode %08lx: ea_offs == %04x ea_size_s == %04x",
+					(unsigned long)ino,
+					fnode->ea_offs, fnode->ea_size_s);
 				goto bail;
 			}
 			ea = fnode_ea(fnode);
 			ea_end = fnode_end_ea(fnode);
 			while (ea != ea_end) {
 				if (ea > ea_end) {
-					hpfs_error(s, "bad EA in fnode %08x", ino);
+					hpfs_error(s, "bad EA in fnode %08lx",
+						(unsigned long)ino);
 					goto bail;
 				}
 				ea = next_ea(ea);
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index 450b5e0..d4abc1a 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -46,21 +46,17 @@
 }
 
 /* Filesystem error... */
+static char err_buf[1024];
 
-#define ERR_BUF_SIZE 1024
-
-void hpfs_error(struct super_block *s, char *m,...)
+void hpfs_error(struct super_block *s, const char *fmt, ...)
 {
-	char *buf;
-	va_list l;
-	va_start(l, m);
-	if (!(buf = kmalloc(ERR_BUF_SIZE, GFP_KERNEL)))
-		printk("HPFS: No memory for error message '%s'\n",m);
-	else if (vsprintf(buf, m, l) >= ERR_BUF_SIZE)
-		printk("HPFS: Grrrr... Kernel memory corrupted ... going on, but it'll crash very soon :-(\n");
-	printk("HPFS: filesystem error: ");
-	if (buf) printk("%s", buf);
-	else printk("%s\n",m);
+	va_list args;
+
+	va_start(args, fmt);
+	vsnprintf(err_buf, sizeof(err_buf), fmt, args);
+	va_end(args);
+
+	printk("HPFS: filesystem error: %s", err_buf);
 	if (!hpfs_sb(s)->sb_was_error) {
 		if (hpfs_sb(s)->sb_err == 2) {
 			printk("; crashing the system because you wanted it\n");
@@ -76,7 +72,6 @@
 		} else if (s->s_flags & MS_RDONLY) printk("; going on - but anything won't be destroyed because it's read-only\n");
 		else printk("; corrupted filesystem mounted read/write - your computer will explode within 20 seconds ... but you wanted it so!\n");
 	} else printk("\n");
-	kfree(buf);
 	hpfs_sb(s)->sb_was_error = 1;
 }
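
The rewritten hpfs_error() above replaces the kmalloc()/vsprintf() pair with vsnprintf() into a static buffer, so formatting can neither fail for lack of memory nor overrun. The same shape for a hypothetical helper; a single static buffer assumes concurrent error reports are tolerable:

    #include <linux/kernel.h>

    static char foo_err_buf[1024];          /* shared; reports may race */

    static void foo_report(const char *fmt, ...)
    {
            va_list args;

            va_start(args, fmt);
            /* vsnprintf() truncates rather than writing past the buffer,
             * so no allocation-failure or overflow check is needed. */
            vsnprintf(foo_err_buf, sizeof(foo_err_buf), fmt, args);
            va_end(args);

            printk(KERN_ERR "foofs: %s\n", foo_err_buf);
    }
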
 
@@ -160,12 +155,12 @@
 	return 0;
 }
 
-static kmem_cache_t * hpfs_inode_cachep;
+static struct kmem_cache * hpfs_inode_cachep;
 
 static struct inode *hpfs_alloc_inode(struct super_block *sb)
 {
 	struct hpfs_inode_info *ei;
-	ei = (struct hpfs_inode_info *)kmem_cache_alloc(hpfs_inode_cachep, SLAB_NOFS);
+	ei = (struct hpfs_inode_info *)kmem_cache_alloc(hpfs_inode_cachep, GFP_NOFS);
 	if (!ei)
 		return NULL;
 	ei->vfs_inode.i_version = 1;
@@ -177,7 +172,7 @@
 	kmem_cache_free(hpfs_inode_cachep, hpfs_i(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct hpfs_inode_info *ei = (struct hpfs_inode_info *) foo;
 
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 7f47569..0706f5a 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -513,7 +513,7 @@
 }
 
 
-static kmem_cache_t *hugetlbfs_inode_cachep;
+static struct kmem_cache *hugetlbfs_inode_cachep;
 
 static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
 {
@@ -522,7 +522,7 @@
 
 	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
 		return NULL;
-	p = kmem_cache_alloc(hugetlbfs_inode_cachep, SLAB_KERNEL);
+	p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
 	if (unlikely(!p)) {
 		hugetlbfs_inc_free_inodes(sbinfo);
 		return NULL;
@@ -545,7 +545,7 @@
 };
 
 
-static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
+static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
 {
 	struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;
 
diff --git a/fs/inode.c b/fs/inode.c
index 26cdb11..9ecccab 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -97,7 +97,7 @@
  */
 struct inodes_stat_t inodes_stat;
 
-static kmem_cache_t * inode_cachep __read_mostly;
+static struct kmem_cache * inode_cachep __read_mostly;
 
 static struct inode *alloc_inode(struct super_block *sb)
 {
@@ -109,7 +109,7 @@
 	if (sb->s_op->alloc_inode)
 		inode = sb->s_op->alloc_inode(sb);
 	else
-		inode = (struct inode *) kmem_cache_alloc(inode_cachep, SLAB_KERNEL);
+		inode = (struct inode *) kmem_cache_alloc(inode_cachep, GFP_KERNEL);
 
 	if (inode) {
 		struct address_space * const mapping = &inode->i_data;
@@ -209,7 +209,7 @@
 
 EXPORT_SYMBOL(inode_init_once);
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct inode * inode = (struct inode *) foo;
 
@@ -1242,9 +1242,6 @@
  */
 #ifdef CONFIG_QUOTA
 
-/* Function back in dquot.c */
-int remove_inode_dquot_ref(struct inode *, int, struct list_head *);
-
 void remove_dquot_ref(struct super_block *sb, int type,
 			struct list_head *tofree_head)
 {
diff --git a/fs/inotify_user.c b/fs/inotify_user.c
index 017cb0f..e1956e6 100644
--- a/fs/inotify_user.c
+++ b/fs/inotify_user.c
@@ -34,8 +34,8 @@
 
 #include <asm/ioctls.h>
 
-static kmem_cache_t *watch_cachep __read_mostly;
-static kmem_cache_t *event_cachep __read_mostly;
+static struct kmem_cache *watch_cachep __read_mostly;
+static struct kmem_cache *event_cachep __read_mostly;
 
 static struct vfsmount *inotify_mnt __read_mostly;
 
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index c34b862..ea55b6c 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -57,12 +57,12 @@
 static void isofs_read_inode(struct inode *);
 static int isofs_statfs (struct dentry *, struct kstatfs *);
 
-static kmem_cache_t *isofs_inode_cachep;
+static struct kmem_cache *isofs_inode_cachep;
 
 static struct inode *isofs_alloc_inode(struct super_block *sb)
 {
 	struct iso_inode_info *ei;
-	ei = kmem_cache_alloc(isofs_inode_cachep, SLAB_KERNEL);
+	ei = kmem_cache_alloc(isofs_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	return &ei->vfs_inode;
@@ -73,7 +73,7 @@
 	kmem_cache_free(isofs_inode_cachep, ISOFS_I(inode));
 }
 
-static void init_once(void *foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct iso_inode_info *ei = foo;
 
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index b85c686..10fff94 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -31,7 +31,7 @@
 #include <linux/smp_lock.h>
 #include <linux/init.h>
 #include <linux/mm.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <linux/pagemap.h>
 #include <linux/kthread.h>
 #include <linux/poison.h>
@@ -1630,7 +1630,7 @@
 #define JBD_MAX_SLABS 5
 #define JBD_SLAB_INDEX(size)  (size >> 11)
 
-static kmem_cache_t *jbd_slab[JBD_MAX_SLABS];
+static struct kmem_cache *jbd_slab[JBD_MAX_SLABS];
 static const char *jbd_slab_names[JBD_MAX_SLABS] = {
 	"jbd_1k", "jbd_2k", "jbd_4k", NULL, "jbd_8k"
 };
@@ -1693,7 +1693,7 @@
 /*
  * Journal_head storage management
  */
-static kmem_cache_t *journal_head_cache;
+static struct kmem_cache *journal_head_cache;
 #ifdef CONFIG_JBD_DEBUG
 static atomic_t nr_journal_heads = ATOMIC_INIT(0);
 #endif
@@ -1996,7 +1996,7 @@
 
 #endif
 
-kmem_cache_t *jbd_handle_cache;
+struct kmem_cache *jbd_handle_cache;
 
 static int __init journal_init_handle_cache(void)
 {
diff --git a/fs/jbd/revoke.c b/fs/jbd/revoke.c
index c532429..d204ab3 100644
--- a/fs/jbd/revoke.c
+++ b/fs/jbd/revoke.c
@@ -70,8 +70,8 @@
 #include <linux/init.h>
 #endif
 
-static kmem_cache_t *revoke_record_cache;
-static kmem_cache_t *revoke_table_cache;
+static struct kmem_cache *revoke_record_cache;
+static struct kmem_cache *revoke_table_cache;
 
 /* Each revoke record represents one single revoked block.  During
    journal replay, this involves recording the transaction ID of the
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index 4f82bcd6..d38e0d5 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -27,6 +27,8 @@
 #include <linux/mm.h>
 #include <linux/highmem.h>
 
+static void __journal_temp_unlink_buffer(struct journal_head *jh);
+
 /*
  * get_transaction: obtain a new transaction_t object.
  *
@@ -1499,7 +1501,7 @@
  *
  * Called under j_list_lock.  The journal may not be locked.
  */
-void __journal_temp_unlink_buffer(struct journal_head *jh)
+static void __journal_temp_unlink_buffer(struct journal_head *jh)
 {
 	struct journal_head **list = NULL;
 	transaction_t *transaction;
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 70b2ae1..6bd8005 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -248,8 +248,12 @@
 				bufs = 0;
 				goto write_out_data;
 			}
-		}
-		else {
+		} else if (!locked && buffer_locked(bh)) {
+			__jbd2_journal_file_buffer(jh, commit_transaction,
+						BJ_Locked);
+			jbd_unlock_bh_state(bh);
+			put_bh(bh);
+		} else {
 			BUFFER_TRACE(bh, "writeout complete: unfile");
 			__jbd2_journal_unfile_buffer(jh);
 			jbd_unlock_bh_state(bh);
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index c60f378..44fc32b 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -31,7 +31,7 @@
 #include <linux/smp_lock.h>
 #include <linux/init.h>
 #include <linux/mm.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <linux/pagemap.h>
 #include <linux/kthread.h>
 #include <linux/poison.h>
@@ -1641,7 +1641,7 @@
 #define JBD_MAX_SLABS 5
 #define JBD_SLAB_INDEX(size)  (size >> 11)
 
-static kmem_cache_t *jbd_slab[JBD_MAX_SLABS];
+static struct kmem_cache *jbd_slab[JBD_MAX_SLABS];
 static const char *jbd_slab_names[JBD_MAX_SLABS] = {
 	"jbd2_1k", "jbd2_2k", "jbd2_4k", NULL, "jbd2_8k"
 };
@@ -1704,7 +1704,7 @@
 /*
  * Journal_head storage management
  */
-static kmem_cache_t *jbd2_journal_head_cache;
+static struct kmem_cache *jbd2_journal_head_cache;
 #ifdef CONFIG_JBD_DEBUG
 static atomic_t nr_journal_heads = ATOMIC_INIT(0);
 #endif
@@ -2007,7 +2007,7 @@
 
 #endif
 
-kmem_cache_t *jbd2_handle_cache;
+struct kmem_cache *jbd2_handle_cache;
 
 static int __init journal_init_handle_cache(void)
 {
diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
index 380d199..f506646 100644
--- a/fs/jbd2/revoke.c
+++ b/fs/jbd2/revoke.c
@@ -70,8 +70,8 @@
 #include <linux/init.h>
 #endif
 
-static kmem_cache_t *jbd2_revoke_record_cache;
-static kmem_cache_t *jbd2_revoke_table_cache;
+static struct kmem_cache *jbd2_revoke_record_cache;
+static struct kmem_cache *jbd2_revoke_table_cache;
 
 /* Each revoke record represents one single revoked block.  During
    journal replay, this involves recording the transaction ID of the
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index c051a94..3a87001 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -27,6 +27,8 @@
 #include <linux/mm.h>
 #include <linux/highmem.h>
 
+static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh);
+
 /*
  * jbd2_get_transaction: obtain a new transaction_t object.
  *
diff --git a/fs/jffs/inode-v23.c b/fs/jffs/inode-v23.c
index 3f7899e..9f15bce 100644
--- a/fs/jffs/inode-v23.c
+++ b/fs/jffs/inode-v23.c
@@ -61,8 +61,8 @@
 static struct inode_operations jffs_dir_inode_operations;
 static const struct address_space_operations jffs_address_operations;
 
-kmem_cache_t     *node_cache = NULL;
-kmem_cache_t     *fm_cache = NULL;
+struct kmem_cache     *node_cache = NULL;
+struct kmem_cache     *fm_cache = NULL;
 
 /* Called by the VFS at mount time to initialize the whole file system.  */
 static int jffs_fill_super(struct super_block *sb, void *data, int silent)
diff --git a/fs/jffs/intrep.c b/fs/jffs/intrep.c
index 4a543e1..d0e783f 100644
--- a/fs/jffs/intrep.c
+++ b/fs/jffs/intrep.c
@@ -66,6 +66,7 @@
 #include <linux/smp_lock.h>
 #include <linux/time.h>
 #include <linux/ctype.h>
+#include <linux/freezer.h>
 
 #include "intrep.h"
 #include "jffs_fm.h"
@@ -591,7 +592,7 @@
 	D2(printk("jffs_add_virtual_root(): "
 		  "Creating a virtual root directory.\n"));
 
-	if (!(root = kmalloc(sizeof(struct jffs_file), GFP_KERNEL))) {
+	if (!(root = kzalloc(sizeof(struct jffs_file), GFP_KERNEL))) {
 		return -ENOMEM;
 	}
 	no_jffs_file++;
@@ -603,7 +604,6 @@
 	DJM(no_jffs_node++);
 	memset(node, 0, sizeof(struct jffs_node));
 	node->ino = JFFS_MIN_INO;
-	memset(root, 0, sizeof(struct jffs_file));
 	root->ino = JFFS_MIN_INO;
 	root->mode = S_IFDIR | S_IRWXU | S_IRGRP
 		     | S_IXGRP | S_IROTH | S_IXOTH;
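
The jffs hunk is the usual kmalloc()+memset() cleanup: the explicit memset(root, 0, ...) a few lines later is dropped because kzalloc() already returns zeroed memory. In miniature:

    struct jffs_file *root;

    /* kzalloc() zeroes the allocation, so no separate memset(). */
    root = kzalloc(sizeof(struct jffs_file), GFP_KERNEL);
    if (!root)
            return -ENOMEM;
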
diff --git a/fs/jffs/jffs_fm.c b/fs/jffs/jffs_fm.c
index 29b68d9..077258b 100644
--- a/fs/jffs/jffs_fm.c
+++ b/fs/jffs/jffs_fm.c
@@ -29,8 +29,8 @@
 static struct jffs_fm *jffs_alloc_fm(void);
 static void jffs_free_fm(struct jffs_fm *n);
 
-extern kmem_cache_t     *fm_cache;
-extern kmem_cache_t     *node_cache;
+extern struct kmem_cache     *fm_cache;
+extern struct kmem_cache     *node_cache;
 
 #if CONFIG_JFFS_FS_VERBOSE > 0
 void
diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c
index ff2a872..6eb3dae 100644
--- a/fs/jffs2/background.c
+++ b/fs/jffs2/background.c
@@ -16,6 +16,7 @@
 #include <linux/mtd/mtd.h>
 #include <linux/completion.h>
 #include <linux/sched.h>
+#include <linux/freezer.h>
 #include "nodelist.h"
 
 
diff --git a/fs/jffs2/malloc.c b/fs/jffs2/malloc.c
index 33f2910..83f9881 100644
--- a/fs/jffs2/malloc.c
+++ b/fs/jffs2/malloc.c
@@ -19,16 +19,16 @@
 
 /* These are initialised to NULL in the kernel startup code.
    If you're porting to other operating systems, beware */
-static kmem_cache_t *full_dnode_slab;
-static kmem_cache_t *raw_dirent_slab;
-static kmem_cache_t *raw_inode_slab;
-static kmem_cache_t *tmp_dnode_info_slab;
-static kmem_cache_t *raw_node_ref_slab;
-static kmem_cache_t *node_frag_slab;
-static kmem_cache_t *inode_cache_slab;
+static struct kmem_cache *full_dnode_slab;
+static struct kmem_cache *raw_dirent_slab;
+static struct kmem_cache *raw_inode_slab;
+static struct kmem_cache *tmp_dnode_info_slab;
+static struct kmem_cache *raw_node_ref_slab;
+static struct kmem_cache *node_frag_slab;
+static struct kmem_cache *inode_cache_slab;
 #ifdef CONFIG_JFFS2_FS_XATTR
-static kmem_cache_t *xattr_datum_cache;
-static kmem_cache_t *xattr_ref_cache;
+static struct kmem_cache *xattr_datum_cache;
+static struct kmem_cache *xattr_ref_cache;
 #endif
 
 int __init jffs2_create_slab_caches(void)
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index bc4b810..7deb782 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -28,12 +28,12 @@
 
 static void jffs2_put_super(struct super_block *);
 
-static kmem_cache_t *jffs2_inode_cachep;
+static struct kmem_cache *jffs2_inode_cachep;
 
 static struct inode *jffs2_alloc_inode(struct super_block *sb)
 {
 	struct jffs2_inode_info *ei;
-	ei = (struct jffs2_inode_info *)kmem_cache_alloc(jffs2_inode_cachep, SLAB_KERNEL);
+	ei = (struct jffs2_inode_info *)kmem_cache_alloc(jffs2_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	return &ei->vfs_inode;
@@ -44,7 +44,7 @@
 	kmem_cache_free(jffs2_inode_cachep, JFFS2_INODE_INFO(inode));
 }
 
-static void jffs2_i_init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void jffs2_i_init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct jffs2_inode_info *ei = (struct jffs2_inode_info *) foo;
 
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index b89c9ab..5065baa 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -67,7 +67,7 @@
 #include <linux/kthread.h>
 #include <linux/buffer_head.h>		/* for sync_blockdev() */
 #include <linux/bio.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <linux/delay.h>
 #include <linux/mutex.h>
 #include "jfs_incore.h"
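
Several of the include swaps above move kernel threads from <linux/suspend.h> to <linux/freezer.h>, the new home of the freezer helpers. A sketch of a freezable worker thread built on those helpers; the thread body is hypothetical:

    #include <linux/sched.h>
    #include <linux/kthread.h>
    #include <linux/freezer.h>

    static int foo_thread(void *data)
    {
            while (!kthread_should_stop()) {
                    /* Blocks here while the system is suspending. */
                    try_to_freeze();

                    /* ... perform one unit of work, then sleep ... */
                    schedule_timeout_interruptible(HZ);
            }
            return 0;
    }
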
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index 0cccd1c..b1a1c72 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -74,7 +74,7 @@
 }
 
 #define METAPOOL_MIN_PAGES 32
-static kmem_cache_t *metapage_cache;
+static struct kmem_cache *metapage_cache;
 static mempool_t *metapage_mempool;
 
 #define MPS_PER_PAGE (PAGE_CACHE_SIZE >> L2PSIZE)
@@ -180,7 +180,7 @@
 
 #endif
 
-static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
+static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
 {
 	struct metapage *mp = (struct metapage *)foo;
 
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
index 81f6f04..d558e51 100644
--- a/fs/jfs/jfs_txnmgr.c
+++ b/fs/jfs/jfs_txnmgr.c
@@ -46,7 +46,7 @@
 #include <linux/vmalloc.h>
 #include <linux/smp_lock.h>
 #include <linux/completion.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/kthread.h>
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 9c1c6e0..846ac8f 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -44,7 +44,7 @@
 MODULE_AUTHOR("Steve Best/Dave Kleikamp/Barry Arndt, IBM");
 MODULE_LICENSE("GPL");
 
-static kmem_cache_t * jfs_inode_cachep;
+static struct kmem_cache * jfs_inode_cachep;
 
 static struct super_operations jfs_super_operations;
 static struct export_operations jfs_export_operations;
@@ -93,7 +93,7 @@
 	va_list args;
 
 	va_start(args, function);
-	vsprintf(error_buf, function, args);
+	vsnprintf(error_buf, sizeof(error_buf), function, args);
 	va_end(args);
 
 	printk(KERN_ERR "ERROR: (device %s): %s\n", sb->s_id, error_buf);
@@ -748,7 +748,7 @@
 	.fs_flags	= FS_REQUIRES_DEV,
 };
 
-static void init_once(void *foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct jfs_inode_info *jfs_ip = (struct jfs_inode_info *) foo;
 
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index 3d84f60..50643b6 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -13,6 +13,7 @@
 #include <linux/nfs_fs.h>
 #include <linux/utsname.h>
 #include <linux/smp_lock.h>
+#include <linux/freezer.h>
 #include <linux/sunrpc/clnt.h>
 #include <linux/sunrpc/svc.h>
 #include <linux/lockd/lockd.h>
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index fb24a97..3d4610c 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -36,34 +36,14 @@
 static void			nlm_gc_hosts(void);
 static struct nsm_handle *	__nsm_find(const struct sockaddr_in *,
 					const char *, int, int);
-
-/*
- * Find an NLM server handle in the cache. If there is none, create it.
- */
-struct nlm_host *
-nlmclnt_lookup_host(const struct sockaddr_in *sin, int proto, int version,
-			const char *hostname, int hostname_len)
-{
-	return nlm_lookup_host(0, sin, proto, version,
-			       hostname, hostname_len);
-}
-
-/*
- * Find an NLM client handle in the cache. If there is none, create it.
- */
-struct nlm_host *
-nlmsvc_lookup_host(struct svc_rqst *rqstp,
-			const char *hostname, int hostname_len)
-{
-	return nlm_lookup_host(1, &rqstp->rq_addr,
-			       rqstp->rq_prot, rqstp->rq_vers,
-			       hostname, hostname_len);
-}
+static struct nsm_handle *	nsm_find(const struct sockaddr_in *sin,
+					 const char *hostname,
+					 int hostname_len);
 
 /*
  * Common host lookup routine for server & client
  */
-struct nlm_host *
+static struct nlm_host *
 nlm_lookup_host(int server, const struct sockaddr_in *sin,
 					int proto, int version,
 					const char *hostname,
@@ -195,6 +175,29 @@
 }
 
 /*
+ * Find an NLM server handle in the cache. If there is none, create it.
+ */
+struct nlm_host *
+nlmclnt_lookup_host(const struct sockaddr_in *sin, int proto, int version,
+			const char *hostname, int hostname_len)
+{
+	return nlm_lookup_host(0, sin, proto, version,
+			       hostname, hostname_len);
+}
+
+/*
+ * Find an NLM client handle in the cache. If there is none, create it.
+ */
+struct nlm_host *
+nlmsvc_lookup_host(struct svc_rqst *rqstp,
+			const char *hostname, int hostname_len)
+{
+	return nlm_lookup_host(1, &rqstp->rq_addr,
+			       rqstp->rq_prot, rqstp->rq_vers,
+			       hostname, hostname_len);
+}
+
+/*
  * Create the NLM RPC client for an NLM peer
  */
 struct rpc_clnt *
@@ -495,7 +498,7 @@
 	return nsm;
 }
 
-struct nsm_handle *
+static struct nsm_handle *
 nsm_find(const struct sockaddr_in *sin, const char *hostname, int hostname_len)
 {
 	return __nsm_find(sin, hostname, hostname_len, 1);
diff --git a/fs/locks.c b/fs/locks.c
index e0b6a80..1cb0c57 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -142,12 +142,12 @@
 static LIST_HEAD(file_lock_list);
 static LIST_HEAD(blocked_list);
 
-static kmem_cache_t *filelock_cache __read_mostly;
+static struct kmem_cache *filelock_cache __read_mostly;
 
 /* Allocate an empty lock structure. */
 static struct file_lock *locks_alloc_lock(void)
 {
-	return kmem_cache_alloc(filelock_cache, SLAB_KERNEL);
+	return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
 }
 
 static void locks_release_private(struct file_lock *fl)
@@ -199,7 +199,7 @@
  * Initialises the fields of the file lock which are invariant for
  * free file_locks.
  */
-static void init_once(void *foo, kmem_cache_t *cache, unsigned long flags)
+static void init_once(void *foo, struct kmem_cache *cache, unsigned long flags)
 {
 	struct file_lock *lock = (struct file_lock *) foo;
 
diff --git a/fs/mbcache.c b/fs/mbcache.c
index 0ff7125..deeb9dc 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -85,7 +85,7 @@
 #ifndef MB_CACHE_INDEXES_COUNT
 	int				c_indexes_count;
 #endif
-	kmem_cache_t			*c_entry_cache;
+	struct kmem_cache			*c_entry_cache;
 	struct list_head		*c_block_hash;
 	struct list_head		*c_indexes_hash[0];
 };
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index 1e36bae..629e09b 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -51,12 +51,12 @@
 	return;
 }
 
-static kmem_cache_t * minix_inode_cachep;
+static struct kmem_cache * minix_inode_cachep;
 
 static struct inode *minix_alloc_inode(struct super_block *sb)
 {
 	struct minix_inode_info *ei;
-	ei = (struct minix_inode_info *)kmem_cache_alloc(minix_inode_cachep, SLAB_KERNEL);
+	ei = (struct minix_inode_info *)kmem_cache_alloc(minix_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	return &ei->vfs_inode;
@@ -67,7 +67,7 @@
 	kmem_cache_free(minix_inode_cachep, minix_i(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct minix_inode_info *ei = (struct minix_inode_info *) foo;
 
diff --git a/fs/namei.c b/fs/namei.c
index 28d49b3..db1bca2 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -249,9 +249,11 @@
 
 	/*
 	 * MAY_EXEC on regular files requires special handling: We override
-	 * filesystem execute permissions if the mode bits aren't set.
+	 * filesystem execute permissions if the mode bits aren't set or
+	 * the fs is mounted with the "noexec" flag.
 	 */
-	if ((mask & MAY_EXEC) && S_ISREG(mode) && !(mode & S_IXUGO))
+	if ((mask & MAY_EXEC) && S_ISREG(mode) && (!(mode & S_IXUGO) ||
+			(nd && nd->mnt && (nd->mnt->mnt_flags & MNT_NOEXEC))))
 		return -EACCES;
 
 	/* Ordinary permission routines do not understand MAY_APPEND. */
@@ -1996,8 +1998,7 @@
 void dentry_unhash(struct dentry *dentry)
 {
 	dget(dentry);
-	if (atomic_read(&dentry->d_count))
-		shrink_dcache_parent(dentry);
+	shrink_dcache_parent(dentry);
 	spin_lock(&dcache_lock);
 	spin_lock(&dentry->d_lock);
 	if (atomic_read(&dentry->d_count) == 2)
diff --git a/fs/namespace.c b/fs/namespace.c
index 55442a6..b00ac84 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -36,7 +36,7 @@
 
 static struct list_head *mount_hashtable __read_mostly;
 static int hash_mask __read_mostly, hash_bits __read_mostly;
-static kmem_cache_t *mnt_cache __read_mostly;
+static struct kmem_cache *mnt_cache __read_mostly;
 static struct rw_semaphore namespace_sem;
 
 /* /sys/fs */
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index 42e3bef..fae5324 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -40,12 +40,12 @@
 static void ncp_put_super(struct super_block *);
 static int  ncp_statfs(struct dentry *, struct kstatfs *);
 
-static kmem_cache_t * ncp_inode_cachep;
+static struct kmem_cache * ncp_inode_cachep;
 
 static struct inode *ncp_alloc_inode(struct super_block *sb)
 {
 	struct ncp_inode_info *ei;
-	ei = (struct ncp_inode_info *)kmem_cache_alloc(ncp_inode_cachep, SLAB_KERNEL);
+	ei = (struct ncp_inode_info *)kmem_cache_alloc(ncp_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	return &ei->vfs_inode;
@@ -56,7 +56,7 @@
 	kmem_cache_free(ncp_inode_cachep, NCP_FINFO(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct ncp_inode_info *ei = (struct ncp_inode_info *) foo;
 
@@ -577,12 +577,12 @@
 		server->rcv.ptr = (unsigned char*)&server->rcv.buf;
 		server->rcv.len = 10;
 		server->rcv.state = 0;
-		INIT_WORK(&server->rcv.tq, ncp_tcp_rcv_proc, server);
-		INIT_WORK(&server->tx.tq, ncp_tcp_tx_proc, server);
+		INIT_WORK(&server->rcv.tq, ncp_tcp_rcv_proc);
+		INIT_WORK(&server->tx.tq, ncp_tcp_tx_proc);
 		sock->sk->sk_write_space = ncp_tcp_write_space;
 	} else {
-		INIT_WORK(&server->rcv.tq, ncpdgram_rcv_proc, server);
-		INIT_WORK(&server->timeout_tq, ncpdgram_timeout_proc, server);
+		INIT_WORK(&server->rcv.tq, ncpdgram_rcv_proc);
+		INIT_WORK(&server->timeout_tq, ncpdgram_timeout_proc);
 		server->timeout_tm.data = (unsigned long)server;
 		server->timeout_tm.function = ncpdgram_timeout_call;
 	}
diff --git a/fs/ncpfs/sock.c b/fs/ncpfs/sock.c
index 11c2b25..e496d8b 100644
--- a/fs/ncpfs/sock.c
+++ b/fs/ncpfs/sock.c
@@ -350,9 +350,10 @@
 	}
 }
 
-void ncpdgram_rcv_proc(void *s)
+void ncpdgram_rcv_proc(struct work_struct *work)
 {
-	struct ncp_server *server = s;
+	struct ncp_server *server =
+		container_of(work, struct ncp_server, rcv.tq);
 	struct socket* sock;
 	
 	sock = server->ncp_sock;
@@ -468,9 +469,10 @@
 	}
 }
 
-void ncpdgram_timeout_proc(void *s)
+void ncpdgram_timeout_proc(struct work_struct *work)
 {
-	struct ncp_server *server = s;
+	struct ncp_server *server =
+		container_of(work, struct ncp_server, timeout_tq);
 	mutex_lock(&server->rcv.creq_mutex);
 	__ncpdgram_timeout_proc(server);
 	mutex_unlock(&server->rcv.creq_mutex);
@@ -652,18 +654,20 @@
 	}
 }
 
-void ncp_tcp_rcv_proc(void *s)
+void ncp_tcp_rcv_proc(struct work_struct *work)
 {
-	struct ncp_server *server = s;
+	struct ncp_server *server =
+		container_of(work, struct ncp_server, rcv.tq);
 
 	mutex_lock(&server->rcv.creq_mutex);
 	__ncptcp_rcv_proc(server);
 	mutex_unlock(&server->rcv.creq_mutex);
 }
 
-void ncp_tcp_tx_proc(void *s)
+void ncp_tcp_tx_proc(struct work_struct *work)
 {
-	struct ncp_server *server = s;
+	struct ncp_server *server =
+		container_of(work, struct ncp_server, tx.tq);
 	
 	mutex_lock(&server->rcv.creq_mutex);
 	__ncptcp_try_send(server);
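
The ncpfs conversion shows the workqueue API change that recurs through the rest of this merge: INIT_WORK() no longer takes a data pointer, and handlers receive the struct work_struct itself, recovering their context with container_of(). A hedged sketch with a hypothetical foo_server structure:

    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    struct foo_server {                     /* hypothetical context */
            int                     id;
            struct work_struct      rx_work;
    };

    static void foo_rx_worker(struct work_struct *work)
    {
            /* The old void *data argument is gone; recover the enclosing
             * object from the work item itself. */
            struct foo_server *server =
                    container_of(work, struct foo_server, rx_work);

            printk(KERN_DEBUG "foo: rx work for server %d\n", server->id);
    }

    static void foo_setup(struct foo_server *server)
    {
            /* INIT_WORK() now takes only the work item and the handler. */
            INIT_WORK(&server->rx_work, foo_rx_worker);
    }
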
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 5fea638..23ab145 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -143,7 +143,7 @@
 	INIT_LIST_HEAD(&clp->cl_state_owners);
 	INIT_LIST_HEAD(&clp->cl_unused);
 	spin_lock_init(&clp->cl_lock);
-	INIT_WORK(&clp->cl_renewd, nfs4_renew_state, clp);
+	INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state);
 	rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client");
 	clp->cl_boot_time = CURRENT_TIME;
 	clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index bdfabf8..2f488e1 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -58,7 +58,7 @@
 
 #define NFSDBG_FACILITY		NFSDBG_VFS
 
-static kmem_cache_t *nfs_direct_cachep;
+static struct kmem_cache *nfs_direct_cachep;
 
 /*
  * This represents a set of asynchronous requests that we're waiting on
@@ -143,7 +143,7 @@
 {
 	struct nfs_direct_req *dreq;
 
-	dreq = kmem_cache_alloc(nfs_direct_cachep, SLAB_KERNEL);
+	dreq = kmem_cache_alloc(nfs_direct_cachep, GFP_KERNEL);
 	if (!dreq)
 		return NULL;
 
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 08cc4c5..15afa46 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -55,7 +55,7 @@
 
 static void nfs_zap_acl_cache(struct inode *);
 
-static kmem_cache_t * nfs_inode_cachep;
+static struct kmem_cache * nfs_inode_cachep;
 
 static inline unsigned long
 nfs_fattr_to_ino_t(struct nfs_fattr *fattr)
@@ -1080,7 +1080,7 @@
 struct inode *nfs_alloc_inode(struct super_block *sb)
 {
 	struct nfs_inode *nfsi;
-	nfsi = (struct nfs_inode *)kmem_cache_alloc(nfs_inode_cachep, SLAB_KERNEL);
+	nfsi = (struct nfs_inode *)kmem_cache_alloc(nfs_inode_cachep, GFP_KERNEL);
 	if (!nfsi)
 		return NULL;
 	nfsi->flags = 0UL;
@@ -1111,7 +1111,7 @@
 #endif
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct nfs_inode *nfsi = (struct nfs_inode *) foo;
 
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index ec1114b..371b804 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -18,10 +18,10 @@
 
 #define NFSDBG_FACILITY		NFSDBG_VFS
 
-static void nfs_expire_automounts(void *list);
+static void nfs_expire_automounts(struct work_struct *work);
 
 LIST_HEAD(nfs_automount_list);
-static DECLARE_WORK(nfs_automount_task, nfs_expire_automounts, &nfs_automount_list);
+static DECLARE_DELAYED_WORK(nfs_automount_task, nfs_expire_automounts);
 int nfs_mountpoint_expiry_timeout = 500 * HZ;
 
 static struct vfsmount *nfs_do_submount(const struct vfsmount *mnt_parent,
@@ -164,9 +164,9 @@
 	.follow_link	= nfs_follow_mountpoint,
 };
 
-static void nfs_expire_automounts(void *data)
+static void nfs_expire_automounts(struct work_struct *work)
 {
-	struct list_head *list = (struct list_head *)data;
+	struct list_head *list = &nfs_automount_list;
 
 	mark_mounts_for_expiry(list);
 	if (!list_empty(list))
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 6f34667..c26cd97 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -185,7 +185,7 @@
 extern void nfs4_schedule_state_renewal(struct nfs_client *);
 extern void nfs4_renewd_prepare_shutdown(struct nfs_server *);
 extern void nfs4_kill_renewd(struct nfs_client *);
-extern void nfs4_renew_state(void *);
+extern void nfs4_renew_state(struct work_struct *);
 
 /* nfs4state.c */
 struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp);
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index 7b6df18..8232985 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -59,9 +59,10 @@
 #define NFSDBG_FACILITY	NFSDBG_PROC
 
 void
-nfs4_renew_state(void *data)
+nfs4_renew_state(struct work_struct *work)
 {
-	struct nfs_client *clp = (struct nfs_client *)data;
+	struct nfs_client *clp =
+		container_of(work, struct nfs_client, cl_renewd.work);
 	struct rpc_cred *cred;
 	long lease, timeout;
 	unsigned long last, now;
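
Work items that fire after a delay or re-arm themselves become struct delayed_work; their handlers still take a struct work_struct *, so container_of() goes through the embedded .work member, as the nfs4 renew daemon above does. A sketch with a hypothetical foo_client structure:

    #include <linux/kernel.h>
    #include <linux/jiffies.h>
    #include <linux/workqueue.h>

    struct foo_client {                     /* hypothetical context */
            struct delayed_work     renewd;
    };

    static void foo_renew(struct work_struct *work)
    {
            /* struct delayed_work embeds a work_struct named "work". */
            struct foo_client *clp =
                    container_of(work, struct foo_client, renewd.work);

            /* ... renew state here, then re-arm the timer ... */
            schedule_delayed_work(&clp->renewd, 60 * HZ);
    }

    static void foo_start(struct foo_client *clp)
    {
            INIT_DELAYED_WORK(&clp->renewd, foo_renew);
            schedule_delayed_work(&clp->renewd, 0);
    }
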
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 829af32..3fbfc2f 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -20,13 +20,13 @@
 
 #define NFS_PARANOIA 1
 
-static kmem_cache_t *nfs_page_cachep;
+static struct kmem_cache *nfs_page_cachep;
 
 static inline struct nfs_page *
 nfs_page_alloc(void)
 {
 	struct nfs_page	*p;
-	p = kmem_cache_alloc(nfs_page_cachep, SLAB_KERNEL);
+	p = kmem_cache_alloc(nfs_page_cachep, GFP_KERNEL);
 	if (p) {
 		memset(p, 0, sizeof(*p));
 		INIT_LIST_HEAD(&p->wb_list);
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index c2e49c3..244a8c4 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -38,7 +38,7 @@
 static const struct rpc_call_ops nfs_read_partial_ops;
 static const struct rpc_call_ops nfs_read_full_ops;
 
-static kmem_cache_t *nfs_rdata_cachep;
+static struct kmem_cache *nfs_rdata_cachep;
 static mempool_t *nfs_rdata_mempool;
 
 #define MIN_POOL_READ	(32)
@@ -46,7 +46,7 @@
 struct nfs_read_data *nfs_readdata_alloc(size_t len)
 {
 	unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, SLAB_NOFS);
+	struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, GFP_NOFS);
 
 	if (p) {
 		memset(p, 0, sizeof(*p));
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 883dd4a..41b0728 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -85,7 +85,7 @@
 static const struct rpc_call_ops nfs_write_full_ops;
 static const struct rpc_call_ops nfs_commit_ops;
 
-static kmem_cache_t *nfs_wdata_cachep;
+static struct kmem_cache *nfs_wdata_cachep;
 static mempool_t *nfs_wdata_mempool;
 static mempool_t *nfs_commit_mempool;
 
@@ -93,7 +93,7 @@
 
 struct nfs_write_data *nfs_commit_alloc(void)
 {
-	struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, SLAB_NOFS);
+	struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);
 
 	if (p) {
 		memset(p, 0, sizeof(*p));
@@ -112,7 +112,7 @@
 struct nfs_write_data *nfs_writedata_alloc(size_t len)
 {
 	unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, SLAB_NOFS);
+	struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);
 
 	if (p) {
 		memset(p, 0, sizeof(*p));
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index b4baca3..277df40 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -24,10 +24,6 @@
 
 #define NFSDDBG_FACILITY		NFSDDBG_XDR
 
-#ifdef NFSD_OPTIMIZE_SPACE
-# define inline
-#endif
-
 
 /*
  * Mapping of S_IF* types to NFS file types
@@ -42,14 +38,14 @@
 /*
  * XDR functions for basic NFS types
  */
-static inline __be32 *
+static __be32 *
 encode_time3(__be32 *p, struct timespec *time)
 {
 	*p++ = htonl((u32) time->tv_sec); *p++ = htonl(time->tv_nsec);
 	return p;
 }
 
-static inline __be32 *
+static __be32 *
 decode_time3(__be32 *p, struct timespec *time)
 {
 	time->tv_sec = ntohl(*p++);
@@ -57,7 +53,7 @@
 	return p;
 }
 
-static inline __be32 *
+static __be32 *
 decode_fh(__be32 *p, struct svc_fh *fhp)
 {
 	unsigned int size;
@@ -77,7 +73,7 @@
 	return decode_fh(p, fhp);
 }
 
-static inline __be32 *
+static __be32 *
 encode_fh(__be32 *p, struct svc_fh *fhp)
 {
 	unsigned int size = fhp->fh_handle.fh_size;
@@ -91,7 +87,7 @@
  * Decode a file name and make sure that the path contains
  * no slashes or null bytes.
  */
-static inline __be32 *
+static __be32 *
 decode_filename(__be32 *p, char **namp, int *lenp)
 {
 	char		*name;
@@ -107,7 +103,7 @@
 	return p;
 }
 
-static inline __be32 *
+static __be32 *
 decode_sattr3(__be32 *p, struct iattr *iap)
 {
 	u32	tmp;
@@ -153,7 +149,7 @@
 	return p;
 }
 
-static inline __be32 *
+static __be32 *
 encode_fattr3(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp,
 	      struct kstat *stat)
 {
@@ -186,7 +182,7 @@
 	return p;
 }
 
-static inline __be32 *
+static __be32 *
 encode_saved_post_attr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp)
 {
 	struct inode	*inode = fhp->fh_dentry->d_inode;
@@ -776,7 +772,7 @@
 		return xdr_ressize_check(rqstp, p);
 }
 
-static inline __be32 *
+static __be32 *
 encode_entry_baggage(struct nfsd3_readdirres *cd, __be32 *p, const char *name,
 	     int namlen, ino_t ino)
 {
@@ -790,7 +786,7 @@
 	return p;
 }
 
-static inline __be32 *
+static __be32 *
 encode_entryplus_baggage(struct nfsd3_readdirres *cd, __be32 *p,
 		struct svc_fh *fhp)
 {
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 293b649..640c92b 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -84,10 +84,10 @@
  */
 static DEFINE_MUTEX(client_mutex);
 
-static kmem_cache_t *stateowner_slab = NULL;
-static kmem_cache_t *file_slab = NULL;
-static kmem_cache_t *stateid_slab = NULL;
-static kmem_cache_t *deleg_slab = NULL;
+static struct kmem_cache *stateowner_slab = NULL;
+static struct kmem_cache *file_slab = NULL;
+static struct kmem_cache *stateid_slab = NULL;
+static struct kmem_cache *deleg_slab = NULL;
 
 void
 nfs4_lock_state(void)
@@ -1003,7 +1003,7 @@
 }
 
 static void
-nfsd4_free_slab(kmem_cache_t **slab)
+nfsd4_free_slab(struct kmem_cache **slab)
 {
 	if (*slab == NULL)
 		return;
@@ -1829,9 +1829,8 @@
 }
 
 static struct workqueue_struct *laundry_wq;
-static struct work_struct laundromat_work;
-static void laundromat_main(void *);
-static DECLARE_WORK(laundromat_work, laundromat_main, NULL);
+static void laundromat_main(struct work_struct *);
+static DECLARE_DELAYED_WORK(laundromat_work, laundromat_main);
 
 __be32
 nfsd4_renew(clientid_t *clid)
@@ -1940,7 +1939,7 @@
 }
 
 void
-laundromat_main(void *not_used)
+laundromat_main(struct work_struct *not_used)
 {
 	time_t t;
 
diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c
index 56ebb14..f5243f9 100644
--- a/fs/nfsd/nfsxdr.c
+++ b/fs/nfsd/nfsxdr.c
@@ -18,11 +18,6 @@
 
 #define NFSDDBG_FACILITY		NFSDDBG_XDR
 
-
-#ifdef NFSD_OPTIMIZE_SPACE
-# define inline
-#endif
-
 /*
  * Mapping of S_IF* types to NFS file types
  */
@@ -55,7 +50,7 @@
 	return decode_fh(p, fhp);
 }
 
-static inline __be32 *
+static __be32 *
 encode_fh(__be32 *p, struct svc_fh *fhp)
 {
 	memcpy(p, &fhp->fh_handle.fh_base, NFS_FHSIZE);
@@ -66,7 +61,7 @@
  * Decode a file name and make sure that the path contains
  * no slashes or null bytes.
  */
-static inline __be32 *
+static __be32 *
 decode_filename(__be32 *p, char **namp, int *lenp)
 {
 	char		*name;
@@ -82,7 +77,7 @@
 	return p;
 }
 
-static inline __be32 *
+static __be32 *
 decode_pathname(__be32 *p, char **namp, int *lenp)
 {
 	char		*name;
@@ -98,7 +93,7 @@
 	return p;
 }
 
-static inline __be32 *
+static __be32 *
 decode_sattr(__be32 *p, struct iattr *iap)
 {
 	u32	tmp, tmp1;
diff --git a/fs/nls/nls_cp936.c b/fs/nls/nls_cp936.c
index 046fde8..65e640c 100644
--- a/fs/nls/nls_cp936.c
+++ b/fs/nls/nls_cp936.c
@@ -4421,6 +4421,73 @@
 	c2u_F8, c2u_F9, c2u_FA, c2u_FB, c2u_FC, c2u_FD, c2u_FE, NULL,   
 };
 
+static unsigned char u2c_00[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0xA1, 0xE8, 0x00, 0x00, 0x00, 0x00, 0xA1, 0xEC, /* 0xA4-0xA7 */
+	0xA1, 0xA7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0xA1, 0xE3, 0xA1, 0xC0, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA1, 0xA4, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA1, 0xC1, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0xA8, 0xA4, 0xA8, 0xA2, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0xA8, 0xA8, 0xA8, 0xA6, 0xA8, 0xBA, 0x00, 0x00, /* 0xE8-0xEB */
+	0xA8, 0xAC, 0xA8, 0xAA, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0xA8, 0xB0, 0xA8, 0xAE, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA1, 0xC2, /* 0xF4-0xF7 */
+	0x00, 0x00, 0xA8, 0xB4, 0xA8, 0xB2, 0x00, 0x00, /* 0xF8-0xFB */
+	0xA8, 0xB9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
 static unsigned char u2c_01[512] = {
 	0xA8, 0xA1, 0xA8, 0xA1, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
@@ -10825,7 +10892,7 @@
 };
 
 static unsigned char *page_uni2charset[256] = {
-	NULL,   u2c_01, u2c_02, u2c_03, u2c_04, NULL,   NULL,   NULL,   
+	u2c_00, u2c_01, u2c_02, u2c_03, u2c_04, NULL,   NULL,   NULL,
 	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
 	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
 	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
@@ -10936,11 +11003,34 @@
 	unsigned char *uni2charset;
 	unsigned char cl = uni&0xFF;
 	unsigned char ch = (uni>>8)&0xFF;
-	int n;
+	unsigned char out0,out1;
 
 	if (boundlen <= 0)
 		return -ENAMETOOLONG;
 
+	if (uni == 0x20ac) {/* Euro symbol.The only exception with a non-ascii unicode */
+		out[0] = 0x80;
+		return 1;
+	}
+
+	if (ch == 0) { /* handle the U00 plane*/
+		/* if (cl == 0) return -EINVAL;*/ /*U0000 is legal in cp936*/
+		out0 = u2c_00[cl*2];
+		out1 = u2c_00[cl*2+1];
+		if (out0 == 0x00 && out1 == 0x00) {
+			if (cl<0x80) {
+				out[0] = cl;
+				return 1;
+			}
+			return -EINVAL;
+		} else {
+			if (boundlen <= 1)
+				return -ENAMETOOLONG;
+			out[0] = out0;
+			out[1] = out1;
+			return 2;
+		}
+	}
 
 	uni2charset = page_uni2charset[ch];
 	if (uni2charset) {
@@ -10950,15 +11040,10 @@
 		out[1] = uni2charset[cl*2+1];
 		if (out[0] == 0x00 && out[1] == 0x00)
 			return -EINVAL;
-		n = 2;
-	} else if (ch==0 && cl) {
-		out[0] = cl;
-		n = 1;
+		return 2;
 	}
 	else
 		return -EINVAL;
-
-	return n;
 }
 
 static int char2uni(const unsigned char *rawstring, int boundlen,
@@ -10972,7 +11057,11 @@
 		return -ENAMETOOLONG;
 
 	if (boundlen == 1) {
-		*uni = rawstring[0];
+		if (rawstring[0]==0x80) { /* Euro symbol.The only exception with a non-ascii unicode */
+			*uni = 0x20ac;
+		} else {
+			*uni = rawstring[0];
+		}
 		return 1;
 	}
 
@@ -10986,7 +11075,11 @@
 			return -EINVAL;
 		n = 2;
 	} else{
-		*uni = ch;
+		if (ch==0x80) {/* Euro symbol.The only exception with a non-ascii unicode */
+			*uni = 0x20ac;
+		} else {
+			*uni = ch;
+		}
 		n = 1;
 	}
 	return n;
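
The nls_cp936 additions wire up the u2c_00 page for Latin-range punctuation and special-case the Euro sign, which CP936/GBK encodes as the single byte 0x80 even though U+20AC lies outside the ASCII range. A condensed sketch of the single-byte fast path in front of the page-table lookup; the helper name is hypothetical:

    /* Handles the single-byte cases; the caller falls back to the
     * u2c_* page tables when this returns 0. */
    static int cp936_single_byte(wchar_t uni, unsigned char *out)
    {
            if (uni == 0x20ac) {    /* Euro: the one non-ASCII single byte */
                    *out = 0x80;
                    return 1;
            }
            if (uni < 0x80) {       /* plain ASCII passes through */
                    *out = (unsigned char)uni;
                    return 1;
            }
            return 0;
    }
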
diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c
index 9f08e85..c577d8e 100644
--- a/fs/ntfs/attrib.c
+++ b/fs/ntfs/attrib.c
@@ -1272,7 +1272,7 @@
 {
 	ntfs_attr_search_ctx *ctx;
 
-	ctx = kmem_cache_alloc(ntfs_attr_ctx_cache, SLAB_NOFS);
+	ctx = kmem_cache_alloc(ntfs_attr_ctx_cache, GFP_NOFS);
 	if (ctx)
 		ntfs_attr_init_search_ctx(ctx, ni, mrec);
 	return ctx;
diff --git a/fs/ntfs/index.c b/fs/ntfs/index.c
index e32cde4..2194eff 100644
--- a/fs/ntfs/index.c
+++ b/fs/ntfs/index.c
@@ -38,7 +38,7 @@
 {
 	ntfs_index_context *ictx;
 
-	ictx = kmem_cache_alloc(ntfs_index_ctx_cache, SLAB_NOFS);
+	ictx = kmem_cache_alloc(ntfs_index_ctx_cache, GFP_NOFS);
 	if (ictx)
 		*ictx = (ntfs_index_context){ .idx_ni = idx_ni };
 	return ictx;
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index 2d3de9c8..2479898 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -324,7 +324,7 @@
 	ntfs_inode *ni;
 
 	ntfs_debug("Entering.");
-	ni = kmem_cache_alloc(ntfs_big_inode_cache, SLAB_NOFS);
+	ni = kmem_cache_alloc(ntfs_big_inode_cache, GFP_NOFS);
 	if (likely(ni != NULL)) {
 		ni->state = 0;
 		return VFS_I(ni);
@@ -349,7 +349,7 @@
 	ntfs_inode *ni;
 
 	ntfs_debug("Entering.");
-	ni = kmem_cache_alloc(ntfs_inode_cache, SLAB_NOFS);
+	ni = kmem_cache_alloc(ntfs_inode_cache, GFP_NOFS);
 	if (likely(ni != NULL)) {
 		ni->state = 0;
 		return ni;
diff --git a/fs/ntfs/unistr.c b/fs/ntfs/unistr.c
index 6a495f7..005ca4b 100644
--- a/fs/ntfs/unistr.c
+++ b/fs/ntfs/unistr.c
@@ -266,7 +266,7 @@
 
 	/* We do not trust outside sources. */
 	if (likely(ins)) {
-		ucs = kmem_cache_alloc(ntfs_name_cache, SLAB_NOFS);
+		ucs = kmem_cache_alloc(ntfs_name_cache, GFP_NOFS);
 		if (likely(ucs)) {
 			for (i = o = 0; i < ins_len; i += wc_len) {
 				wc_len = nls->char2uni(ins + i, ins_len - i,
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 85a048b..edc91ca 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -1197,10 +1197,12 @@
 	return status;
 }
 
-static void ocfs2_truncate_log_worker(void *data)
+static void ocfs2_truncate_log_worker(struct work_struct *work)
 {
 	int status;
-	struct ocfs2_super *osb = data;
+	struct ocfs2_super *osb =
+		container_of(work, struct ocfs2_super,
+			     osb_truncate_log_wq.work);
 
 	mlog_entry_void();
 
@@ -1432,7 +1434,8 @@
 	/* ocfs2_truncate_log_shutdown keys on the existence of
 	 * osb->osb_tl_inode so we don't set any of the osb variables
 	 * until we're sure all is well. */
-	INIT_WORK(&osb->osb_truncate_log_wq, ocfs2_truncate_log_worker, osb);
+	INIT_DELAYED_WORK(&osb->osb_truncate_log_wq,
+			  ocfs2_truncate_log_worker);
 	osb->osb_tl_bh    = tl_bh;
 	osb->osb_tl_inode = tl_inode;
 
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 305cba3..4cd9a95 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -141,7 +141,7 @@
 	 * recognizes a node going up and down in one iteration */
 	u64			hr_generation;
 
-	struct work_struct	hr_write_timeout_work;
+	struct delayed_work	hr_write_timeout_work;
 	unsigned long		hr_last_timeout_start;
 
 	/* Used during o2hb_check_slot to hold a copy of the block
@@ -156,9 +156,11 @@
 	int               wc_error;
 };
 
-static void o2hb_write_timeout(void *arg)
+static void o2hb_write_timeout(struct work_struct *work)
 {
-	struct o2hb_region *reg = arg;
+	struct o2hb_region *reg =
+		container_of(work, struct o2hb_region,
+			     hr_write_timeout_work.work);
 
 	mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u "
 	     "milliseconds\n", reg->hr_dev_name,
@@ -1404,7 +1406,7 @@
 		goto out;
 	}
 
-	INIT_WORK(&reg->hr_write_timeout_work, o2hb_write_timeout, reg);
+	INIT_DELAYED_WORK(&reg->hr_write_timeout_work, o2hb_write_timeout);
 
 	/*
 	 * A node is considered live after it has beat LIVE_THRESHOLD
diff --git a/fs/ocfs2/cluster/quorum.c b/fs/ocfs2/cluster/quorum.c
index 7bba98f..4705d65 100644
--- a/fs/ocfs2/cluster/quorum.c
+++ b/fs/ocfs2/cluster/quorum.c
@@ -88,7 +88,7 @@
 	o2quo_fence_self();
 }
 
-static void o2quo_make_decision(void *arg)
+static void o2quo_make_decision(struct work_struct *work)
 {
 	int quorum;
 	int lowest_hb, lowest_reachable = 0, fence = 0;
@@ -306,7 +306,7 @@
 	struct o2quo_state *qs = &o2quo_state;
 
 	spin_lock_init(&qs->qs_lock);
-	INIT_WORK(&qs->qs_work, o2quo_make_decision, NULL);
+	INIT_WORK(&qs->qs_work, o2quo_make_decision);
 }
 
 void o2quo_exit(void)
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index b650efa..9b3209d 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -140,11 +140,11 @@
 		 [O2NET_ERR_DIED]	= -EHOSTDOWN,};
 
 /* can't quite avoid *all* internal declarations :/ */
-static void o2net_sc_connect_completed(void *arg);
-static void o2net_rx_until_empty(void *arg);
-static void o2net_shutdown_sc(void *arg);
+static void o2net_sc_connect_completed(struct work_struct *work);
+static void o2net_rx_until_empty(struct work_struct *work);
+static void o2net_shutdown_sc(struct work_struct *work);
 static void o2net_listen_data_ready(struct sock *sk, int bytes);
-static void o2net_sc_send_keep_req(void *arg);
+static void o2net_sc_send_keep_req(struct work_struct *work);
 static void o2net_idle_timer(unsigned long data);
 static void o2net_sc_postpone_idle(struct o2net_sock_container *sc);
 
@@ -308,10 +308,10 @@
 	o2nm_node_get(node);
 	sc->sc_node = node;
 
-	INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed, sc);
-	INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty, sc);
-	INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc, sc);
-	INIT_WORK(&sc->sc_keepalive_work, o2net_sc_send_keep_req, sc);
+	INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed);
+	INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty);
+	INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc);
+	INIT_DELAYED_WORK(&sc->sc_keepalive_work, o2net_sc_send_keep_req);
 
 	init_timer(&sc->sc_idle_timeout);
 	sc->sc_idle_timeout.function = o2net_idle_timer;
@@ -342,7 +342,7 @@
 		sc_put(sc);
 }
 static void o2net_sc_queue_delayed_work(struct o2net_sock_container *sc,
-					struct work_struct *work,
+					struct delayed_work *work,
 					int delay)
 {
 	sc_get(sc);
@@ -350,7 +350,7 @@
 		sc_put(sc);
 }
 static void o2net_sc_cancel_delayed_work(struct o2net_sock_container *sc,
-					 struct work_struct *work)
+					 struct delayed_work *work)
 {
 	if (cancel_delayed_work(work))
 		sc_put(sc);
@@ -564,9 +564,11 @@
  * ourselves as state_change couldn't get the nn_lock and call set_nn_state
  * itself.
  */
-static void o2net_shutdown_sc(void *arg)
+static void o2net_shutdown_sc(struct work_struct *work)
 {
-	struct o2net_sock_container *sc = arg;
+	struct o2net_sock_container *sc =
+		container_of(work, struct o2net_sock_container,
+			     sc_shutdown_work);
 	struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
 
 	sclog(sc, "shutting down\n");
@@ -1201,9 +1203,10 @@
 /* this work func is triggerd by data ready.  it reads until it can read no
  * more.  it interprets 0, eof, as fatal.  if data_ready hits while we're doing
  * our work the work struct will be marked and we'll be called again. */
-static void o2net_rx_until_empty(void *arg)
+static void o2net_rx_until_empty(struct work_struct *work)
 {
-	struct o2net_sock_container *sc = arg;
+	struct o2net_sock_container *sc =
+		container_of(work, struct o2net_sock_container, sc_rx_work);
 	int ret;
 
 	do {
@@ -1249,9 +1252,11 @@
 
 /* called when a connect completes and after a sock is accepted.  the
  * rx path will see the response and mark the sc valid */
-static void o2net_sc_connect_completed(void *arg)
+static void o2net_sc_connect_completed(struct work_struct *work)
 {
-	struct o2net_sock_container *sc = arg;
+	struct o2net_sock_container *sc =
+		container_of(work, struct o2net_sock_container,
+			     sc_connect_work);
 
 	mlog(ML_MSG, "sc sending handshake with ver %llu id %llx\n",
               (unsigned long long)O2NET_PROTOCOL_VERSION,
@@ -1262,9 +1267,11 @@
 }
 
 /* this is called as a work_struct func. */
-static void o2net_sc_send_keep_req(void *arg)
+static void o2net_sc_send_keep_req(struct work_struct *work)
 {
-	struct o2net_sock_container *sc = arg;
+	struct o2net_sock_container *sc =
+		container_of(work, struct o2net_sock_container,
+			     sc_keepalive_work.work);
 
 	o2net_sendpage(sc, o2net_keep_req, sizeof(*o2net_keep_req));
 	sc_put(sc);
@@ -1314,14 +1321,15 @@
  * having a connect attempt fail, etc. This centralizes the logic which decides
  * if a connect attempt should be made or if we should give up and all future
  * transmit attempts should fail */
-static void o2net_start_connect(void *arg)
+static void o2net_start_connect(struct work_struct *work)
 {
-	struct o2net_node *nn = arg;
+	struct o2net_node *nn =
+		container_of(work, struct o2net_node, nn_connect_work.work);
 	struct o2net_sock_container *sc = NULL;
 	struct o2nm_node *node = NULL, *mynode = NULL;
 	struct socket *sock = NULL;
 	struct sockaddr_in myaddr = {0, }, remoteaddr = {0, };
-	int ret = 0;
+	int ret = 0, stop;
 
 	/* if we're greater we initiate tx, otherwise we accept */
 	if (o2nm_this_node() <= o2net_num_from_nn(nn))
@@ -1342,10 +1350,9 @@
 
 	spin_lock(&nn->nn_lock);
 	/* see if we already have one pending or have given up */
-	if (nn->nn_sc || nn->nn_persistent_error)
-		arg = NULL;
+	stop = (nn->nn_sc || nn->nn_persistent_error);
 	spin_unlock(&nn->nn_lock);
-	if (arg == NULL) /* *shrug*, needed some indicator */
+	if (stop)
 		goto out;
 
 	nn->nn_last_connect_attempt = jiffies;
@@ -1421,9 +1428,10 @@
 	return;
 }
 
-static void o2net_connect_expired(void *arg)
+static void o2net_connect_expired(struct work_struct *work)
 {
-	struct o2net_node *nn = arg;
+	struct o2net_node *nn =
+		container_of(work, struct o2net_node, nn_connect_expired.work);
 
 	spin_lock(&nn->nn_lock);
 	if (!nn->nn_sc_valid) {
@@ -1436,9 +1444,10 @@
 	spin_unlock(&nn->nn_lock);
 }
 
-static void o2net_still_up(void *arg)
+static void o2net_still_up(struct work_struct *work)
 {
-	struct o2net_node *nn = arg;
+	struct o2net_node *nn =
+		container_of(work, struct o2net_node, nn_still_up.work);
 
 	o2quo_hb_still_up(o2net_num_from_nn(nn));
 }
@@ -1644,9 +1653,9 @@
 	return ret;
 }
 
-static void o2net_accept_many(void *arg)
+static void o2net_accept_many(struct work_struct *work)
 {
-	struct socket *sock = arg;
+	struct socket *sock = o2net_listen_sock;
 	while (o2net_accept_one(sock) == 0)
 		cond_resched();
 }
@@ -1700,7 +1709,7 @@
 	write_unlock_bh(&sock->sk->sk_callback_lock);
 
 	o2net_listen_sock = sock;
-	INIT_WORK(&o2net_listen_work, o2net_accept_many, sock);
+	INIT_WORK(&o2net_listen_work, o2net_accept_many);
 
 	sock->sk->sk_reuse = 1;
 	ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
@@ -1819,9 +1828,10 @@
 		struct o2net_node *nn = o2net_nn_from_num(i);
 
 		spin_lock_init(&nn->nn_lock);
-		INIT_WORK(&nn->nn_connect_work, o2net_start_connect, nn);
-		INIT_WORK(&nn->nn_connect_expired, o2net_connect_expired, nn);
-		INIT_WORK(&nn->nn_still_up, o2net_still_up, nn);
+		INIT_DELAYED_WORK(&nn->nn_connect_work, o2net_start_connect);
+		INIT_DELAYED_WORK(&nn->nn_connect_expired,
+				  o2net_connect_expired);
+		INIT_DELAYED_WORK(&nn->nn_still_up, o2net_still_up);
 		/* until we see hb from a node we'll return einval */
 		nn->nn_persistent_error = -ENOTCONN;
 		init_waitqueue_head(&nn->nn_sc_wq);
diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h
index 4b46aac..daebbd3 100644
--- a/fs/ocfs2/cluster/tcp_internal.h
+++ b/fs/ocfs2/cluster/tcp_internal.h
@@ -86,18 +86,18 @@
 	 * connect attempt fails and so can be self-arming.  shutdown is
 	 * careful to first mark the nn such that no connects will be attempted
 	 * before canceling delayed connect work and flushing the queue. */
-	struct work_struct		nn_connect_work;
+	struct delayed_work		nn_connect_work;
 	unsigned long			nn_last_connect_attempt;
 
 	/* this is queued as nodes come up and is canceled when a connection is
 	 * established.  this expiring gives up on the node and errors out
 	 * transmits */
-	struct work_struct		nn_connect_expired;
+	struct delayed_work		nn_connect_expired;
 
 	/* after we give up on a socket we wait a while before deciding
 	 * that it is still heartbeating and that we should do some
 	 * quorum work */
-	struct work_struct		nn_still_up;
+	struct delayed_work		nn_still_up;
 };
 
 struct o2net_sock_container {
@@ -129,7 +129,7 @@
 	struct work_struct	sc_shutdown_work;
 
 	struct timer_list	sc_idle_timeout;
-	struct work_struct	sc_keepalive_work;
+	struct delayed_work	sc_keepalive_work;
 
 	unsigned		sc_handshake_ok:1;
 
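
The ocfs2 networking hunks above follow the 2.6.20 workqueue API: INIT_WORK() no longer carries a data pointer, handlers receive the struct work_struct itself and recover their container with container_of(), and periodically re-queued items become struct delayed_work (hence the extra ".work" hop in the keepalive and connect handlers). A minimal sketch of the pattern, using a hypothetical struct foo rather than anything from this patch:

	#include <linux/workqueue.h>
	#include <linux/kernel.h>
	#include <linux/jiffies.h>

	struct foo {
		struct work_struct	io_work;	/* queued immediately */
		struct delayed_work	keepalive_work;	/* queued with a delay */
		int			value;
	};

	/* handlers now take the work_struct itself, not a void *arg */
	static void foo_do_io(struct work_struct *work)
	{
		struct foo *f = container_of(work, struct foo, io_work);

		printk(KERN_INFO "io work, value %d\n", f->value);
	}

	static void foo_send_keepalive(struct work_struct *work)
	{
		/* delayed_work embeds a work_struct, so go through ".work" */
		struct foo *f = container_of(work, struct foo, keepalive_work.work);

		printk(KERN_INFO "keepalive, value %d\n", f->value);
	}

	static void foo_arm(struct foo *f)
	{
		INIT_WORK(&f->io_work, foo_do_io);
		INIT_DELAYED_WORK(&f->keepalive_work, foo_send_keepalive);

		schedule_work(&f->io_work);
		schedule_delayed_work(&f->keepalive_work, msecs_to_jiffies(5000));
	}
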
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index fa96818..6b6ff76 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -153,7 +153,7 @@
  * called functions that cannot be directly called from the
  * net message handlers for some reason, usually because
  * they need to send net messages of their own. */
-void dlm_dispatch_work(void *data);
+void dlm_dispatch_work(struct work_struct *work);
 
 struct dlm_lock_resource;
 struct dlm_work_item;
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index f6cdab3..420a375 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -1297,7 +1297,7 @@
 
 	spin_lock_init(&dlm->work_lock);
 	INIT_LIST_HEAD(&dlm->work_list);
-	INIT_WORK(&dlm->dispatched_work, dlm_dispatch_work, dlm);
+	INIT_WORK(&dlm->dispatched_work, dlm_dispatch_work);
 
 	kref_init(&dlm->dlm_refs);
 	dlm->dlm_state = DLM_CTXT_NEW;
diff --git a/fs/ocfs2/dlm/dlmfs.c b/fs/ocfs2/dlm/dlmfs.c
index 16b8d1b..941acf1 100644
--- a/fs/ocfs2/dlm/dlmfs.c
+++ b/fs/ocfs2/dlm/dlmfs.c
@@ -66,7 +66,7 @@
 static struct inode_operations dlmfs_dir_inode_operations;
 static struct inode_operations dlmfs_root_inode_operations;
 static struct inode_operations dlmfs_file_inode_operations;
-static kmem_cache_t *dlmfs_inode_cache;
+static struct kmem_cache *dlmfs_inode_cache;
 
 struct workqueue_struct *user_dlm_worker;
 
@@ -257,7 +257,7 @@
 }
 
 static void dlmfs_init_once(void *foo,
-			    kmem_cache_t *cachep,
+			    struct kmem_cache *cachep,
 			    unsigned long flags)
 {
 	struct dlmfs_inode_private *ip =
@@ -276,7 +276,7 @@
 {
 	struct dlmfs_inode_private *ip;
 
-	ip = kmem_cache_alloc(dlmfs_inode_cache, SLAB_NOFS);
+	ip = kmem_cache_alloc(dlmfs_inode_cache, GFP_NOFS);
 	if (!ip)
 		return NULL;
 
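
The dlmfs hunks above are part of two tree-wide cleanups visible throughout this merge: the kmem_cache_t typedef gives way to plain struct kmem_cache, and the legacy SLAB_NOFS/SLAB_KERNEL aliases give way to the real GFP_NOFS/GFP_KERNEL flags. A hedged sketch of the resulting usage, with made-up names and the six-argument kmem_cache_create() of this kernel generation:

	#include <linux/slab.h>
	#include <linux/errno.h>

	struct foo_inode_info {
		int	state;
	};

	static struct kmem_cache *foo_inode_cachep;	/* not kmem_cache_t */

	static int foo_init_cache(void)
	{
		foo_inode_cachep = kmem_cache_create("foo_inode_cache",
						     sizeof(struct foo_inode_info),
						     0, SLAB_HWCACHE_ALIGN,
						     NULL, NULL);
		return foo_inode_cachep ? 0 : -ENOMEM;
	}

	static struct foo_inode_info *foo_alloc_inode_info(void)
	{
		/* GFP_NOFS in filesystem paths; the SLAB_* aliases are gone */
		return kmem_cache_alloc(foo_inode_cachep, GFP_NOFS);
	}

	static void foo_destroy_cache(void)
	{
		kmem_cache_destroy(foo_inode_cachep);
	}
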
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index f784177..856012b 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -221,7 +221,7 @@
 #endif  /*  0  */
 
 
-static kmem_cache_t *dlm_mle_cache = NULL;
+static struct kmem_cache *dlm_mle_cache = NULL;
 
 
 static void dlm_mle_release(struct kref *kref);
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 9d950d7..fb3e2b0 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -153,9 +153,10 @@
 }
 
 /* Worker function used during recovery. */
-void dlm_dispatch_work(void *data)
+void dlm_dispatch_work(struct work_struct *work)
 {
-	struct dlm_ctxt *dlm = (struct dlm_ctxt *)data;
+	struct dlm_ctxt *dlm =
+		container_of(work, struct dlm_ctxt, dispatched_work);
 	LIST_HEAD(tmp_list);
 	struct list_head *iter, *iter2;
 	struct dlm_work_item *item;
diff --git a/fs/ocfs2/dlm/userdlm.c b/fs/ocfs2/dlm/userdlm.c
index eead48b..7d2f578 100644
--- a/fs/ocfs2/dlm/userdlm.c
+++ b/fs/ocfs2/dlm/userdlm.c
@@ -171,15 +171,14 @@
 		BUG();
 }
 
-static void user_dlm_unblock_lock(void *opaque);
+static void user_dlm_unblock_lock(struct work_struct *work);
 
 static void __user_dlm_queue_lockres(struct user_lock_res *lockres)
 {
 	if (!(lockres->l_flags & USER_LOCK_QUEUED)) {
 		user_dlm_grab_inode_ref(lockres);
 
-		INIT_WORK(&lockres->l_work, user_dlm_unblock_lock,
-			  lockres);
+		INIT_WORK(&lockres->l_work, user_dlm_unblock_lock);
 
 		queue_work(user_dlm_worker, &lockres->l_work);
 		lockres->l_flags |= USER_LOCK_QUEUED;
@@ -279,10 +278,11 @@
 	iput(inode);
 }
 
-static void user_dlm_unblock_lock(void *opaque)
+static void user_dlm_unblock_lock(struct work_struct *work)
 {
 	int new_level, status;
-	struct user_lock_res *lockres = (struct user_lock_res *) opaque;
+	struct user_lock_res *lockres =
+		container_of(work, struct user_lock_res, l_work);
 	struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres);
 
 	mlog(0, "processing lockres %.*s\n", lockres->l_namelen,
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index fcd4475..80ac69f 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -61,7 +61,7 @@
 	struct ocfs2_extent_map_entry *right_ent;
 };
 
-static kmem_cache_t *ocfs2_em_ent_cachep = NULL;
+static struct kmem_cache *ocfs2_em_ent_cachep = NULL;
 
 
 static struct ocfs2_extent_map_entry *
diff --git a/fs/ocfs2/inode.h b/fs/ocfs2/inode.h
index 46a378f..1a7dd29 100644
--- a/fs/ocfs2/inode.h
+++ b/fs/ocfs2/inode.h
@@ -106,7 +106,7 @@
 #define INODE_JOURNAL(i) (OCFS2_I(i)->ip_flags & OCFS2_INODE_JOURNAL)
 #define SET_INODE_JOURNAL(i) (OCFS2_I(i)->ip_flags |= OCFS2_INODE_JOURNAL)
 
-extern kmem_cache_t *ocfs2_inode_cache;
+extern struct kmem_cache *ocfs2_inode_cache;
 
 extern const struct address_space_operations ocfs2_aops;
 
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index c0ad7cb..1d7f4ab 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -703,11 +703,12 @@
  * NOTE: This function can and will sleep on recovery of other nodes
  * during cluster locking, just like any other ocfs2 process.
  */
-void ocfs2_complete_recovery(void *data)
+void ocfs2_complete_recovery(struct work_struct *work)
 {
 	int ret;
-	struct ocfs2_super *osb = data;
-	struct ocfs2_journal *journal = osb->journal;
+	struct ocfs2_journal *journal =
+		container_of(work, struct ocfs2_journal, j_recovery_work);
+	struct ocfs2_super *osb = journal->j_osb;
 	struct ocfs2_dinode *la_dinode, *tl_dinode;
 	struct ocfs2_la_recovery_item *item;
 	struct list_head *p, *n;
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index d86cb96..899112a 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -133,7 +133,7 @@
 }
 
 /* Exported only for the journal struct init code in super.c. Do not call. */
-void ocfs2_complete_recovery(void *data);
+void ocfs2_complete_recovery(struct work_struct *work);
 
 /*
  *  Journal Control:
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 0788837..b767fd7 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -285,7 +285,7 @@
 	/* Truncate log info */
 	struct inode			*osb_tl_inode;
 	struct buffer_head		*osb_tl_bh;
-	struct work_struct		osb_truncate_log_wq;
+	struct delayed_work		osb_truncate_log_wq;
 
 	struct ocfs2_node_map		osb_recovering_orphan_dirs;
 	unsigned int			*osb_orphan_wipes;
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index b099257..4bf3954 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -68,7 +68,7 @@
 
 #include "buffer_head_io.h"
 
-static kmem_cache_t *ocfs2_inode_cachep = NULL;
+static struct kmem_cache *ocfs2_inode_cachep = NULL;
 
 /* OCFS2 needs to schedule several different types of work which
  * require cluster locking, disk I/O, recovery waits, etc. Since these
@@ -303,7 +303,7 @@
 {
 	struct ocfs2_inode_info *oi;
 
-	oi = kmem_cache_alloc(ocfs2_inode_cachep, SLAB_NOFS);
+	oi = kmem_cache_alloc(ocfs2_inode_cachep, GFP_NOFS);
 	if (!oi)
 		return NULL;
 
@@ -914,7 +914,7 @@
 }
 
 static void ocfs2_inode_init_once(void *data,
-				  kmem_cache_t *cachep,
+				  struct kmem_cache *cachep,
 				  unsigned long flags)
 {
 	struct ocfs2_inode_info *oi = data;
@@ -1365,7 +1365,7 @@
 	spin_lock_init(&journal->j_lock);
 	journal->j_trans_id = (unsigned long) 1;
 	INIT_LIST_HEAD(&journal->j_la_cleanups);
-	INIT_WORK(&journal->j_recovery_work, ocfs2_complete_recovery, osb);
+	INIT_WORK(&journal->j_recovery_work, ocfs2_complete_recovery);
 	journal->j_state = OCFS2_JOURNAL_FREE;
 
 	/* get some pseudo constants for clustersize bits */
@@ -1674,7 +1674,7 @@
 	va_list args;
 
 	va_start(args, fmt);
-	vsprintf(error_buf, fmt, args);
+	vsnprintf(error_buf, sizeof(error_buf), fmt, args);
 	va_end(args);
 
 	/* Not using mlog here because we want to show the actual
@@ -1695,7 +1695,7 @@
 	va_list args;
 
 	va_start(args, fmt);
-	vsprintf(error_buf, fmt, args);
+	vsnprintf(error_buf, sizeof(error_buf), fmt, args);
 	va_end(args);
 
 	printk(KERN_CRIT "OCFS2: abort (device %s): %s: %s\n",
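
The vsprintf() to vsnprintf() conversions above (repeated later for UDF and UFS) bound the formatted message to the size of the static buffer, so an oversized argument is truncated instead of overrunning it. A small sketch of the pattern; the helper name is invented:

	#include <linux/kernel.h>
	#include <stdarg.h>

	static char error_buf[1024];

	static void foo_error(const char *function, const char *fmt, ...)
	{
		va_list args;

		va_start(args, fmt);
		/* never writes more than sizeof(error_buf) bytes, unlike vsprintf() */
		vsnprintf(error_buf, sizeof(error_buf), fmt, args);
		va_end(args);

		printk(KERN_CRIT "foo: %s: %s\n", function, error_buf);
	}
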
diff --git a/fs/ocfs2/uptodate.c b/fs/ocfs2/uptodate.c
index 9707ed7..39814b9 100644
--- a/fs/ocfs2/uptodate.c
+++ b/fs/ocfs2/uptodate.c
@@ -69,7 +69,7 @@
 	sector_t	c_block;
 };
 
-static kmem_cache_t *ocfs2_uptodate_cachep = NULL;
+static struct kmem_cache *ocfs2_uptodate_cachep = NULL;
 
 void ocfs2_metadata_cache_init(struct inode *inode)
 {
diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c
index 592a640..26f44e0 100644
--- a/fs/openpromfs/inode.c
+++ b/fs/openpromfs/inode.c
@@ -330,13 +330,13 @@
 	return 0;
 }
 
-static kmem_cache_t *op_inode_cachep;
+static struct kmem_cache *op_inode_cachep;
 
 static struct inode *openprom_alloc_inode(struct super_block *sb)
 {
 	struct op_inode_info *oi;
 
-	oi = kmem_cache_alloc(op_inode_cachep, SLAB_KERNEL);
+	oi = kmem_cache_alloc(op_inode_cachep, GFP_KERNEL);
 	if (!oi)
 		return NULL;
 
@@ -415,7 +415,7 @@
 	.kill_sb	= kill_anon_super,
 };
 
-static void op_inode_init_once(void *data, kmem_cache_t * cachep, unsigned long flags)
+static void op_inode_init_once(void *data, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct op_inode_info *oi = (struct op_inode_info *) data;
 
diff --git a/fs/partitions/amiga.c b/fs/partitions/amiga.c
index 3068528..9917a8c 100644
--- a/fs/partitions/amiga.c
+++ b/fs/partitions/amiga.c
@@ -43,6 +43,7 @@
 			if (warn_no_part)
 				printk("Dev %s: unable to read RDB block %d\n",
 				       bdevname(bdev, b), blk);
+			res = -1;
 			goto rdb_done;
 		}
 		if (*(__be32 *)data != cpu_to_be32(IDNAME_RIGIDDISK))
@@ -79,6 +80,7 @@
 			if (warn_no_part)
 				printk("Dev %s: unable to read partition block %d\n",
 				       bdevname(bdev, b), blk);
+			res = -1;
 			goto rdb_done;
 		}
 		pb  = (struct PartitionBlock *)data;
diff --git a/fs/partitions/atari.c b/fs/partitions/atari.c
index 192a6ad..1f3572d 100644
--- a/fs/partitions/atari.c
+++ b/fs/partitions/atari.c
@@ -88,7 +88,7 @@
 			if (!xrs) {
 				printk (" block %ld read failed\n", partsect);
 				put_dev_sector(sect);
-				return 0;
+				return -1;
 			}
 
 			/* ++roman: sanity check: bit 0 of flg field must be set */
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 6fb4b61..1901137 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -153,7 +153,7 @@
 check_partition(struct gendisk *hd, struct block_device *bdev)
 {
 	struct parsed_partitions *state;
-	int i, res;
+	int i, res, err;
 
 	state = kmalloc(sizeof(struct parsed_partitions), GFP_KERNEL);
 	if (!state)
@@ -165,19 +165,30 @@
 		sprintf(state->name, "p");
 
 	state->limit = hd->minors;
-	i = res = 0;
+	i = res = err = 0;
 	while (!res && check_part[i]) {
 		memset(&state->parts, 0, sizeof(state->parts));
 		res = check_part[i++](state, bdev);
+		if (res < 0) {
+			/* We have hit an I/O error which we don't report now.
+		 	* But record it, and let the others do their job.
+			 * But record it, and let the others do their job.
+			 */
+			res = 0;
+		}
+
 	}
 	if (res > 0)
 		return state;
+	if (err)
+	/* The partition is unrecognized. So report I/O errors if there were any */
+		res = err;
 	if (!res)
 		printk(" unknown partition table\n");
 	else if (warn_no_part)
 		printk(" unable to read partition table\n");
 	kfree(state);
-	return NULL;
+	return ERR_PTR(res);
 }
 
 /*
@@ -494,6 +505,8 @@
 		disk->fops->revalidate_disk(disk);
 	if (!get_capacity(disk) || !(state = check_partition(disk, bdev)))
 		return 0;
+	if (IS_ERR(state))	/* I/O error reading the partition table */
+		return PTR_ERR(state);
 	for (p = 1; p < state->limit; p++) {
 		sector_t size = state->parts[p].size;
 		sector_t from = state->parts[p].from;
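
check_partition() now tells its caller why it failed: ERR_PTR() encodes a negative errno in the returned pointer, and the caller unpacks it with IS_ERR()/PTR_ERR(). A minimal sketch of that idiom with a hypothetical lookup function:

	#include <linux/err.h>
	#include <linux/errno.h>
	#include <linux/slab.h>

	struct foo {
		int	id;
	};

	static struct foo *foo_lookup(int id)
	{
		struct foo *f;

		if (id < 0)
			return ERR_PTR(-EINVAL);	/* errno encoded in the pointer */

		f = kzalloc(sizeof(*f), GFP_KERNEL);
		if (!f)
			return ERR_PTR(-ENOMEM);
		f->id = id;
		return f;
	}

	static int foo_use(int id)
	{
		struct foo *f = foo_lookup(id);

		if (IS_ERR(f))
			return PTR_ERR(f);	/* recover the errno */
		kfree(f);
		return 0;
	}
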
diff --git a/fs/partitions/ibm.c b/fs/partitions/ibm.c
index d352a73..9f7ad42 100644
--- a/fs/partitions/ibm.c
+++ b/fs/partitions/ibm.c
@@ -43,7 +43,7 @@
 int
 ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
 {
-	int blocksize, offset, size;
+	int blocksize, offset, size, res;
 	loff_t i_size;
 	dasd_information_t *info;
 	struct hd_geometry *geo;
@@ -56,15 +56,16 @@
 	unsigned char *data;
 	Sector sect;
 
+	res = 0;
 	blocksize = bdev_hardsect_size(bdev);
 	if (blocksize <= 0)
-		return 0;
+		goto out_exit;
 	i_size = i_size_read(bdev->bd_inode);
 	if (i_size == 0)
-		return 0;
+		goto out_exit;
 
 	if ((info = kmalloc(sizeof(dasd_information_t), GFP_KERNEL)) == NULL)
-		goto out_noinfo;
+		goto out_exit;
 	if ((geo = kmalloc(sizeof(struct hd_geometry), GFP_KERNEL)) == NULL)
 		goto out_nogeo;
 	if ((label = kmalloc(sizeof(union label_t), GFP_KERNEL)) == NULL)
@@ -72,7 +73,7 @@
 
 	if (ioctl_by_bdev(bdev, BIODASDINFO, (unsigned long)info) != 0 ||
 	    ioctl_by_bdev(bdev, HDIO_GETGEO, (unsigned long)geo) != 0)
-		goto out_noioctl;
+		goto out_freeall;
 
 	/*
 	 * Get volume label, extract name and type.
@@ -92,6 +93,8 @@
 	EBCASC(type, 4);
 	EBCASC(name, 6);
 
+	res = 1;
+
 	/*
 	 * Three different types: CMS1, VOL1 and LNX1/unlabeled
 	 */
@@ -156,6 +159,9 @@
 			counter++;
 			blk++;
 		}
+		/* Are we not supposed to report this? */
+		if (!data)
+			goto out_readerr;
 	} else {
 		/*
 		 * Old style LNX1 or unlabeled disk
@@ -171,18 +177,17 @@
 	}
 
 	printk("\n");
-	kfree(label);
-	kfree(geo);
-	kfree(info);
-	return 1;
+	goto out_freeall;
+
 
 out_readerr:
-out_noioctl:
+	res = -1;
+out_freeall:
 	kfree(label);
 out_nolab:
 	kfree(geo);
 out_nogeo:
 	kfree(info);
-out_noinfo:
-	return 0;
+out_exit:
+	return res;
 }
diff --git a/fs/pipe.c b/fs/pipe.c
index b1626f2..ae36b89 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -830,7 +830,14 @@
 static struct vfsmount *pipe_mnt __read_mostly;
 static int pipefs_delete_dentry(struct dentry *dentry)
 {
-	return 1;
+	/*
+	 * At creation time, we pretended this dentry was hashed
+	 * (by clearing the DCACHE_UNHASHED bit in d_flags).
+	 * At delete time, we restore the truth: not hashed
+	 * (so that dput() can proceed correctly).
+	 */
+	dentry->d_flags |= DCACHE_UNHASHED;
+	return 0;
 }
 
 static struct dentry_operations pipefs_dentry_operations = {
@@ -891,17 +898,22 @@
 	if (!inode)
 		goto err_file;
 
-	sprintf(name, "[%lu]", inode->i_ino);
+	this.len = sprintf(name, "[%lu]", inode->i_ino);
 	this.name = name;
-	this.len = strlen(name);
-	this.hash = inode->i_ino; /* will go */
+	this.hash = 0;
 	err = -ENOMEM;
 	dentry = d_alloc(pipe_mnt->mnt_sb->s_root, &this);
 	if (!dentry)
 		goto err_inode;
 
 	dentry->d_op = &pipefs_dentry_operations;
-	d_add(dentry, inode);
+	/*
+	 * We don't want to publish this dentry in the global dentry hash table.
+	 * We pretend the dentry is already hashed, by clearing DCACHE_UNHASHED.
+	 * This permits a working /proc/$pid/fd/XXX on pipes.
+	 */
+	dentry->d_flags &= ~DCACHE_UNHASHED;
+	d_instantiate(dentry, inode);
 	f->f_vfsmnt = mntget(pipe_mnt);
 	f->f_dentry = dentry;
 	f->f_mapping = inode->i_mapping;
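
The pipe.c hunk keeps its dentries out of the global hash but clears DCACHE_UNHASHED so d_path() and /proc/<pid>/fd lookups still work, and the ->d_delete() hook restores the flag before the dentry dies. A hedged sketch of the same trick for a hypothetical internal filesystem; none of these names come from the patch:

	#include <linux/dcache.h>
	#include <linux/fs.h>
	#include <linux/string.h>

	/* Create a dentry that is visible through open fds but never hashed. */
	static struct dentry *foo_make_private_dentry(struct super_block *sb,
						      struct inode *inode,
						      const char *name)
	{
		struct qstr this = { .name = name, .len = strlen(name), .hash = 0 };
		struct dentry *dentry = d_alloc(sb->s_root, &this);

		if (!dentry)
			return NULL;
		/*
		 * Pretend the dentry is hashed; the matching ->d_delete() must
		 * set DCACHE_UNHASHED again and return 0 so dput() behaves.
		 */
		dentry->d_flags &= ~DCACHE_UNHASHED;
		d_instantiate(dentry, inode);
		return dentry;
	}
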
diff --git a/fs/proc/Makefile b/fs/proc/Makefile
index 7431d7b..f6c7762 100644
--- a/fs/proc/Makefile
+++ b/fs/proc/Makefile
@@ -8,8 +8,9 @@
 proc-$(CONFIG_MMU)	:= mmu.o task_mmu.o
 
 proc-y       += inode.o root.o base.o generic.o array.o \
-		kmsg.o proc_tty.o proc_misc.o
+		proc_tty.o proc_misc.o
 
 proc-$(CONFIG_PROC_KCORE)	+= kcore.o
 proc-$(CONFIG_PROC_VMCORE)	+= vmcore.o
 proc-$(CONFIG_PROC_DEVICETREE)	+= proc_devtree.o
+proc-$(CONFIG_PRINTK)	+= kmsg.o
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 795319c..b859fc7 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -683,8 +683,6 @@
 	char buffer[PROC_NUMBUF], *end;
 	int oom_adjust;
 
-	if (!capable(CAP_SYS_RESOURCE))
-		return -EPERM;
 	memset(buffer, 0, sizeof(buffer));
 	if (count > sizeof(buffer) - 1)
 		count = sizeof(buffer) - 1;
@@ -699,6 +697,10 @@
 	task = get_proc_task(file->f_dentry->d_inode);
 	if (!task)
 		return -ESRCH;
+	if (oom_adjust < task->oomkilladj && !capable(CAP_SYS_RESOURCE)) {
+		put_task_struct(task);
+		return -EACCES;
+	}
 	task->oomkilladj = oom_adjust;
 	put_task_struct(task);
 	if (end - buffer == 0)
@@ -1883,8 +1885,9 @@
 	return;
 }
 
-struct dentry *proc_pid_instantiate(struct inode *dir,
-	struct dentry * dentry, struct task_struct *task, void *ptr)
+static struct dentry *proc_pid_instantiate(struct inode *dir,
+					   struct dentry * dentry,
+					   struct task_struct *task, void *ptr)
 {
 	struct dentry *error = ERR_PTR(-ENOENT);
 	struct inode *inode;
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 49dfb2a..e26945b 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -81,14 +81,14 @@
 	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
 }
 
-static kmem_cache_t * proc_inode_cachep;
+static struct kmem_cache * proc_inode_cachep;
 
 static struct inode *proc_alloc_inode(struct super_block *sb)
 {
 	struct proc_inode *ei;
 	struct inode *inode;
 
-	ei = (struct proc_inode *)kmem_cache_alloc(proc_inode_cachep, SLAB_KERNEL);
+	ei = (struct proc_inode *)kmem_cache_alloc(proc_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	ei->pid = NULL;
@@ -105,7 +105,7 @@
 	kmem_cache_free(proc_inode_cachep, PROC_I(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct proc_inode *ei = (struct proc_inode *) foo;
 
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index 1294eda..1be7308 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -22,6 +22,7 @@
 #include <asm/uaccess.h>
 #include <asm/io.h>
 
+#define CORE_STR "CORE"
 
 static int open_kcore(struct inode * inode, struct file * filp)
 {
@@ -82,10 +83,11 @@
 	}
 	*elf_buflen =	sizeof(struct elfhdr) + 
 			(*nphdr + 2)*sizeof(struct elf_phdr) + 
-			3 * (sizeof(struct elf_note) + 4) +
-			sizeof(struct elf_prstatus) +
-			sizeof(struct elf_prpsinfo) +
-			sizeof(struct task_struct);
+			3 * ((sizeof(struct elf_note)) +
+			     roundup(sizeof(CORE_STR), 4)) +
+			roundup(sizeof(struct elf_prstatus), 4) +
+			roundup(sizeof(struct elf_prpsinfo), 4) +
+			roundup(sizeof(struct task_struct), 4);
 	*elf_buflen = PAGE_ALIGN(*elf_buflen);
 	return size + *elf_buflen;
 }
@@ -210,7 +212,7 @@
 	nhdr->p_offset	= offset;
 
 	/* set up the process status */
-	notes[0].name = "CORE";
+	notes[0].name = CORE_STR;
 	notes[0].type = NT_PRSTATUS;
 	notes[0].datasz = sizeof(struct elf_prstatus);
 	notes[0].data = &prstatus;
@@ -221,7 +223,7 @@
 	bufp = storenote(&notes[0], bufp);
 
 	/* set up the process info */
-	notes[1].name	= "CORE";
+	notes[1].name	= CORE_STR;
 	notes[1].type	= NT_PRPSINFO;
 	notes[1].datasz	= sizeof(struct elf_prpsinfo);
 	notes[1].data	= &prpsinfo;
@@ -238,7 +240,7 @@
 	bufp = storenote(&notes[1], bufp);
 
 	/* set up the task structure */
-	notes[2].name	= "CORE";
+	notes[2].name	= CORE_STR;
 	notes[2].type	= NT_TASKSTRUCT;
 	notes[2].datasz	= sizeof(struct task_struct);
 	notes[2].data	= current;
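
The kcore hunks above fix the buffer-size estimate to honour the ELF note layout: the name and the descriptor that follow the elf_note header are each padded to a 4-byte boundary. A small sketch of the per-note arithmetic; foo_notesize() is a made-up helper that mirrors the sizing expression above:

	#include <linux/elf.h>
	#include <linux/kernel.h>

	#define CORE_STR "CORE"

	/* One on-disk note: header, then name, then descriptor, each 4-byte padded. */
	static size_t foo_notesize(size_t datasz)
	{
		return sizeof(struct elf_note) +
		       roundup(sizeof(CORE_STR), 4) +	/* "CORE\0" pads to 8 bytes */
		       roundup(datasz, 4);
	}
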
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index 93c43b6..51815ce 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -696,9 +696,11 @@
 	proc_symlink("mounts", NULL, "self/mounts");
 
 	/* And now for trickier ones */
+#ifdef CONFIG_PRINTK
 	entry = create_proc_entry("kmsg", S_IRUSR, &proc_root);
 	if (entry)
 		entry->proc_fops = &proc_kmsg_operations;
+#endif
 	create_seq_entry("devices", 0, &proc_devinfo_operations);
 	create_seq_entry("cpuinfo", 0, &proc_cpuinfo_operations);
 #ifdef CONFIG_BLOCK
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c
index 5a41db2..c047dc6 100644
--- a/fs/qnx4/inode.c
+++ b/fs/qnx4/inode.c
@@ -515,12 +515,12 @@
 	brelse(bh);
 }
 
-static kmem_cache_t *qnx4_inode_cachep;
+static struct kmem_cache *qnx4_inode_cachep;
 
 static struct inode *qnx4_alloc_inode(struct super_block *sb)
 {
 	struct qnx4_inode_info *ei;
-	ei = kmem_cache_alloc(qnx4_inode_cachep, SLAB_KERNEL);
+	ei = kmem_cache_alloc(qnx4_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	return &ei->vfs_inode;
@@ -531,7 +531,7 @@
 	kmem_cache_free(qnx4_inode_cachep, qnx4_i(inode));
 }
 
-static void init_once(void *foo, kmem_cache_t * cachep,
+static void init_once(void *foo, struct kmem_cache * cachep,
 		      unsigned long flags)
 {
 	struct qnx4_inode_info *ei = (struct qnx4_inode_info *) foo;
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
index ac14318..373d862 100644
--- a/fs/reiserfs/file.c
+++ b/fs/reiserfs/file.c
@@ -317,12 +317,11 @@
 			/* area filled with zeroes, to supply as list of zero blocknumbers
 			   We allocate it outside of loop just in case loop would spin for
 			   several iterations. */
-			char *zeros = kmalloc(to_paste * UNFM_P_SIZE, GFP_ATOMIC);	// We cannot insert more than MAX_ITEM_LEN bytes anyway.
+			char *zeros = kzalloc(to_paste * UNFM_P_SIZE, GFP_ATOMIC);	// We cannot insert more than MAX_ITEM_LEN bytes anyway.
 			if (!zeros) {
 				res = -ENOMEM;
 				goto error_exit_free_blocks;
 			}
-			memset(zeros, 0, to_paste * UNFM_P_SIZE);
 			do {
 				to_paste =
 				    min_t(__u64, hole_size,
@@ -407,6 +406,8 @@
 				   we restart it. This will also free the path. */
 				if (journal_transaction_should_end
 				    (th, th->t_blocks_allocated)) {
+					inode->i_size = cpu_key_k_offset(&key) +
+						(to_paste << inode->i_blkbits);
 					res =
 					    restart_transaction(th, inode,
 								&path);
@@ -1045,6 +1046,7 @@
 			char *kaddr = kmap_atomic(prepared_pages[0], KM_USER0);
 			memset(kaddr, 0, from);
 			kunmap_atomic(kaddr, KM_USER0);
+			flush_dcache_page(prepared_pages[0]);
 		}
 		if (to != PAGE_CACHE_SIZE) {	/* Last page needs to be partially zeroed */
 			char *kaddr =
@@ -1052,6 +1054,7 @@
 					KM_USER0);
 			memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
 			kunmap_atomic(kaddr, KM_USER0);
+			flush_dcache_page(prepared_pages[num_pages - 1]);
 		}
 
 		/* Since all blocks are new - use already calculated value */
@@ -1185,6 +1188,7 @@
 					memset(kaddr + block_start, 0,
 					       from - block_start);
 					kunmap_atomic(kaddr, KM_USER0);
+					flush_dcache_page(prepared_pages[0]);
 					set_buffer_uptodate(bh);
 				}
 			}
@@ -1222,6 +1226,7 @@
 							KM_USER0);
 					memset(kaddr + to, 0, block_end - to);
 					kunmap_atomic(kaddr, KM_USER0);
+					flush_dcache_page(prepared_pages[num_pages - 1]);
 					set_buffer_uptodate(bh);
 				}
 			}
@@ -1307,56 +1312,8 @@
 			count = MAX_NON_LFS - (unsigned long)*ppos;
 	}
 
-	if (file->f_flags & O_DIRECT) {	// Direct IO needs treatment
-		ssize_t result, after_file_end = 0;
-		if ((*ppos + count >= inode->i_size)
-		    || (file->f_flags & O_APPEND)) {
-			/* If we are appending a file, we need to put this savelink in here.
-			   If we will crash while doing direct io, finish_unfinished will
-			   cut the garbage from the file end. */
-			reiserfs_write_lock(inode->i_sb);
-			err =
-			    journal_begin(&th, inode->i_sb,
-					  JOURNAL_PER_BALANCE_CNT);
-			if (err) {
-				reiserfs_write_unlock(inode->i_sb);
-				return err;
-			}
-			reiserfs_update_inode_transaction(inode);
-			add_save_link(&th, inode, 1 /* Truncate */ );
-			after_file_end = 1;
-			err =
-			    journal_end(&th, inode->i_sb,
-					JOURNAL_PER_BALANCE_CNT);
-			reiserfs_write_unlock(inode->i_sb);
-			if (err)
-				return err;
-		}
-		result = do_sync_write(file, buf, count, ppos);
-
-		if (after_file_end) {	/* Now update i_size and remove the savelink */
-			struct reiserfs_transaction_handle th;
-			reiserfs_write_lock(inode->i_sb);
-			err = journal_begin(&th, inode->i_sb, 1);
-			if (err) {
-				reiserfs_write_unlock(inode->i_sb);
-				return err;
-			}
-			reiserfs_update_inode_transaction(inode);
-			mark_inode_dirty(inode);
-			err = journal_end(&th, inode->i_sb, 1);
-			if (err) {
-				reiserfs_write_unlock(inode->i_sb);
-				return err;
-			}
-			err = remove_save_link(inode, 1 /* truncate */ );
-			reiserfs_write_unlock(inode->i_sb);
-			if (err)
-				return err;
-		}
-
-		return result;
-	}
+	if (file->f_flags & O_DIRECT)
+		return do_sync_write(file, buf, count, ppos);
 
 	if (unlikely((ssize_t) count < 0))
 		return -EINVAL;
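
The flush_dcache_page() calls added above matter on architectures with virtually indexed caches: after part of a page is zeroed through a temporary kernel mapping, the flush makes the change visible to any user-space mapping of that page. A minimal sketch of the pattern the hunks follow:

	#include <linux/highmem.h>
	#include <linux/pagemap.h>
	#include <linux/string.h>

	/* Zero a page from 'to' onwards through a temporary kernel mapping. */
	static void foo_zero_tail(struct page *page, unsigned int to)
	{
		char *kaddr = kmap_atomic(page, KM_USER0);

		memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
		kunmap_atomic(kaddr, KM_USER0);

		/* the page content changed behind any user mapping; flush it */
		flush_dcache_page(page);
	}
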
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 9c69bca..254239e 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -216,11 +216,12 @@
 	BUG_ON(!th->t_trans_id);
 	BUG_ON(!th->t_refcount);
 
+	pathrelse(path);
+
 	/* we cannot restart while nested */
 	if (th->t_refcount > 1) {
 		return 0;
 	}
-	pathrelse(path);
 	reiserfs_update_sd(th, inode);
 	err = journal_end(th, s, len);
 	if (!err) {
@@ -928,15 +929,12 @@
 			if (blocks_needed == 1) {
 				un = &unf_single;
 			} else {
-				un = kmalloc(min(blocks_needed, max_to_insert) * UNFM_P_SIZE, GFP_ATOMIC);	// We need to avoid scheduling.
+				un = kzalloc(min(blocks_needed, max_to_insert) * UNFM_P_SIZE, GFP_ATOMIC);	// We need to avoid scheduling.
 				if (!un) {
 					un = &unf_single;
 					blocks_needed = 1;
 					max_to_insert = 0;
-				} else
-					memset(un, 0,
-					       UNFM_P_SIZE * min(blocks_needed,
-								 max_to_insert));
+				}
 			}
 			if (blocks_needed <= max_to_insert) {
 				/* we are going to add target block to the file. Use allocated
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index ac93174..7280a23 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -104,7 +104,7 @@
 			       struct reiserfs_journal *journal);
 static int dirty_one_transaction(struct super_block *s,
 				 struct reiserfs_journal_list *jl);
-static void flush_async_commits(void *p);
+static void flush_async_commits(struct work_struct *work);
 static void queue_log_writer(struct super_block *s);
 
 /* values for join in do_journal_begin_r */
@@ -2836,7 +2836,8 @@
 	if (reiserfs_mounted_fs_count <= 1)
 		commit_wq = create_workqueue("reiserfs");
 
-	INIT_WORK(&journal->j_work, flush_async_commits, p_s_sb);
+	INIT_DELAYED_WORK(&journal->j_work, flush_async_commits);
+	journal->j_work_sb = p_s_sb;
 	return 0;
       free_and_return:
 	free_journal_ram(p_s_sb);
@@ -3447,10 +3448,11 @@
 /*
 ** writeback the pending async commits to disk
 */
-static void flush_async_commits(void *p)
+static void flush_async_commits(struct work_struct *work)
 {
-	struct super_block *p_s_sb = p;
-	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
+	struct reiserfs_journal *journal =
+		container_of(work, struct reiserfs_journal, j_work.work);
+	struct super_block *p_s_sb = journal->j_work_sb;
 	struct reiserfs_journal_list *jl;
 	struct list_head *entry;
 
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 1724999..7fb5fb0 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -490,13 +490,13 @@
 	return;
 }
 
-static kmem_cache_t *reiserfs_inode_cachep;
+static struct kmem_cache *reiserfs_inode_cachep;
 
 static struct inode *reiserfs_alloc_inode(struct super_block *sb)
 {
 	struct reiserfs_inode_info *ei;
 	ei = (struct reiserfs_inode_info *)
-	    kmem_cache_alloc(reiserfs_inode_cachep, SLAB_KERNEL);
+	    kmem_cache_alloc(reiserfs_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	return &ei->vfs_inode;
@@ -507,7 +507,7 @@
 	kmem_cache_free(reiserfs_inode_cachep, REISERFS_I(inode));
 }
 
-static void init_once(void *foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct reiserfs_inode_info *ei = (struct reiserfs_inode_info *)foo;
 
@@ -1549,13 +1549,12 @@
 	struct reiserfs_sb_info *sbi;
 	int errval = -EINVAL;
 
-	sbi = kmalloc(sizeof(struct reiserfs_sb_info), GFP_KERNEL);
+	sbi = kzalloc(sizeof(struct reiserfs_sb_info), GFP_KERNEL);
 	if (!sbi) {
 		errval = -ENOMEM;
 		goto error;
 	}
 	s->s_fs_info = sbi;
-	memset(sbi, 0, sizeof(struct reiserfs_sb_info));
 	/* Set default values for options: non-aggressive tails, RO on errors */
 	REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
 	REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_ERROR_RO);
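
Several reiserfs hunks above replace a kmalloc() followed by memset(..., 0, ...) with a single kzalloc(), which returns already-zeroed memory. A one-function sketch with an invented structure:

	#include <linux/slab.h>

	struct foo_sb_info {
		unsigned long	mount_opt;
	};

	static struct foo_sb_info *foo_alloc_sbi(void)
	{
		/* replaces kmalloc(sizeof(*sbi), GFP_KERNEL) + memset(sbi, 0, ...) */
		return kzalloc(sizeof(struct foo_sb_info), GFP_KERNEL);
	}
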
diff --git a/fs/romfs/inode.c b/fs/romfs/inode.c
index ddcd9e1..c5af088 100644
--- a/fs/romfs/inode.c
+++ b/fs/romfs/inode.c
@@ -550,12 +550,12 @@
 	}
 }
 
-static kmem_cache_t * romfs_inode_cachep;
+static struct kmem_cache * romfs_inode_cachep;
 
 static struct inode *romfs_alloc_inode(struct super_block *sb)
 {
 	struct romfs_inode_info *ei;
-	ei = (struct romfs_inode_info *)kmem_cache_alloc(romfs_inode_cachep, SLAB_KERNEL);
+	ei = (struct romfs_inode_info *)kmem_cache_alloc(romfs_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	return &ei->vfs_inode;
@@ -566,7 +566,7 @@
 	kmem_cache_free(romfs_inode_cachep, ROMFS_I(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct romfs_inode_info *ei = (struct romfs_inode_info *) foo;
 
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 555b9ac..10690aa 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -26,7 +26,7 @@
  *	ERR_PTR(error).  At the end of the sequence they return %NULL. ->show()
  *	returns 0 in case of success and negative number in case of error.
  */
-int seq_open(struct file *file, struct seq_operations *op)
+int seq_open(struct file *file, const struct seq_operations *op)
 {
 	struct seq_file *p = file->private_data;
 
@@ -408,7 +408,7 @@
 
 int single_release(struct inode *inode, struct file *file)
 {
-	struct seq_operations *op = ((struct seq_file *)file->private_data)->op;
+	const struct seq_operations *op = ((struct seq_file *)file->private_data)->op;
 	int res = seq_release(inode, file);
 	kfree(op);
 	return res;
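
With seq_open() taking a const struct seq_operations *, the ops tables themselves can be declared const and placed in read-only data. A hedged sketch of a trivial single-record sequence; every name here is hypothetical:

	#include <linux/seq_file.h>
	#include <linux/fs.h>

	static void *foo_seq_start(struct seq_file *m, loff_t *pos)
	{
		return *pos == 0 ? (void *)1 : NULL;	/* single record */
	}

	static void *foo_seq_next(struct seq_file *m, void *v, loff_t *pos)
	{
		(*pos)++;
		return NULL;
	}

	static void foo_seq_stop(struct seq_file *m, void *v)
	{
	}

	static int foo_seq_show(struct seq_file *m, void *v)
	{
		seq_printf(m, "hello\n");
		return 0;
	}

	/* can now be const because seq_open() takes a const pointer */
	static const struct seq_operations foo_seq_ops = {
		.start	= foo_seq_start,
		.next	= foo_seq_next,
		.stop	= foo_seq_stop,
		.show	= foo_seq_show,
	};

	static int foo_seq_open(struct inode *inode, struct file *file)
	{
		return seq_open(file, &foo_seq_ops);
	}
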
diff --git a/fs/smbfs/inode.c b/fs/smbfs/inode.c
index 2c122ee..4af4cd7 100644
--- a/fs/smbfs/inode.c
+++ b/fs/smbfs/inode.c
@@ -50,12 +50,12 @@
 static int  smb_statfs(struct dentry *, struct kstatfs *);
 static int  smb_show_options(struct seq_file *, struct vfsmount *);
 
-static kmem_cache_t *smb_inode_cachep;
+static struct kmem_cache *smb_inode_cachep;
 
 static struct inode *smb_alloc_inode(struct super_block *sb)
 {
 	struct smb_inode_info *ei;
-	ei = (struct smb_inode_info *)kmem_cache_alloc(smb_inode_cachep, SLAB_KERNEL);
+	ei = (struct smb_inode_info *)kmem_cache_alloc(smb_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	return &ei->vfs_inode;
@@ -66,7 +66,7 @@
 	kmem_cache_free(smb_inode_cachep, SMB_I(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct smb_inode_info *ei = (struct smb_inode_info *) foo;
 	unsigned long flagmask = SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR;
diff --git a/fs/smbfs/request.c b/fs/smbfs/request.c
index 0fb7469..a4bcae8 100644
--- a/fs/smbfs/request.c
+++ b/fs/smbfs/request.c
@@ -25,7 +25,7 @@
 #define ROUND_UP(x) (((x)+3) & ~3)
 
 /* cache for request structures */
-static kmem_cache_t *req_cachep;
+static struct kmem_cache *req_cachep;
 
 static int smb_request_send_req(struct smb_request *req);
 
@@ -61,7 +61,7 @@
 	struct smb_request *req;
 	unsigned char *buf = NULL;
 
-	req = kmem_cache_alloc(req_cachep, SLAB_KERNEL);
+	req = kmem_cache_alloc(req_cachep, GFP_KERNEL);
 	VERBOSE("allocating request: %p\n", req);
 	if (!req)
 		goto out;
diff --git a/fs/stat.c b/fs/stat.c
index bca07eb..a0ebfc7 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -51,13 +51,6 @@
 		return inode->i_op->getattr(mnt, dentry, stat);
 
 	generic_fillattr(inode, stat);
-	if (!stat->blksize) {
-		struct super_block *s = inode->i_sb;
-		unsigned blocks;
-		blocks = (stat->size+s->s_blocksize-1) >> s->s_blocksize_bits;
-		stat->blocks = (s->s_blocksize / 512) * blocks;
-		stat->blksize = s->s_blocksize;
-	}
 	return 0;
 }
 
diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
index 20551a1..e503f85 100644
--- a/fs/sysfs/mount.c
+++ b/fs/sysfs/mount.c
@@ -16,7 +16,7 @@
 
 struct vfsmount *sysfs_mount;
 struct super_block * sysfs_sb = NULL;
-kmem_cache_t *sysfs_dir_cachep;
+struct kmem_cache *sysfs_dir_cachep;
 
 static struct super_operations sysfs_ops = {
 	.statfs		= simple_statfs,
diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h
index 6f3d6bd..bd7cec2 100644
--- a/fs/sysfs/sysfs.h
+++ b/fs/sysfs/sysfs.h
@@ -1,6 +1,6 @@
 
 extern struct vfsmount * sysfs_mount;
-extern kmem_cache_t *sysfs_dir_cachep;
+extern struct kmem_cache *sysfs_dir_cachep;
 
 extern struct inode * sysfs_new_inode(mode_t mode, struct sysfs_dirent *);
 extern int sysfs_create(struct dentry *, int mode, int (*init)(struct inode *));
diff --git a/fs/sysv/CHANGES b/fs/sysv/CHANGES
deleted file mode 100644
index 66ea6e9..0000000
--- a/fs/sysv/CHANGES
+++ /dev/null
@@ -1,60 +0,0 @@
-Mon, 15 Dec 1997	  Krzysztof G. Baranowski <kgb@manjak.knm.org.pl>
-	*    namei.c: struct sysv_dir_inode_operations updated to use dentries.
-
-Fri, 23 Jan 1998   Krzysztof G. Baranowski <kgb@manjak.knm.org.pl>
-	*    inode.c: corrected 1 track offset setting (in sb->sv_block_base).
-		      Originally it was overridden (by setting to zero)
-		      in detected_[xenix,sysv4,sysv2,coherent]. Thanks
-		      to Andrzej Krzysztofowicz <ankry@mif.pg.gda.pl>
-		      for identifying the problem.
-
-Tue, 27 Jan 1998   Krzysztof G. Baranowski <kgb@manjak.knm.org.pl>
-        *    inode.c: added 2048-byte block support to SystemV FS.
-		      Merged detected_bs[512,1024,2048]() into one function:
-		      void detected_bs (u_char type, struct super_block *sb).
-		      Thanks to Andrzej Krzysztofowicz <ankry@mif.pg.gda.pl>
-		      for the patch.
-
-Wed, 4 Feb 1998   Krzysztof G. Baranowski <kgb@manjak.knm.org.pl>
-	*    namei.c: removed static subdir(); is_subdir() from dcache.c
-		      is used instead. Cosmetic changes.
-
-Thu, 3 Dec 1998   Al Viro (viro@parcelfarce.linux.theplanet.co.uk)
-	*    namei.c (sysv_rmdir):
-		      Bugectomy: old check for victim being busy
-		      (inode->i_count) wasn't replaced (with checking
-		      dentry->d_count) and escaped Linus in the last round
-		      of changes. Shot and buried.
-
-Wed, 9 Dec 1998   AV
-	*    namei.c (do_sysv_rename):
-		       Fixed incorrect check for other owners + race.
-		       Removed checks that went to VFS.
-	*    namei.c (sysv_unlink):
-		       Removed checks that went to VFS.
-
-Thu, 10 Dec 1998   AV
-	*    namei.c (do_mknod):
-			Removed dead code - mknod is never asked to
-			create a symlink or directory. Incidentially,
-			it wouldn't do it right if it would be called.
-
-Sat, 26 Dec 1998   KGB
-	*    inode.c (detect_sysv4):
-			Added detection of expanded s_type field (0x10,
-			0x20 and 0x30).  Forced read-only access in this case.
-
-Sun, 21 Mar 1999   AV
-	*    namei.c (sysv_link):
-			Fixed i_count usage that resulted in dcache corruption.
-	*    inode.c:
-			Filled ->delete_inode() method with sysv_delete_inode().
-			sysv_put_inode() is gone, as it tried to do ->delete_
-			_inode()'s job.
-	*    ialloc.c: (sysv_free_inode):
-			Fixed race.
-
-Sun, 30 Apr 1999   AV
-	*    namei.c (sysv_mknod):
-			Removed dead code (S_IFREG case is now passed to
-			->create() by VFS).
diff --git a/fs/sysv/ChangeLog b/fs/sysv/ChangeLog
deleted file mode 100644
index f403f8b..0000000
--- a/fs/sysv/ChangeLog
+++ /dev/null
@@ -1,106 +0,0 @@
-Thu Feb 14 2002  Andrew Morton  <akpm@zip.com.au>
-
-	* dir_commit_chunk(): call writeout_one_page() as well as
-	  waitfor_one_page() for IS_SYNC directories, so that we
-	  actually do sync the directory. (forward-port from 2.4).
-
-Thu Feb  7 2002  Alexander Viro  <viro@parcelfarce.linux.theplanet.co.uk>
-
-	* super.c: switched to ->get_sb()
-	* ChangeLog: fixed dates ;-)
-
-2002-01-24  David S. Miller  <davem@redhat.com>
-
-	* inode.c: Include linux/init.h
-
-Mon Jan 21 2002  Alexander Viro  <viro@parcelfarce.linux.theplanet.co.uk>
-	* ialloc.c (sysv_new_inode): zero SYSV_I(inode)->i_data out.
-	* i_vnode renamed to vfs_inode.  Sorry, but let's keep that
-	  consistent.
-
-Sat Jan 19 2002  Christoph Hellwig  <hch@infradead.org>
-
-	* include/linux/sysv_fs.h (SYSV_I): Get fs-private inode data using
-		list_entry() instead of inode->u.
-	* include/linux/sysv_fs_i.h: Add 'struct inode  i_vnode' field to
-		sysv_inode_info structure.
-	* inode.c: Include <linux/slab.h>, implement alloc_inode/destroy_inode
-		sop methods, add infrastructure for per-fs inode slab cache.
-	* super.c (init_sysv_fs): Initialize inode cache, recover properly
-		in the case of failed register_filesystem for V7.
-	(exit_sysv_fs): Destroy inode cache.
-
-Sat Jan 19 2002  Christoph Hellwig  <hch@infradead.org>
-
-	* include/linux/sysv_fs.h: Include <linux/sysv_fs_i.h>, declare SYSV_I().
-	* dir.c (sysv_find_entry): Use SYSV_I() instead of ->u.sysv_i to
-		access fs-private inode data.
-	* ialloc.c (sysv_new_inode): Likewise.
-	* inode.c (sysv_read_inode): Likewise.
-	(sysv_update_inode): Likewise.
-	* itree.c (get_branch): Likewise.
-	(sysv_truncate): Likewise.
-	* symlink.c (sysv_readlink): Likewise.
-	(sysv_follow_link): Likewise.
-
-Fri Jan  4 2002  Alexander Viro  <viro@parcelfarce.linux.theplanet.co.uk>
-
-	* ialloc.c (sysv_free_inode): Use sb->s_id instead of bdevname().
-	* inode.c (sysv_read_inode): Likewise.
-	  (sysv_update_inode): Likewise.
-	  (sysv_sync_inode): Likewise.
-	* super.c (detect_sysv): Likewise.
-	  (complete_read_super): Likewise.
-	  (sysv_read_super): Likewise.
-	  (v7_read_super): Likewise.
-
-Sun Dec 30 2001  Manfred Spraul  <manfred@colorfullife.com>
-
-	* dir.c (dir_commit_chunk): Do not set dir->i_version.
-	(sysv_readdir): Likewise.
-
-Thu Dec 27 2001  Alexander Viro  <viro@parcelfarce.linux.theplanet.co.uk>
-
-	* itree.c (get_block): Use map_bh() to fill out bh_result.
-
-Tue Dec 25 2001  Alexander Viro  <viro@parcelfarce.linux.theplanet.co.uk>
-
-	* super.c (sysv_read_super): Use sb_set_blocksize() to set blocksize.
-	  (v7_read_super): Likewise.
-
-Tue Nov 27 2001  Alexander Viro  <viro@parcelfarce.linux.theplanet.co.uk>
-
-	* itree.c (get_block): Change type for iblock argument to sector_t.
-	* super.c (sysv_read_super): Set s_blocksize early.
-	  (v7_read_super): Likewise.
-	* balloc.c (sysv_new_block): Use sb_bread(). instead of bread().
-	  (sysv_count_free_blocks): Likewise.
-	* ialloc.c (sysv_raw_inode): Likewise.
-	* itree.c (get_branch): Likewise.
-	  (free_branches): Likewise.
-	* super.c (sysv_read_super): Likewise.
-	  (v7_read_super): Likewise.
-
-Sat Dec 15 2001  Christoph Hellwig  <hch@infradead.org>
-
-	* inode.c (sysv_read_inode): Mark inode as bad in case of failure.
-	* super.c (complete_read_super): Check for bad root inode.
-
-Wed Nov 21 2001  Andrew Morton  <andrewm@uow.edu.au>
-
-	* file.c (sysv_sync_file): Call fsync_inode_data_buffers.
-
-Fri Oct 26 2001  Christoph Hellwig  <hch@infradead.org>
-
-	* dir.c, ialloc.c, namei.c, include/linux/sysv_fs_i.h:
-	Implement per-Inode lookup offset cache.
-	Modelled after Ted's ext2 patch.
-
-Fri Oct 26 2001  Christoph Hellwig  <hch@infradead.org>
-
-	* inode.c, super.c, include/linux/sysv_fs.h,
-	  include/linux/sysv_fs_sb.h:
-	Remove symlink faking.	Noone really wants to use these as
-	linux filesystems and native OSes don't support it anyway.
-
-
diff --git a/fs/sysv/INTRO b/fs/sysv/INTRO
deleted file mode 100644
index de4e4d1..0000000
--- a/fs/sysv/INTRO
+++ /dev/null
@@ -1,182 +0,0 @@
-This is the implementation of the SystemV/Coherent filesystem for Linux.
-It grew out of separate filesystem implementations
-
-    Xenix FS      Doug Evans <dje@cygnus.com>  June 1992
-    SystemV FS    Paul B. Monday <pmonday@eecs.wsu.edu> March-June 1993
-    Coherent FS   B. Haible <haible@ma2s2.mathematik.uni-karlsruhe.de> June 1993
-
-and was merged together in July 1993.
-
-These filesystems are rather similar. Here is a comparison with Minix FS:
-
-* Linux fdisk reports on partitions
-  - Minix FS     0x81 Linux/Minix
-  - Xenix FS     ??
-  - SystemV FS   ??
-  - Coherent FS  0x08 AIX bootable
-
-* Size of a block or zone (data allocation unit on disk)
-  - Minix FS     1024
-  - Xenix FS     1024 (also 512 ??)
-  - SystemV FS   1024 (also 512 and 2048)
-  - Coherent FS   512
-
-* General layout: all have one boot block, one super block and
-  separate areas for inodes and for directories/data.
-  On SystemV Release 2 FS (e.g. Microport) the first track is reserved and
-  all the block numbers (including the super block) are offset by one track.
-
-* Byte ordering of "short" (16 bit entities) on disk:
-  - Minix FS     little endian  0 1
-  - Xenix FS     little endian  0 1
-  - SystemV FS   little endian  0 1
-  - Coherent FS  little endian  0 1
-  Of course, this affects only the file system, not the data of files on it!
-
-* Byte ordering of "long" (32 bit entities) on disk:
-  - Minix FS     little endian  0 1 2 3
-  - Xenix FS     little endian  0 1 2 3
-  - SystemV FS   little endian  0 1 2 3
-  - Coherent FS  PDP-11         2 3 0 1
-  Of course, this affects only the file system, not the data of files on it!
-
-* Inode on disk: "short", 0 means non-existent, the root dir ino is:
-  - Minix FS                            1
-  - Xenix FS, SystemV FS, Coherent FS   2
-
-* Maximum number of hard links to a file:
-  - Minix FS     250
-  - Xenix FS     ??
-  - SystemV FS   ??
-  - Coherent FS  >=10000
-
-* Free inode management:
-  - Minix FS                             a bitmap
-  - Xenix FS, SystemV FS, Coherent FS
-      There is a cache of a certain number of free inodes in the super-block.
-      When it is exhausted, new free inodes are found using a linear search.
-
-* Free block management:
-  - Minix FS                             a bitmap
-  - Xenix FS, SystemV FS, Coherent FS
-      Free blocks are organized in a "free list". Maybe a misleading term,
-      since it is not true that every free block contains a pointer to
-      the next free block. Rather, the free blocks are organized in chunks
-      of limited size, and every now and then a free block contains pointers
-      to the free blocks pertaining to the next chunk; the first of these
-      contains pointers and so on. The list terminates with a "block number"
-      0 on Xenix FS and SystemV FS, with a block zeroed out on Coherent FS.
-
-* Super-block location:
-  - Minix FS     block 1 = bytes 1024..2047
-  - Xenix FS     block 1 = bytes 1024..2047
-  - SystemV FS   bytes 512..1023
-  - Coherent FS  block 1 = bytes 512..1023
-
-* Super-block layout:
-  - Minix FS
-                    unsigned short s_ninodes;
-                    unsigned short s_nzones;
-                    unsigned short s_imap_blocks;
-                    unsigned short s_zmap_blocks;
-                    unsigned short s_firstdatazone;
-                    unsigned short s_log_zone_size;
-                    unsigned long s_max_size;
-                    unsigned short s_magic;
-  - Xenix FS, SystemV FS, Coherent FS
-                    unsigned short s_firstdatazone;
-                    unsigned long  s_nzones;
-                    unsigned short s_fzone_count;
-                    unsigned long  s_fzones[NICFREE];
-                    unsigned short s_finode_count;
-                    unsigned short s_finodes[NICINOD];
-                    char           s_flock;
-                    char           s_ilock;
-                    char           s_modified;
-                    char           s_rdonly;
-                    unsigned long  s_time;
-                    short          s_dinfo[4]; -- SystemV FS only
-                    unsigned long  s_free_zones;
-                    unsigned short s_free_inodes;
-                    short          s_dinfo[4]; -- Xenix FS only
-                    unsigned short s_interleave_m,s_interleave_n; -- Coherent FS only
-                    char           s_fname[6];
-                    char           s_fpack[6];
-    then they differ considerably:
-        Xenix FS
-                    char           s_clean;
-                    char           s_fill[371];
-                    long           s_magic;
-                    long           s_type;
-        SystemV FS
-                    long           s_fill[12 or 14];
-                    long           s_state;
-                    long           s_magic;
-                    long           s_type;
-        Coherent FS
-                    unsigned long  s_unique;
-    Note that Coherent FS has no magic.
-
-* Inode layout:
-  - Minix FS
-                    unsigned short i_mode;
-                    unsigned short i_uid;
-                    unsigned long  i_size;
-                    unsigned long  i_time;
-                    unsigned char  i_gid;
-                    unsigned char  i_nlinks;
-                    unsigned short i_zone[7+1+1];
-  - Xenix FS, SystemV FS, Coherent FS
-                    unsigned short i_mode;
-                    unsigned short i_nlink;
-                    unsigned short i_uid;
-                    unsigned short i_gid;
-                    unsigned long  i_size;
-                    unsigned char  i_zone[3*(10+1+1+1)];
-                    unsigned long  i_atime;
-                    unsigned long  i_mtime;
-                    unsigned long  i_ctime;
-
-* Regular file data blocks are organized as
-  - Minix FS
-               7 direct blocks
-               1 indirect block (pointers to blocks)
-               1 double-indirect block (pointer to pointers to blocks)
-  - Xenix FS, SystemV FS, Coherent FS
-              10 direct blocks
-               1 indirect block (pointers to blocks)
-               1 double-indirect block (pointer to pointers to blocks)
-               1 triple-indirect block (pointer to pointers to pointers to blocks)
-
-* Inode size, inodes per block
-  - Minix FS        32   32
-  - Xenix FS        64   16
-  - SystemV FS      64   16
-  - Coherent FS     64    8
-
-* Directory entry on disk
-  - Minix FS
-                    unsigned short inode;
-                    char name[14/30];
-  - Xenix FS, SystemV FS, Coherent FS
-                    unsigned short inode;
-                    char name[14];
-
-* Dir entry size, dir entries per block
-  - Minix FS     16/32    64/32
-  - Xenix FS     16       64
-  - SystemV FS   16       64
-  - Coherent FS  16       32
-
-* How to implement symbolic links such that the host fsck doesn't scream:
-  - Minix FS     normal
-  - Xenix FS     kludge: as regular files with  chmod 1000
-  - SystemV FS   ??
-  - Coherent FS  kludge: as regular files with  chmod 1000
-
-
-Notation: We often speak of a "block" but mean a zone (the allocation unit)
-and not the disk driver's notion of "block".
-
-
-Bruno Haible  <haible@ma2s2.mathematik.uni-karlsruhe.de>
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c
index d63c5e4..ead9864 100644
--- a/fs/sysv/inode.c
+++ b/fs/sysv/inode.c
@@ -301,13 +301,13 @@
 	unlock_kernel();
 }
 
-static kmem_cache_t *sysv_inode_cachep;
+static struct kmem_cache *sysv_inode_cachep;
 
 static struct inode *sysv_alloc_inode(struct super_block *sb)
 {
 	struct sysv_inode_info *si;
 
-	si = kmem_cache_alloc(sysv_inode_cachep, SLAB_KERNEL);
+	si = kmem_cache_alloc(sysv_inode_cachep, GFP_KERNEL);
 	if (!si)
 		return NULL;
 	return &si->vfs_inode;
@@ -318,7 +318,7 @@
 	kmem_cache_free(sysv_inode_cachep, SYSV_I(inode));
 }
 
-static void init_once(void *p, kmem_cache_t *cachep, unsigned long flags)
+static void init_once(void *p, struct kmem_cache *cachep, unsigned long flags)
 {
 	struct sysv_inode_info *si = (struct sysv_inode_info *)p;
 
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 1aea6a4..1dbc295 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -107,12 +107,12 @@
 	.fs_flags	= FS_REQUIRES_DEV,
 };
 
-static kmem_cache_t * udf_inode_cachep;
+static struct kmem_cache * udf_inode_cachep;
 
 static struct inode *udf_alloc_inode(struct super_block *sb)
 {
 	struct udf_inode_info *ei;
-	ei = (struct udf_inode_info *)kmem_cache_alloc(udf_inode_cachep, SLAB_KERNEL);
+	ei = (struct udf_inode_info *)kmem_cache_alloc(udf_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 
@@ -130,7 +130,7 @@
 	kmem_cache_free(udf_inode_cachep, UDF_I(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct udf_inode_info *ei = (struct udf_inode_info *) foo;
 
@@ -1709,7 +1709,7 @@
 		sb->s_dirt = 1;
 	}
 	va_start(args, fmt);
-	vsprintf(error_buf, fmt, args);
+	vsnprintf(error_buf, sizeof(error_buf), fmt, args);
 	va_end(args);
 	printk (KERN_CRIT "UDF-fs error (device %s): %s: %s\n",
 		sb->s_id, function, error_buf);
@@ -1721,7 +1721,7 @@
 	va_list args;
 
 	va_start (args, fmt);
-	vsprintf(error_buf, fmt, args);
+	vsnprintf(error_buf, sizeof(error_buf), fmt, args);
 	va_end(args);
 	printk(KERN_WARNING "UDF-fs warning (device %s): %s: %s\n",
 		sb->s_id, function, error_buf);
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index ec79e30..8a8e938 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -224,7 +224,7 @@
 		sb->s_flags |= MS_RDONLY;
 	}
 	va_start (args, fmt);
-	vsprintf (error_buf, fmt, args);
+	vsnprintf (error_buf, sizeof(error_buf), fmt, args);
 	va_end (args);
 	switch (UFS_SB(sb)->s_mount_opt & UFS_MOUNT_ONERROR) {
 	case UFS_MOUNT_ONERROR_PANIC:
@@ -255,7 +255,7 @@
 		sb->s_dirt = 1;
 	}
 	va_start (args, fmt);
-	vsprintf (error_buf, fmt, args);
+	vsnprintf (error_buf, sizeof(error_buf), fmt, args);
 	va_end (args);
 	sb->s_flags |= MS_RDONLY;
 	printk (KERN_CRIT "UFS-fs panic (device %s): %s: %s\n",
@@ -268,7 +268,7 @@
 	va_list args;
 
 	va_start (args, fmt);
-	vsprintf (error_buf, fmt, args);
+	vsnprintf (error_buf, sizeof(error_buf), fmt, args);
 	va_end (args);
 	printk (KERN_WARNING "UFS-fs warning (device %s): %s: %s\n",
 		sb->s_id, function, error_buf);
@@ -1204,12 +1204,12 @@
 	return 0;
 }
 
-static kmem_cache_t * ufs_inode_cachep;
+static struct kmem_cache * ufs_inode_cachep;
 
 static struct inode *ufs_alloc_inode(struct super_block *sb)
 {
 	struct ufs_inode_info *ei;
-	ei = (struct ufs_inode_info *)kmem_cache_alloc(ufs_inode_cachep, SLAB_KERNEL);
+	ei = (struct ufs_inode_info *)kmem_cache_alloc(ufs_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	ei->vfs_inode.i_version = 1;
@@ -1221,7 +1221,7 @@
 	kmem_cache_free(ufs_inode_cachep, UFS_I(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct ufs_inode_info *ei = (struct ufs_inode_info *) foo;
 
diff --git a/fs/ufs/util.h b/fs/ufs/util.h
index 28fce6c..7dd12bb 100644
--- a/fs/ufs/util.h
+++ b/fs/ufs/util.h
@@ -299,7 +299,7 @@
 
 #define ubh_get_addr16(ubh,begin) \
 	(((__fs16*)((ubh)->bh[(begin) >> (uspi->s_fshift-1)]->b_data)) + \
-	((begin) & (uspi->fsize>>1) - 1)))
+	((begin) & ((uspi->fsize>>1) - 1)))
 
 #define ubh_get_addr32(ubh,begin) \
 	(((__fs32*)((ubh)->bh[(begin) >> (uspi->s_fshift-2)]->b_data)) + \
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 09360cf..8e6b56f 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -149,9 +149,10 @@
  */
 STATIC void
 xfs_end_bio_delalloc(
-	void			*data)
+	struct work_struct	*work)
 {
-	xfs_ioend_t		*ioend = data;
+	xfs_ioend_t		*ioend =
+		container_of(work, xfs_ioend_t, io_work);
 
 	xfs_destroy_ioend(ioend);
 }
@@ -161,9 +162,10 @@
  */
 STATIC void
 xfs_end_bio_written(
-	void			*data)
+	struct work_struct	*work)
 {
-	xfs_ioend_t		*ioend = data;
+	xfs_ioend_t		*ioend =
+		container_of(work, xfs_ioend_t, io_work);
 
 	xfs_destroy_ioend(ioend);
 }
@@ -176,9 +178,10 @@
  */
 STATIC void
 xfs_end_bio_unwritten(
-	void			*data)
+	struct work_struct	*work)
 {
-	xfs_ioend_t		*ioend = data;
+	xfs_ioend_t		*ioend =
+		container_of(work, xfs_ioend_t, io_work);
 	bhv_vnode_t		*vp = ioend->io_vnode;
 	xfs_off_t		offset = ioend->io_offset;
 	size_t			size = ioend->io_size;
@@ -220,11 +223,11 @@
 	ioend->io_size = 0;
 
 	if (type == IOMAP_UNWRITTEN)
-		INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten, ioend);
+		INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
 	else if (type == IOMAP_DELAY)
-		INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc, ioend);
+		INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
 	else
-		INIT_WORK(&ioend->io_work, xfs_end_bio_written, ioend);
+		INIT_WORK(&ioend->io_work, xfs_end_bio_written);
 
 	return ioend;
 }
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index d338284..4fb01ff 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -32,6 +32,7 @@
 #include <linux/kthread.h>
 #include <linux/migrate.h>
 #include <linux/backing-dev.h>
+#include <linux/freezer.h>
 
 STATIC kmem_zone_t *xfs_buf_zone;
 STATIC kmem_shaker_t xfs_buf_shake;
@@ -994,9 +995,10 @@
 
 STATIC void
 xfs_buf_iodone_work(
-	void			*v)
+	struct work_struct	*work)
 {
-	xfs_buf_t		*bp = (xfs_buf_t *)v;
+	xfs_buf_t		*bp =
+		container_of(work, xfs_buf_t, b_iodone_work);
 
 	if (bp->b_iodone)
 		(*(bp->b_iodone))(bp);
@@ -1017,10 +1019,10 @@
 
 	if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
 		if (schedule) {
-			INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work, bp);
+			INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
 			queue_work(xfslogd_workqueue, &bp->b_iodone_work);
 		} else {
-			xfs_buf_iodone_work(bp);
+			xfs_buf_iodone_work(&bp->b_iodone_work);
 		}
 	} else {
 		up(&bp->b_iodonesema);
@@ -1825,11 +1827,11 @@
 	if (!xfs_buf_zone)
 		goto out_free_trace_buf;
 
-	xfslogd_workqueue = create_workqueue("xfslogd");
+	xfslogd_workqueue = create_freezeable_workqueue("xfslogd");
 	if (!xfslogd_workqueue)
 		goto out_free_buf_zone;
 
-	xfsdatad_workqueue = create_workqueue("xfsdatad");
+	xfsdatad_workqueue = create_freezeable_workqueue("xfsdatad");
 	if (!xfsdatad_workqueue)
 		goto out_destroy_xfslogd_workqueue;
 
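
The xfs_buf.c hunk switches the log and data daemons to create_freezeable_workqueue(), so their worker threads are frozen along with user space during suspend instead of issuing I/O while the system sleeps. A tiny usage sketch with an invented name:

	#include <linux/workqueue.h>
	#include <linux/freezer.h>
	#include <linux/errno.h>

	static struct workqueue_struct *foo_wq;

	static int foo_start_wq(void)
	{
		/* threads of a freezeable workqueue stop at the freezer on suspend */
		foo_wq = create_freezeable_workqueue("foo_wq");
		return foo_wq ? 0 : -ENOMEM;
	}

	static void foo_stop_wq(void)
	{
		destroy_workqueue(foo_wq);
	}
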
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index de05abb..b93265b 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -56,6 +56,7 @@
 #include <linux/mempool.h>
 #include <linux/writeback.h>
 #include <linux/kthread.h>
+#include <linux/freezer.h>
 
 STATIC struct quotactl_ops xfs_quotactl_operations;
 STATIC struct super_operations xfs_super_operations;
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index 47faf27..7f1e929 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -64,7 +64,7 @@
 /* Host-dependent types and defines */
 
 #define ACPI_MACHINE_WIDTH          BITS_PER_LONG
-#define acpi_cache_t                        kmem_cache_t
+#define acpi_cache_t                        struct kmem_cache
 #define acpi_spinlock                   spinlock_t *
 #define ACPI_EXPORT_SYMBOL(symbol)  EXPORT_SYMBOL(symbol);
 #define strtoul                     simple_strtoul
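
The acpi_cache_t define simply follows the slab rename seen throughout this merge: kmem_cache_t is being phased out in favour of writing struct kmem_cache directly. A small sketch in the new spelling (the cache name and object type are placeholders; the six-argument kmem_cache_create() matches the interface of this era):

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct my_object {                      /* placeholder payload */
        int id;
};

static struct kmem_cache *my_cachep;    /* was: kmem_cache_t *my_cachep; */

static int __init my_cache_init(void)
{
        my_cachep = kmem_cache_create("my_object_cache",
                                      sizeof(struct my_object), 0,
                                      SLAB_HWCACHE_ALIGN, NULL, NULL);
        return my_cachep ? 0 : -ENOMEM;
}
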
diff --git a/include/asm-alpha/dma-mapping.h b/include/asm-alpha/dma-mapping.h
index b9ff4d8..57e09f5 100644
--- a/include/asm-alpha/dma-mapping.h
+++ b/include/asm-alpha/dma-mapping.h
@@ -51,7 +51,7 @@
 
 #define dma_alloc_noncoherent(d, s, h, f)	dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h)	dma_free_coherent(d, s, v, h)
-#define dma_is_consistent(dev)			(1)
+#define dma_is_consistent(d, h)			(1)
 
 int dma_set_mask(struct device *dev, u64 mask);
 
@@ -60,7 +60,7 @@
 #define dma_sync_single_range(dev, addr, off, size, dir)  do { } while (0)
 #define dma_sync_sg_for_cpu(dev, sg, nents, dir)	  do { } while (0)
 #define dma_sync_sg_for_device(dev, sg, nents, dir)	  do { } while (0)
-#define dma_cache_sync(va, size, dir)			  do { } while (0)
+#define dma_cache_sync(dev, va, size, dir)		  do { } while (0)
 
 #define dma_get_cache_alignment()			  L1_CACHE_BYTES
 
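
dma_cache_sync() and dma_is_consistent() now take the struct device the mapping belongs to, since consistency is a property of the device/bus pair rather than of the address alone; these Alpha stubs just grow the extra parameter. A hedged call-site sketch (the driver function is invented, the DMA calls are the real API):

#include <linux/dma-mapping.h>

/* hand a descriptor in possibly noncoherent memory to the hardware */
static void mydrv_sync_desc(struct device *dev, void *vaddr,
                            dma_addr_t handle, size_t len)
{
        /* no-op on coherent platforms, cache writeback on the rest */
        if (!dma_is_consistent(dev, handle))
                dma_cache_sync(dev, vaddr, len, DMA_TO_DEVICE);
}
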
diff --git a/include/asm-alpha/unistd.h b/include/asm-alpha/unistd.h
index 2cabbd4..84313d1 100644
--- a/include/asm-alpha/unistd.h
+++ b/include/asm-alpha/unistd.h
@@ -387,188 +387,6 @@
 
 #define NR_SYSCALLS			447
 
-#if defined(__GNUC__)
-
-#define _syscall_return(type)						\
-	return (_sc_err ? errno = _sc_ret, _sc_ret = -1L : 0), (type) _sc_ret
-
-#define _syscall_clobbers						\
-	"$1", "$2", "$3", "$4", "$5", "$6", "$7", "$8",			\
-	"$22", "$23", "$24", "$25", "$27", "$28" 			\
-
-#define _syscall0(type, name)						\
-type name(void)								\
-{									\
-	long _sc_ret, _sc_err;						\
-	{								\
-		register long _sc_0 __asm__("$0");			\
-		register long _sc_19 __asm__("$19");			\
-									\
-		_sc_0 = __NR_##name;					\
-		__asm__("callsys # %0 %1 %2"				\
-			: "=r"(_sc_0), "=r"(_sc_19)			\
-			: "0"(_sc_0)					\
-			: _syscall_clobbers);				\
-		_sc_ret = _sc_0, _sc_err = _sc_19;			\
-	}								\
-	_syscall_return(type);						\
-}
-
-#define _syscall1(type,name,type1,arg1)					\
-type name(type1 arg1)							\
-{									\
-	long _sc_ret, _sc_err;						\
-	{								\
-		register long _sc_0 __asm__("$0");			\
-		register long _sc_16 __asm__("$16");			\
-		register long _sc_19 __asm__("$19");			\
-									\
-		_sc_0 = __NR_##name;					\
-		_sc_16 = (long) (arg1);					\
-		__asm__("callsys # %0 %1 %2 %3"				\
-			: "=r"(_sc_0), "=r"(_sc_19)			\
-			: "0"(_sc_0), "r"(_sc_16)			\
-			: _syscall_clobbers);				\
-		_sc_ret = _sc_0, _sc_err = _sc_19;			\
-	}								\
-	_syscall_return(type);						\
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2)			\
-type name(type1 arg1,type2 arg2)					\
-{									\
-	long _sc_ret, _sc_err;						\
-	{								\
-		register long _sc_0 __asm__("$0");			\
-		register long _sc_16 __asm__("$16");			\
-		register long _sc_17 __asm__("$17");			\
-		register long _sc_19 __asm__("$19");			\
-									\
-		_sc_0 = __NR_##name;					\
-		_sc_16 = (long) (arg1);					\
-		_sc_17 = (long) (arg2);					\
-		__asm__("callsys # %0 %1 %2 %3 %4"			\
-			: "=r"(_sc_0), "=r"(_sc_19)			\
-			: "0"(_sc_0), "r"(_sc_16), "r"(_sc_17)		\
-			: _syscall_clobbers);				\
-		_sc_ret = _sc_0, _sc_err = _sc_19;			\
-	}								\
-	_syscall_return(type);						\
-}
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)		\
-type name(type1 arg1,type2 arg2,type3 arg3)				\
-{									\
-	long _sc_ret, _sc_err;						\
-	{								\
-		register long _sc_0 __asm__("$0");			\
-		register long _sc_16 __asm__("$16");			\
-		register long _sc_17 __asm__("$17");			\
-		register long _sc_18 __asm__("$18");			\
-		register long _sc_19 __asm__("$19");			\
-									\
-		_sc_0 = __NR_##name;					\
-		_sc_16 = (long) (arg1);					\
-		_sc_17 = (long) (arg2);					\
-		_sc_18 = (long) (arg3);					\
-		__asm__("callsys # %0 %1 %2 %3 %4 %5"			\
-			: "=r"(_sc_0), "=r"(_sc_19)			\
-			: "0"(_sc_0), "r"(_sc_16), "r"(_sc_17),		\
-			  "r"(_sc_18)					\
-			: _syscall_clobbers);				\
-		_sc_ret = _sc_0, _sc_err = _sc_19;			\
-	}								\
-	_syscall_return(type);						\
-}
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4)		 \
-{									 \
-	long _sc_ret, _sc_err;						\
-	{								\
-		register long _sc_0 __asm__("$0");			\
-		register long _sc_16 __asm__("$16");			\
-		register long _sc_17 __asm__("$17");			\
-		register long _sc_18 __asm__("$18");			\
-		register long _sc_19 __asm__("$19");			\
-									\
-		_sc_0 = __NR_##name;					\
-		_sc_16 = (long) (arg1);					\
-		_sc_17 = (long) (arg2);					\
-		_sc_18 = (long) (arg3);					\
-		_sc_19 = (long) (arg4);					\
-		__asm__("callsys # %0 %1 %2 %3 %4 %5 %6"		\
-			: "=r"(_sc_0), "=r"(_sc_19)			\
-			: "0"(_sc_0), "r"(_sc_16), "r"(_sc_17),		\
-			  "r"(_sc_18), "1"(_sc_19)			\
-			: _syscall_clobbers);				\
-		_sc_ret = _sc_0, _sc_err = _sc_19;			\
-	}								\
-	_syscall_return(type);						\
-} 
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
-	  type5,arg5)							 \
-type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
-{									\
-	long _sc_ret, _sc_err;						\
-	{								\
-		register long _sc_0 __asm__("$0");			\
-		register long _sc_16 __asm__("$16");			\
-		register long _sc_17 __asm__("$17");			\
-		register long _sc_18 __asm__("$18");			\
-		register long _sc_19 __asm__("$19");			\
-		register long _sc_20 __asm__("$20");			\
-									\
-		_sc_0 = __NR_##name;					\
-		_sc_16 = (long) (arg1);					\
-		_sc_17 = (long) (arg2);					\
-		_sc_18 = (long) (arg3);					\
-		_sc_19 = (long) (arg4);					\
-		_sc_20 = (long) (arg5);					\
-		__asm__("callsys # %0 %1 %2 %3 %4 %5 %6 %7"		\
-			: "=r"(_sc_0), "=r"(_sc_19)			\
-			: "0"(_sc_0), "r"(_sc_16), "r"(_sc_17),		\
-			  "r"(_sc_18), "1"(_sc_19), "r"(_sc_20)		\
-			: _syscall_clobbers);				\
-		_sc_ret = _sc_0, _sc_err = _sc_19;			\
-	}								\
-	_syscall_return(type);						\
-}
-
-#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
-	  type5,arg5,type6,arg6)					 \
-type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, type6 arg6)\
-{									\
-	long _sc_ret, _sc_err;						\
-	{								\
-		register long _sc_0 __asm__("$0");			\
-		register long _sc_16 __asm__("$16");			\
-		register long _sc_17 __asm__("$17");			\
-		register long _sc_18 __asm__("$18");			\
-		register long _sc_19 __asm__("$19");			\
-		register long _sc_20 __asm__("$20");			\
-		register long _sc_21 __asm__("$21");			\
-									\
-		_sc_0 = __NR_##name;					\
-		_sc_16 = (long) (arg1);					\
-		_sc_17 = (long) (arg2);					\
-		_sc_18 = (long) (arg3);					\
-		_sc_19 = (long) (arg4);					\
-		_sc_20 = (long) (arg5);					\
-		_sc_21 = (long) (arg6);					\
-		__asm__("callsys # %0 %1 %2 %3 %4 %5 %6 %7 %8"		\
-			: "=r"(_sc_0), "=r"(_sc_19)			\
-			: "0"(_sc_0), "r"(_sc_16), "r"(_sc_17),		\
-			  "r"(_sc_18), "1"(_sc_19), "r"(_sc_20), "r"(_sc_21) \
-			: _syscall_clobbers);				\
-		_sc_ret = _sc_0, _sc_err = _sc_19;			\
-	}								\
-	_syscall_return(type);						\
-}
-
-#endif /* __GNUC__ */
-
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_STAT64
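
The _syscall0().._syscall6() macro families are being removed from the exported unistd.h headers (alpha here; the arm, arm26, frv, h8300, i386 and m32r copies further down get the same treatment). Userspace that needs a system call without a dedicated libc wrapper is expected to go through syscall(2) instead. A minimal userspace example (not kernel code):

#include <unistd.h>
#include <sys/syscall.h>
#include <stdio.h>

int main(void)
{
        /* raw system call through libc's generic stub */
        long tid = syscall(SYS_gettid);

        printf("tid = %ld\n", tid);
        return 0;
}
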
diff --git a/include/asm-arm/arch-omap/irda.h b/include/asm-arm/arch-omap/irda.h
index 805ae35..345a649 100644
--- a/include/asm-arm/arch-omap/irda.h
+++ b/include/asm-arm/arch-omap/irda.h
@@ -24,7 +24,7 @@
 	/* Very specific to the needs of some platforms (h3,h4)
 	 * having calls which can sleep in irda_set_speed.
 	 */
-	struct work_struct gpio_expa;
+	struct delayed_work gpio_expa;
 	int rx_channel;
 	int tx_channel;
 	unsigned long dest_start;
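
With the workqueue rework, work items that are scheduled with a delay use the dedicated struct delayed_work, which is what the OMAP IrDA platform data switches to here. A hedged sketch of arming such a field (struct my_irda_dev and the handler are invented; the work API calls are real):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct my_irda_dev {
        struct delayed_work     gpio_expa;
};

static void my_gpio_expa_handler(struct work_struct *work)
{
        struct my_irda_dev *si =
                container_of(work, struct my_irda_dev, gpio_expa.work);

        /* ... poke the GPIO expander here, may sleep ... */
        (void)si;
}

static void my_irda_arm(struct my_irda_dev *si)
{
        INIT_DELAYED_WORK(&si->gpio_expa, my_gpio_expa_handler);
        schedule_delayed_work(&si->gpio_expa, msecs_to_jiffies(10));
}
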
diff --git a/include/asm-arm/dma-mapping.h b/include/asm-arm/dma-mapping.h
index 6666177..9bc46b4 100644
--- a/include/asm-arm/dma-mapping.h
+++ b/include/asm-arm/dma-mapping.h
@@ -48,7 +48,7 @@
 	return 32;
 }
 
-static inline int dma_is_consistent(dma_addr_t handle)
+static inline int dma_is_consistent(struct device *dev, dma_addr_t handle)
 {
 	return !!arch_is_coherent();
 }
diff --git a/include/asm-arm/setup.h b/include/asm-arm/setup.h
index aa4b578..e540739 100644
--- a/include/asm-arm/setup.h
+++ b/include/asm-arm/setup.h
@@ -14,55 +14,57 @@
 #ifndef __ASMARM_SETUP_H
 #define __ASMARM_SETUP_H
 
+#include <asm/types.h>
+
 #define COMMAND_LINE_SIZE 1024
 
 /* The list ends with an ATAG_NONE node. */
 #define ATAG_NONE	0x00000000
 
 struct tag_header {
-	u32 size;
-	u32 tag;
+	__u32 size;
+	__u32 tag;
 };
 
 /* The list must start with an ATAG_CORE node */
 #define ATAG_CORE	0x54410001
 
 struct tag_core {
-	u32 flags;		/* bit 0 = read-only */
-	u32 pagesize;
-	u32 rootdev;
+	__u32 flags;		/* bit 0 = read-only */
+	__u32 pagesize;
+	__u32 rootdev;
 };
 
 /* it is allowed to have multiple ATAG_MEM nodes */
 #define ATAG_MEM	0x54410002
 
 struct tag_mem32 {
-	u32	size;
-	u32	start;	/* physical start address */
+	__u32	size;
+	__u32	start;	/* physical start address */
 };
 
 /* VGA text type displays */
 #define ATAG_VIDEOTEXT	0x54410003
 
 struct tag_videotext {
-	u8		x;
-	u8		y;
-	u16		video_page;
-	u8		video_mode;
-	u8		video_cols;
-	u16		video_ega_bx;
-	u8		video_lines;
-	u8		video_isvga;
-	u16		video_points;
+	__u8		x;
+	__u8		y;
+	__u16		video_page;
+	__u8		video_mode;
+	__u8		video_cols;
+	__u16		video_ega_bx;
+	__u8		video_lines;
+	__u8		video_isvga;
+	__u16		video_points;
 };
 
 /* describes how the ramdisk will be used in kernel */
 #define ATAG_RAMDISK	0x54410004
 
 struct tag_ramdisk {
-	u32 flags;	/* bit 0 = load, bit 1 = prompt */
-	u32 size;	/* decompressed ramdisk size in _kilo_ bytes */
-	u32 start;	/* starting block of floppy-based RAM disk image */
+	__u32 flags;	/* bit 0 = load, bit 1 = prompt */
+	__u32 size;	/* decompressed ramdisk size in _kilo_ bytes */
+	__u32 start;	/* starting block of floppy-based RAM disk image */
 };
 
 /* describes where the compressed ramdisk image lives (virtual address) */
@@ -76,23 +78,23 @@
 #define ATAG_INITRD2	0x54420005
 
 struct tag_initrd {
-	u32 start;	/* physical start address */
-	u32 size;	/* size of compressed ramdisk image in bytes */
+	__u32 start;	/* physical start address */
+	__u32 size;	/* size of compressed ramdisk image in bytes */
 };
 
 /* board serial number. "64 bits should be enough for everybody" */
 #define ATAG_SERIAL	0x54410006
 
 struct tag_serialnr {
-	u32 low;
-	u32 high;
+	__u32 low;
+	__u32 high;
 };
 
 /* board revision */
 #define ATAG_REVISION	0x54410007
 
 struct tag_revision {
-	u32 rev;
+	__u32 rev;
 };
 
 /* initial values for vesafb-type framebuffers. see struct screen_info
@@ -101,20 +103,20 @@
 #define ATAG_VIDEOLFB	0x54410008
 
 struct tag_videolfb {
-	u16		lfb_width;
-	u16		lfb_height;
-	u16		lfb_depth;
-	u16		lfb_linelength;
-	u32		lfb_base;
-	u32		lfb_size;
-	u8		red_size;
-	u8		red_pos;
-	u8		green_size;
-	u8		green_pos;
-	u8		blue_size;
-	u8		blue_pos;
-	u8		rsvd_size;
-	u8		rsvd_pos;
+	__u16		lfb_width;
+	__u16		lfb_height;
+	__u16		lfb_depth;
+	__u16		lfb_linelength;
+	__u32		lfb_base;
+	__u32		lfb_size;
+	__u8		red_size;
+	__u8		red_pos;
+	__u8		green_size;
+	__u8		green_pos;
+	__u8		blue_size;
+	__u8		blue_pos;
+	__u8		rsvd_size;
+	__u8		rsvd_pos;
 };
 
 /* command line: \0 terminated string */
@@ -128,17 +130,17 @@
 #define ATAG_ACORN	0x41000101
 
 struct tag_acorn {
-	u32 memc_control_reg;
-	u32 vram_pages;
-	u8 sounddefault;
-	u8 adfsdrives;
+	__u32 memc_control_reg;
+	__u32 vram_pages;
+	__u8 sounddefault;
+	__u8 adfsdrives;
 };
 
 /* footbridge memory clock, see arch/arm/mach-footbridge/arch.c */
 #define ATAG_MEMCLK	0x41000402
 
 struct tag_memclk {
-	u32 fmemclk;
+	__u32 fmemclk;
 };
 
 struct tag {
@@ -167,24 +169,26 @@
 };
 
 struct tagtable {
-	u32 tag;
+	__u32 tag;
 	int (*parse)(const struct tag *);
 };
 
-#define __tag __attribute_used__ __attribute__((__section__(".taglist.init")))
-#define __tagtable(tag, fn) \
-static struct tagtable __tagtable_##fn __tag = { tag, fn }
-
 #define tag_member_present(tag,member)				\
 	((unsigned long)(&((struct tag *)0L)->member + 1)	\
 		<= (tag)->hdr.size * 4)
 
-#define tag_next(t)	((struct tag *)((u32 *)(t) + (t)->hdr.size))
+#define tag_next(t)	((struct tag *)((__u32 *)(t) + (t)->hdr.size))
 #define tag_size(type)	((sizeof(struct tag_header) + sizeof(struct type)) >> 2)
 
 #define for_each_tag(t,base)		\
 	for (t = base; t->hdr.size; t = tag_next(t))
 
+#ifdef __KERNEL__
+
+#define __tag __attribute_used__ __attribute__((__section__(".taglist.init")))
+#define __tagtable(tag, fn) \
+static struct tagtable __tagtable_##fn __tag = { tag, fn }
+
 /*
  * Memory map description
  */
@@ -217,4 +221,6 @@
 static struct early_params __early_##fn __attribute_used__	\
 __attribute__((__section__(".early_param.init"))) = { name, fn }
 
+#endif  /*  __KERNEL__  */
+
 #endif
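
asm-arm/setup.h is exported to userspace (see the Kbuild.asm change below that adds setup.h to unifdef-y), so the ATAG structures move from the kernel-only u8/u16/u32 types to the exported __u8/__u16/__u32 ones, and the kernel-internal tag-table and early-param machinery is fenced off behind #ifdef __KERNEL__. The rule of thumb for such headers, sketched with an invented tag:

#include <asm/types.h>          /* __u8/__u16/__u32 are visible to userspace */

#define MY_ATAG_EXAMPLE 0x54410099      /* invented tag value, for illustration */

struct my_tag_example {
        __u32   size;           /* length in 32-bit words, header included */
        __u32   tag;
};

#ifdef __KERNEL__
/* parsing helpers, section attributes, etc. stay kernel-only */
#endif
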
diff --git a/include/asm-arm/unistd.h b/include/asm-arm/unistd.h
index 14a87ee..d44c629 100644
--- a/include/asm-arm/unistd.h
+++ b/include/asm-arm/unistd.h
@@ -377,156 +377,6 @@
 #endif
 
 #ifdef __KERNEL__
-#include <linux/err.h>
-#include <linux/linkage.h>
-
-#define __sys2(x) #x
-#define __sys1(x) __sys2(x)
-
-#ifndef __syscall
-#if defined(__thumb__) || defined(__ARM_EABI__)
-#define __SYS_REG(name) register long __sysreg __asm__("r7") = __NR_##name;
-#define __SYS_REG_LIST(regs...) "r" (__sysreg) , ##regs
-#define __syscall(name) "swi\t0"
-#else
-#define __SYS_REG(name)
-#define __SYS_REG_LIST(regs...) regs
-#define __syscall(name) "swi\t" __sys1(__NR_##name) ""
-#endif
-#endif
-
-#define __syscall_return(type, res)					\
-do {									\
-	if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) {	\
-		errno = -(res);						\
-		res = -1;						\
-	}								\
-	return (type) (res);						\
-} while (0)
-
-#define _syscall0(type,name)						\
-type name(void) {							\
-  __SYS_REG(name)							\
-  register long __res_r0 __asm__("r0");					\
-  long __res;								\
-  __asm__ __volatile__ (						\
-  __syscall(name)							\
-	: "=r" (__res_r0)						\
-	: __SYS_REG_LIST()						\
-	: "memory" );							\
-  __res = __res_r0;							\
-  __syscall_return(type,__res);						\
-}
-
-#define _syscall1(type,name,type1,arg1) 				\
-type name(type1 arg1) { 						\
-  __SYS_REG(name)							\
-  register long __r0 __asm__("r0") = (long)arg1;			\
-  register long __res_r0 __asm__("r0");					\
-  long __res;								\
-  __asm__ __volatile__ (						\
-  __syscall(name)							\
-	: "=r" (__res_r0)						\
-	: __SYS_REG_LIST( "0" (__r0) )					\
-	: "memory" );							\
-  __res = __res_r0;							\
-  __syscall_return(type,__res);						\
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2)			\
-type name(type1 arg1,type2 arg2) {					\
-  __SYS_REG(name)							\
-  register long __r0 __asm__("r0") = (long)arg1;			\
-  register long __r1 __asm__("r1") = (long)arg2;			\
-  register long __res_r0 __asm__("r0");					\
-  long __res;								\
-  __asm__ __volatile__ (						\
-  __syscall(name)							\
-	: "=r" (__res_r0)						\
-	: __SYS_REG_LIST( "0" (__r0), "r" (__r1) )			\
-	: "memory" );							\
-  __res = __res_r0;							\
-  __syscall_return(type,__res);						\
-}
-
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)		\
-type name(type1 arg1,type2 arg2,type3 arg3) {				\
-  __SYS_REG(name)							\
-  register long __r0 __asm__("r0") = (long)arg1;			\
-  register long __r1 __asm__("r1") = (long)arg2;			\
-  register long __r2 __asm__("r2") = (long)arg3;			\
-  register long __res_r0 __asm__("r0");					\
-  long __res;								\
-  __asm__ __volatile__ (						\
-  __syscall(name)							\
-	: "=r" (__res_r0)						\
-	: __SYS_REG_LIST( "0" (__r0), "r" (__r1), "r" (__r2) )		\
-	: "memory" );							\
-  __res = __res_r0;							\
-  __syscall_return(type,__res);						\
-}
-
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)\
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) {		\
-  __SYS_REG(name)							\
-  register long __r0 __asm__("r0") = (long)arg1;			\
-  register long __r1 __asm__("r1") = (long)arg2;			\
-  register long __r2 __asm__("r2") = (long)arg3;			\
-  register long __r3 __asm__("r3") = (long)arg4;			\
-  register long __res_r0 __asm__("r0");					\
-  long __res;								\
-  __asm__ __volatile__ (						\
-  __syscall(name)							\
-	: "=r" (__res_r0)						\
-	: __SYS_REG_LIST( "0" (__r0), "r" (__r1), "r" (__r2), "r" (__r3) ) \
-	: "memory" );							\
-  __res = __res_r0;							\
-  __syscall_return(type,__res);						\
-}
-  
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5)	\
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) {	\
-  __SYS_REG(name)							\
-  register long __r0 __asm__("r0") = (long)arg1;			\
-  register long __r1 __asm__("r1") = (long)arg2;			\
-  register long __r2 __asm__("r2") = (long)arg3;			\
-  register long __r3 __asm__("r3") = (long)arg4;			\
-  register long __r4 __asm__("r4") = (long)arg5;			\
-  register long __res_r0 __asm__("r0");					\
-  long __res;								\
-  __asm__ __volatile__ (						\
-  __syscall(name)							\
-	: "=r" (__res_r0)						\
-	: __SYS_REG_LIST( "0" (__r0), "r" (__r1), "r" (__r2),		\
-			  "r" (__r3), "r" (__r4) )			\
-	: "memory" );							\
-  __res = __res_r0;							\
-  __syscall_return(type,__res);						\
-}
-
-#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5,type6,arg6)	\
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) {	\
-  __SYS_REG(name)							\
-  register long __r0 __asm__("r0") = (long)arg1;			\
-  register long __r1 __asm__("r1") = (long)arg2;			\
-  register long __r2 __asm__("r2") = (long)arg3;			\
-  register long __r3 __asm__("r3") = (long)arg4;			\
-  register long __r4 __asm__("r4") = (long)arg5;			\
-  register long __r5 __asm__("r5") = (long)arg6;			\
-  register long __res_r0 __asm__("r0");					\
-  long __res;								\
-  __asm__ __volatile__ (						\
-  __syscall(name)							\
-	: "=r" (__res_r0)						\
-	: __SYS_REG_LIST( "0" (__r0), "r" (__r1), "r" (__r2),		\
-			  "r" (__r3), "r" (__r4), "r" (__r5) )		\
-	: "memory" );							\
-  __res = __res_r0;							\
-  __syscall_return(type,__res);						\
-}
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_STAT64
diff --git a/include/asm-arm26/pgalloc.h b/include/asm-arm26/pgalloc.h
index 6437167..7725af3 100644
--- a/include/asm-arm26/pgalloc.h
+++ b/include/asm-arm26/pgalloc.h
@@ -15,7 +15,7 @@
 #include <asm/tlbflush.h>
 #include <linux/slab.h>
 
-extern kmem_cache_t *pte_cache;
+extern struct kmem_cache *pte_cache;
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr){
 	return kmem_cache_alloc(pte_cache, GFP_KERNEL);
diff --git a/include/asm-arm26/setup.h b/include/asm-arm26/setup.h
index 6348931..1a867b4 100644
--- a/include/asm-arm26/setup.h
+++ b/include/asm-arm26/setup.h
@@ -16,6 +16,8 @@
 
 #define COMMAND_LINE_SIZE 1024
 
+#ifdef __KERNEL__
+
 /* The list ends with an ATAG_NONE node. */
 #define ATAG_NONE	0x00000000
 
@@ -202,4 +204,6 @@
 
 extern struct meminfo meminfo;
 
+#endif  /*  __KERNEL__  */
+
 #endif
diff --git a/include/asm-arm26/unistd.h b/include/asm-arm26/unistd.h
index 25a5eea..4c3b919 100644
--- a/include/asm-arm26/unistd.h
+++ b/include/asm-arm26/unistd.h
@@ -311,139 +311,6 @@
 #define __ARM_NR_usr26			(__ARM_NR_BASE+3)
 
 #ifdef __KERNEL__
-#include <linux/err.h>
-#include <linux/linkage.h>
-
-#define __sys2(x) #x
-#define __sys1(x) __sys2(x)
-
-#ifndef __syscall
-#define __syscall(name) "swi\t" __sys1(__NR_##name) ""
-#endif
-
-#define __syscall_return(type, res)					\
-do {									\
-	if ((unsigned long)(res) >= (unsigned long)-MAX_ERRNO) {	\
-		errno = -(res);						\
-		res = -1;						\
-	}								\
-	return (type) (res);						\
-} while (0)
-
-#define _syscall0(type,name)						\
-type name(void) {							\
-  register long __res_r0 __asm__("r0");					\
-  long __res;								\
-  __asm__ __volatile__ (						\
-  __syscall(name)							\
-	: "=r" (__res_r0)						\
-	:								\
-	: "lr");							\
-  __res = __res_r0;							\
-  __syscall_return(type,__res);						\
-}
-
-#define _syscall1(type,name,type1,arg1) 				\
-type name(type1 arg1) { 						\
-  register long __r0 __asm__("r0") = (long)arg1;			\
-  register long __res_r0 __asm__("r0");					\
-  long __res;								\
-  __asm__ __volatile__ (						\
-  __syscall(name)							\
-	: "=r" (__res_r0)						\
-	: "r" (__r0)							\
-	: "lr");							\
-  __res = __res_r0;							\
-  __syscall_return(type,__res);						\
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2)			\
-type name(type1 arg1,type2 arg2) {					\
-  register long __r0 __asm__("r0") = (long)arg1;			\
-  register long __r1 __asm__("r1") = (long)arg2;			\
-  register long __res_r0 __asm__("r0");					\
-  long __res;								\
-  __asm__ __volatile__ (						\
-  __syscall(name)							\
-	: "=r" (__res_r0)						\
-	: "r" (__r0),"r" (__r1) 					\
-	: "lr");							\
-  __res = __res_r0;							\
-  __syscall_return(type,__res);						\
-}
-
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)		\
-type name(type1 arg1,type2 arg2,type3 arg3) {				\
-  register long __r0 __asm__("r0") = (long)arg1;			\
-  register long __r1 __asm__("r1") = (long)arg2;			\
-  register long __r2 __asm__("r2") = (long)arg3;			\
-  register long __res_r0 __asm__("r0");					\
-  long __res;								\
-  __asm__ __volatile__ (						\
-  __syscall(name)							\
-	: "=r" (__res_r0)						\
-	: "r" (__r0),"r" (__r1),"r" (__r2)				\
-	: "lr");							\
-  __res = __res_r0;							\
-  __syscall_return(type,__res);						\
-}
-
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)\
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) {		\
-  register long __r0 __asm__("r0") = (long)arg1;			\
-  register long __r1 __asm__("r1") = (long)arg2;			\
-  register long __r2 __asm__("r2") = (long)arg3;			\
-  register long __r3 __asm__("r3") = (long)arg4;			\
-  register long __res_r0 __asm__("r0");					\
-  long __res;								\
-  __asm__ __volatile__ (						\
-  __syscall(name)							\
-	: "=r" (__res_r0)						\
-	: "r" (__r0),"r" (__r1),"r" (__r2),"r" (__r3)			\
-	: "lr");							\
-  __res = __res_r0;							\
-  __syscall_return(type,__res);						\
-}
-  
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5)	\
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) {	\
-  register long __r0 __asm__("r0") = (long)arg1;			\
-  register long __r1 __asm__("r1") = (long)arg2;			\
-  register long __r2 __asm__("r2") = (long)arg3;			\
-  register long __r3 __asm__("r3") = (long)arg4;			\
-  register long __r4 __asm__("r4") = (long)arg5;			\
-  register long __res_r0 __asm__("r0");					\
-  long __res;								\
-  __asm__ __volatile__ (						\
-  __syscall(name)							\
-	: "=r" (__res_r0)						\
-	: "r" (__r0),"r" (__r1),"r" (__r2),"r" (__r3),"r" (__r4)	\
-	: "lr");							\
-  __res = __res_r0;							\
-  __syscall_return(type,__res);						\
-}
-
-#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5,type6,arg6)	\
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) {	\
-  register long __r0 __asm__("r0") = (long)arg1;			\
-  register long __r1 __asm__("r1") = (long)arg2;			\
-  register long __r2 __asm__("r2") = (long)arg3;			\
-  register long __r3 __asm__("r3") = (long)arg4;			\
-  register long __r4 __asm__("r4") = (long)arg5;			\
-  register long __r5 __asm__("r5") = (long)arg6;			\
-  register long __res_r0 __asm__("r0");					\
-  long __res;								\
-  __asm__ __volatile__ (						\
-  __syscall(name)							\
-	: "=r" (__res_r0)						\
-	: "r" (__r0),"r" (__r1),"r" (__r2),"r" (__r3), "r" (__r4),"r" (__r5)		\
-	: "lr");							\
-  __res = __res_r0;							\
-  __syscall_return(type,__res);						\
-}
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
diff --git a/include/asm-avr32/dma-mapping.h b/include/asm-avr32/dma-mapping.h
index 4c40cb4..0580b5d 100644
--- a/include/asm-avr32/dma-mapping.h
+++ b/include/asm-avr32/dma-mapping.h
@@ -8,7 +8,8 @@
 #include <asm/cacheflush.h>
 #include <asm/io.h>
 
-extern void dma_cache_sync(void *vaddr, size_t size, int direction);
+extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+	int direction);
 
 /*
  * Return whether the given device DMA address mask can be supported
@@ -307,7 +308,7 @@
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 
-static inline int dma_is_consistent(dma_addr_t dma_addr)
+static inline int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
 {
 	return 1;
 }
diff --git a/include/asm-avr32/setup.h b/include/asm-avr32/setup.h
index 10193da..0a52242 100644
--- a/include/asm-avr32/setup.h
+++ b/include/asm-avr32/setup.h
@@ -13,6 +13,8 @@
 
 #define COMMAND_LINE_SIZE 256
 
+#ifdef __KERNEL__
+
 /* Magic number indicating that a tag table is present */
 #define ATAG_MAGIC	0xa2a25441
 
@@ -138,4 +140,6 @@
 
 #endif /* !__ASSEMBLY__ */
 
+#endif  /*  __KERNEL__  */
+
 #endif /* __ASM_AVR32_SETUP_H__ */
diff --git a/include/asm-cris/arch-v10/bitops.h b/include/asm-cris/arch-v10/bitops.h
index b73f539..be85f6d 100644
--- a/include/asm-cris/arch-v10/bitops.h
+++ b/include/asm-cris/arch-v10/bitops.h
@@ -10,7 +10,7 @@
  * number.  They differ in that the first function also inverts all bits
  * in the input.
  */
-extern inline unsigned long cris_swapnwbrlz(unsigned long w)
+static inline unsigned long cris_swapnwbrlz(unsigned long w)
 {
 	/* Let's just say we return the result in the same register as the
 	   input.  Saying we clobber the input but can return the result
@@ -26,7 +26,7 @@
 	return res;
 }
 
-extern inline unsigned long cris_swapwbrlz(unsigned long w)
+static inline unsigned long cris_swapwbrlz(unsigned long w)
 {
 	unsigned res;
 	__asm__ ("swapwbr %0 \n\t"
@@ -40,7 +40,7 @@
  * ffz = Find First Zero in word. Undefined if no zero exists,
  * so code should check against ~0UL first..
  */
-extern inline unsigned long ffz(unsigned long w)
+static inline unsigned long ffz(unsigned long w)
 {
 	return cris_swapnwbrlz(w);
 }
@@ -51,7 +51,7 @@
  *
  * Undefined if no bit exists, so code should check against 0 first.
  */
-extern inline unsigned long __ffs(unsigned long word)
+static inline unsigned long __ffs(unsigned long word)
 {
 	return cris_swapnwbrlz(~word);
 }
@@ -65,7 +65,7 @@
  * differs in spirit from the above ffz (man ffs).
  */
 
-extern inline unsigned long kernel_ffs(unsigned long w)
+static inline unsigned long kernel_ffs(unsigned long w)
 {
 	return w ? cris_swapwbrlz (w) + 1 : 0;
 }
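
The CRIS bitops (and the CRIS semaphore and h8300 helpers below) switch from "extern inline" to "static inline". Under traditional GNU C89 semantics "extern inline" never emits an out-of-line definition, so any call the compiler declines to inline becomes an unresolved symbol, and the meaning changes again under C99; "static inline" gives every translation unit its own fallback copy and behaves the same either way. A reduced illustration (my_ffz is an invented stand-in, not the CRIS implementation):

/* each .c file that includes this gets its own out-of-line copy
 * if the compiler decides not to inline a particular call site */
static inline unsigned long my_ffz(unsigned long w)
{
        unsigned long bit = 0;

        while (w & 1) {
                w >>= 1;
                bit++;
        }
        return bit;
}
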
diff --git a/include/asm-cris/dma-mapping.h b/include/asm-cris/dma-mapping.h
index cbf1a98..662cea7 100644
--- a/include/asm-cris/dma-mapping.h
+++ b/include/asm-cris/dma-mapping.h
@@ -156,10 +156,10 @@
 	return (1 << INTERNODE_CACHE_SHIFT);
 }
 
-#define dma_is_consistent(d)	(1)
+#define dma_is_consistent(d, h)	(1)
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
 {
 }
diff --git a/include/asm-cris/semaphore-helper.h b/include/asm-cris/semaphore-helper.h
index dbd0f30..a8e1e6c 100644
--- a/include/asm-cris/semaphore-helper.h
+++ b/include/asm-cris/semaphore-helper.h
@@ -20,12 +20,12 @@
 /*
  * These two _must_ execute atomically wrt each other.
  */
-extern inline void wake_one_more(struct semaphore * sem)
+static inline void wake_one_more(struct semaphore * sem)
 {
 	atomic_inc(&sem->waking);
 }
 
-extern inline int waking_non_zero(struct semaphore *sem)
+static inline int waking_non_zero(struct semaphore *sem)
 {
 	unsigned long flags;
 	int ret = 0;
@@ -40,7 +40,7 @@
 	return ret;
 }
 
-extern inline int waking_non_zero_interruptible(struct semaphore *sem,
+static inline int waking_non_zero_interruptible(struct semaphore *sem,
 						struct task_struct *tsk)
 {
 	int ret = 0;
@@ -59,7 +59,7 @@
 	return ret;
 }
 
-extern inline int waking_non_zero_trylock(struct semaphore *sem)
+static inline int waking_non_zero_trylock(struct semaphore *sem)
 {
         int ret = 1;
 	unsigned long flags;
diff --git a/include/asm-frv/dma-mapping.h b/include/asm-frv/dma-mapping.h
index e9fc1d4..bcb2df6 100644
--- a/include/asm-frv/dma-mapping.h
+++ b/include/asm-frv/dma-mapping.h
@@ -172,10 +172,10 @@
 	return 1 << L1_CACHE_SHIFT;
 }
 
-#define dma_is_consistent(d)	(1)
+#define dma_is_consistent(d, h)	(1)
 
 static inline
-void dma_cache_sync(void *vaddr, size_t size,
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		    enum dma_data_direction direction)
 {
 	flush_write_buffers();
diff --git a/include/asm-frv/highmem.h b/include/asm-frv/highmem.h
index 0f390f4..ff4d6cd 100644
--- a/include/asm-frv/highmem.h
+++ b/include/asm-frv/highmem.h
@@ -115,7 +115,7 @@
 {
 	unsigned long paddr;
 
-	inc_preempt_count();
+	pagefault_disable();
 	paddr = page_to_phys(page);
 
 	switch (type) {
@@ -170,8 +170,7 @@
 	default:
 		BUG();
 	}
-	dec_preempt_count();
-	preempt_check_resched();
+	pagefault_enable();
 }
 
 #endif /* !__ASSEMBLY__ */
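
The FRV kmap_atomic()/kunmap_atomic() pair (and the futex op helpers elsewhere in this merge) replace open-coded inc_preempt_count()/dec_preempt_count() with pagefault_disable()/pagefault_enable() from <linux/uaccess.h>, which state the real intent -- faults in this window must be handled without sleeping -- and keep the preempt bookkeeping in one place. A hedged sketch of the usual pattern around an atomic user access:

#include <linux/uaccess.h>
#include <linux/errno.h>

/* peek at user memory from a context that must not sleep; a fault
 * simply makes __copy_from_user_inatomic() return a nonzero count */
static int my_peek_user(void *dst, const void __user *src, size_t len)
{
        unsigned long left;

        pagefault_disable();
        left = __copy_from_user_inatomic(dst, src, len);
        pagefault_enable();

        return left ? -EFAULT : 0;
}
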
diff --git a/include/asm-frv/param.h b/include/asm-frv/param.h
index 168381e..365653b 100644
--- a/include/asm-frv/param.h
+++ b/include/asm-frv/param.h
@@ -18,6 +18,5 @@
 #endif
 
 #define MAXHOSTNAMELEN		64	/* max length of hostname */
-#define COMMAND_LINE_SIZE	512
 
 #endif /* _ASM_PARAM_H */
diff --git a/include/asm-frv/setup.h b/include/asm-frv/setup.h
index 0d293b9..afd787c 100644
--- a/include/asm-frv/setup.h
+++ b/include/asm-frv/setup.h
@@ -12,6 +12,10 @@
 #ifndef _ASM_SETUP_H
 #define _ASM_SETUP_H
 
+#define COMMAND_LINE_SIZE       512
+
+#ifdef __KERNEL__
+
 #include <linux/init.h>
 
 #ifndef __ASSEMBLY__
@@ -22,4 +26,6 @@
 
 #endif /* !__ASSEMBLY__ */
 
+#endif  /*  __KERNEL__  */
+
 #endif /* _ASM_SETUP_H */
diff --git a/include/asm-frv/unistd.h b/include/asm-frv/unistd.h
index 725e854..584c041 100644
--- a/include/asm-frv/unistd.h
+++ b/include/asm-frv/unistd.h
@@ -320,125 +320,6 @@
 #ifdef __KERNEL__
 
 #define NR_syscalls 310
-#include <linux/err.h>
-
-/*
- * process the return value of a syscall, consigning it to one of two possible fates
- * - user-visible error numbers are in the range -1 - -4095: see <asm-frv/errno.h>
- */
-#undef __syscall_return
-#define __syscall_return(type, res)					\
-do {									\
-        unsigned long __sr2 = (res);					\
-	if (__builtin_expect(__sr2 >= (unsigned long)(-MAX_ERRNO), 0)) { \
-		errno = (-__sr2);					\
-		__sr2 = ~0UL;						\
-	}								\
-	return (type) __sr2;						\
-} while (0)
-
-/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */
-
-#undef _syscall0
-#define _syscall0(type,name)						\
-type name(void)								\
-{									\
-	register unsigned long __scnum __asm__ ("gr7") = (__NR_##name);	\
-	register unsigned long __sc0 __asm__ ("gr8");			\
-	__asm__ __volatile__ ("tira gr0,#0"				\
-			      : "=r" (__sc0)				\
-			      : "r" (__scnum));				\
-	__syscall_return(type, __sc0);					\
-}
-
-#undef _syscall1
-#define _syscall1(type,name,type1,arg1)						\
-type name(type1 arg1)								\
-{										\
-	register unsigned long __scnum __asm__ ("gr7") = (__NR_##name);		\
-	register unsigned long __sc0 __asm__ ("gr8") = (unsigned long) arg1;	\
-	__asm__ __volatile__ ("tira gr0,#0"					\
-			      : "+r" (__sc0)					\
-			      : "r" (__scnum));					\
-	__syscall_return(type, __sc0);						\
-}
-
-#undef _syscall2
-#define _syscall2(type,name,type1,arg1,type2,arg2)				\
-type name(type1 arg1,type2 arg2)						\
-{										\
-	register unsigned long __scnum __asm__ ("gr7") = (__NR_##name);		\
-	register unsigned long __sc0 __asm__ ("gr8") = (unsigned long) arg1;	\
-	register unsigned long __sc1 __asm__ ("gr9") = (unsigned long) arg2;	\
-	__asm__ __volatile__ ("tira gr0,#0"					\
-			      : "+r" (__sc0)					\
-			      : "r" (__scnum), "r" (__sc1));			\
-	__syscall_return(type, __sc0);						\
-}
-
-#undef _syscall3
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)			\
-type name(type1 arg1,type2 arg2,type3 arg3)					\
-{										\
-	register unsigned long __scnum __asm__ ("gr7") = (__NR_##name);		\
-	register unsigned long __sc0 __asm__ ("gr8") = (unsigned long) arg1;	\
-	register unsigned long __sc1 __asm__ ("gr9") = (unsigned long) arg2;	\
-	register unsigned long __sc2 __asm__ ("gr10") = (unsigned long) arg3;	\
-	__asm__ __volatile__ ("tira gr0,#0"					\
-			      : "+r" (__sc0)					\
-			      : "r" (__scnum), "r" (__sc1), "r" (__sc2));	\
-	__syscall_return(type, __sc0);						\
-}
-
-#undef _syscall4
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)		\
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4)				\
-{											\
-	register unsigned long __scnum __asm__ ("gr7") = (__NR_##name);			\
-	register unsigned long __sc0 __asm__ ("gr8") = (unsigned long) arg1;		\
-	register unsigned long __sc1 __asm__ ("gr9") = (unsigned long) arg2;		\
-	register unsigned long __sc2 __asm__ ("gr10") = (unsigned long) arg3;		\
-	register unsigned long __sc3 __asm__ ("gr11") = (unsigned long) arg4;		\
-	__asm__ __volatile__ ("tira gr0,#0"						\
-			      : "+r" (__sc0)						\
-			      : "r" (__scnum), "r" (__sc1), "r" (__sc2), "r" (__sc3));	\
-	__syscall_return(type, __sc0);							\
-}
-
-#undef _syscall5
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5)	\
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5)			\
-{											\
-	register unsigned long __scnum __asm__ ("gr7") = (__NR_##name);			\
-	register unsigned long __sc0 __asm__ ("gr8") = (unsigned long) arg1;		\
-	register unsigned long __sc1 __asm__ ("gr9") = (unsigned long) arg2;		\
-	register unsigned long __sc2 __asm__ ("gr10") = (unsigned long) arg3;		\
-	register unsigned long __sc3 __asm__ ("gr11") = (unsigned long) arg4;		\
-	register unsigned long __sc4 __asm__ ("gr12") = (unsigned long) arg5;		\
-	__asm__ __volatile__ ("tira gr0,#0"						\
-			      : "+r" (__sc0)						\
-			      : "r" (__scnum), "r" (__sc1), "r" (__sc2),		\
-			      "r" (__sc3), "r" (__sc4));				\
-	__syscall_return(type, __sc0);							\
-}
-
-#undef _syscall6
-#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5, type6, arg6) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6)		 \
-{												 \
-	register unsigned long __scnum __asm__ ("gr7") = (__NR_##name);				 \
-	register unsigned long __sc0 __asm__ ("gr8") = (unsigned long) arg1;			 \
-	register unsigned long __sc1 __asm__ ("gr9") = (unsigned long) arg2;			 \
-	register unsigned long __sc2 __asm__ ("gr10") = (unsigned long) arg3;			 \
-	register unsigned long __sc3 __asm__ ("gr11") = (unsigned long) arg4;			 \
-	register unsigned long __sc4 __asm__ ("gr12") = (unsigned long) arg5;			 \
-	register unsigned long __sc5 __asm__ ("gr13") = (unsigned long) arg6;			 \
-	__asm__ __volatile__ ("tira gr0,#0"							 \
-			      : "+r" (__sc0)							 \
-			      : "r" (__scnum), "r" (__sc1), "r" (__sc2),			 \
-			      "r" (__sc3), "r" (__sc4), "r" (__sc5));				 \
-	__syscall_return(type, __sc0);								 \
-}
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 /* #define __ARCH_WANT_OLD_READDIR */
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild
index 3c06be3..fa14f8c 100644
--- a/include/asm-generic/Kbuild
+++ b/include/asm-generic/Kbuild
@@ -1,4 +1,3 @@
-header-y += atomic.h
 header-y += errno-base.h
 header-y += errno.h
 header-y += fcntl.h
diff --git a/include/asm-generic/Kbuild.asm b/include/asm-generic/Kbuild.asm
index a84c3d8..a37e95f 100644
--- a/include/asm-generic/Kbuild.asm
+++ b/include/asm-generic/Kbuild.asm
@@ -14,6 +14,7 @@
 unifdef-y += ptrace.h
 unifdef-y += resource.h
 unifdef-y += sembuf.h
+unifdef-y += setup.h
 unifdef-y += shmbuf.h
 unifdef-y += sigcontext.h
 unifdef-y += siginfo.h
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 42a95d9..b7e4a04 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -66,7 +66,7 @@
 	atomic64_sub(i, v);
 }
 
-#else
+#else  /*  BITS_PER_LONG == 64  */
 
 typedef atomic_t atomic_long_t;
 
@@ -113,5 +113,6 @@
 	atomic_sub(i, v);
 }
 
-#endif
-#endif
+#endif  /*  BITS_PER_LONG == 64  */
+
+#endif  /*  _ASM_GENERIC_ATOMIC_H  */
diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h
index b541e48..783ab99 100644
--- a/include/asm-generic/dma-mapping.h
+++ b/include/asm-generic/dma-mapping.h
@@ -266,7 +266,7 @@
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#define dma_is_consistent(d)	(1)
+#define dma_is_consistent(d, h)	(1)
 
 static inline int
 dma_get_cache_alignment(void)
@@ -295,7 +295,7 @@
 }
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
 {
 	/* could define this in terms of the dma_cache ... operations,
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
index df893c1..f422df0 100644
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -21,7 +21,7 @@
 	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
 		return -EFAULT;
 
-	inc_preempt_count();
+	pagefault_disable();
 
 	switch (op) {
 	case FUTEX_OP_SET:
@@ -33,7 +33,7 @@
 		ret = -ENOSYS;
 	}
 
-	dec_preempt_count();
+	pagefault_enable();
 
 	if (!ret) {
 		switch (cmp) {
diff --git a/include/asm-h8300/delay.h b/include/asm-h8300/delay.h
index cbccbbd..743beba 100644
--- a/include/asm-h8300/delay.h
+++ b/include/asm-h8300/delay.h
@@ -9,7 +9,7 @@
  * Delay routines, using a pre-computed "loops_per_second" value.
  */
 
-extern __inline__ void __delay(unsigned long loops)
+static inline void __delay(unsigned long loops)
 {
 	__asm__ __volatile__ ("1:\n\t"
 			      "dec.l #1,%0\n\t"
@@ -27,7 +27,7 @@
 
 extern unsigned long loops_per_jiffy;
 
-extern __inline__ void udelay(unsigned long usecs)
+static inline void udelay(unsigned long usecs)
 {
 	usecs *= 4295;		/* 2**32 / 1000000 */
 	usecs /= (loops_per_jiffy*HZ);
diff --git a/include/asm-h8300/mmu_context.h b/include/asm-h8300/mmu_context.h
index 855721a..5c165f7 100644
--- a/include/asm-h8300/mmu_context.h
+++ b/include/asm-h8300/mmu_context.h
@@ -9,7 +9,7 @@
 {
 }
 
-extern inline int
+static inline int
 init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
 	// mm->context = virt_to_phys(mm->pgd);
@@ -23,7 +23,7 @@
 {
 }
 
-extern inline void activate_mm(struct mm_struct *prev_mm,
+static inline void activate_mm(struct mm_struct *prev_mm,
 			       struct mm_struct *next_mm)
 {
 }
diff --git a/include/asm-h8300/pci.h b/include/asm-h8300/pci.h
index 5edad5b..0c771b0 100644
--- a/include/asm-h8300/pci.h
+++ b/include/asm-h8300/pci.h
@@ -10,12 +10,12 @@
 #define pcibios_assign_all_busses()	0
 #define pcibios_scan_all_fns(a, b)	0
 
-extern inline void pcibios_set_master(struct pci_dev *dev)
+static inline void pcibios_set_master(struct pci_dev *dev)
 {
 	/* No special bus mastering setup handling */
 }
 
-extern inline void pcibios_penalize_isa_irq(int irq, int active)
+static inline void pcibios_penalize_isa_irq(int irq, int active)
 {
 	/* We don't do dynamic PCI IRQ allocation */
 }
diff --git a/include/asm-h8300/tlbflush.h b/include/asm-h8300/tlbflush.h
index bbdffbe..9a2c5c9 100644
--- a/include/asm-h8300/tlbflush.h
+++ b/include/asm-h8300/tlbflush.h
@@ -47,12 +47,12 @@
 	BUG();
 }
 
-extern inline void flush_tlb_kernel_page(unsigned long addr)
+static inline void flush_tlb_kernel_page(unsigned long addr)
 {
 	BUG();
 }
 
-extern inline void flush_tlb_pgtables(struct mm_struct *mm,
+static inline void flush_tlb_pgtables(struct mm_struct *mm,
 				      unsigned long start, unsigned long end)
 {
 	BUG();
diff --git a/include/asm-h8300/unistd.h b/include/asm-h8300/unistd.h
index 747788d..7ddd414 100644
--- a/include/asm-h8300/unistd.h
+++ b/include/asm-h8300/unistd.h
@@ -295,172 +295,6 @@
 #ifdef __KERNEL__
 
 #define NR_syscalls 289
-#include <linux/err.h>
-
-/* user-visible error numbers are in the range -1 - -MAX_ERRNO: see
-   <asm-m68k/errno.h> */
-
-#define __syscall_return(type, res) \
-do { \
-	if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \
-	/* avoid using res which is declared to be in register d0; \
-	   errno might expand to a function call and clobber it.  */ \
-		int __err = -(res); \
-		errno = __err; \
-		res = -1; \
-	} \
-	return (type) (res); \
-} while (0)
-
-#define _syscall0(type, name)				\
-type name(void)						\
-{							\
-  register long __res __asm__("er0");			\
-  __asm__ __volatile__ ("mov.l %1,er0\n\t"		\
-                        "trapa	#0\n\t"			\
-			: "=r" (__res)			\
-			: "g" (__NR_##name)		\
-			: "cc", "memory");		\
-  __syscall_return(type, __res);			\
-}
-
-#define _syscall1(type, name, atype, a)			\
-type name(atype a)					\
-{							\
-  register long __res __asm__("er0");			\
-  register long _a __asm__("er1");			\
-  _a = (long)a;						\
-  __asm__ __volatile__ ("mov.l %1,er0\n\t"		\
-                        "trapa	#0\n\t"			\
-			: "=r" (__res)			\
-			: "g" (__NR_##name),		\
-			  "g" (_a)			\
-			: "cc", "memory");	 	\
-  __syscall_return(type, __res);			\
-}
-
-#define _syscall2(type, name, atype, a, btype, b)	\
-type name(atype a, btype b)				\
-{							\
-  register long __res __asm__("er0");			\
-  register long _a __asm__("er1");			\
-  register long _b __asm__("er2");			\
-  _a = (long)a;						\
-  _b = (long)b;						\
-  __asm__ __volatile__ ("mov.l %1,er0\n\t"		\
-                        "trapa	#0\n\t"			\
-			: "=r" (__res)			\
-			: "g" (__NR_##name),		\
-			  "g" (_a),			\
-			  "g" (_b)			\
-			: "cc", "memory");	 	\
-  __syscall_return(type, __res);			\
-}
-
-#define _syscall3(type, name, atype, a, btype, b, ctype, c)	\
-type name(atype a, btype b, ctype c)			\
-{							\
-  register long __res __asm__("er0");			\
-  register long _a __asm__("er1");			\
-  register long _b __asm__("er2");			\
-  register long _c __asm__("er3");			\
-  _a = (long)a;						\
-  _b = (long)b;						\
-  _c = (long)c;						\
-  __asm__ __volatile__ ("mov.l %1,er0\n\t"		\
-                        "trapa	#0\n\t"			\
-			: "=r" (__res)			\
-			: "g" (__NR_##name),		\
-			  "g" (_a),			\
-			  "g" (_b),			\
-			  "g" (_c)			\
-			: "cc", "memory");		\
-  __syscall_return(type, __res);			\
-}
-
-#define _syscall4(type, name, atype, a, btype, b,	\
-                  ctype, c, dtype, d)			\
-type name(atype a, btype b, ctype c, dtype d)		\
-{							\
-  register long __res __asm__("er0");			\
-  register long _a __asm__("er1");			\
-  register long _b __asm__("er2");			\
-  register long _c __asm__("er3");			\
-  register long _d __asm__("er4");			\
-  _a = (long)a;						\
-  _b = (long)b;						\
-  _c = (long)c;						\
-  _d = (long)d;						\
-  __asm__ __volatile__ ("mov.l	%1,er0\n\t"		\
-                        "trapa	#0\n\t"			\
-			: "=r" (__res)			\
-			: "g" (__NR_##name),		\
-			  "g" (_a),			\
-			  "g" (_b),			\
-			  "g" (_c),			\
-			  "g" (_d)			\
-			: "cc", "memory");		\
-  __syscall_return(type, __res);			\
-}
-
-#define _syscall5(type, name, atype, a, btype, b,	\
-                  ctype, c, dtype, d, etype, e)		\
-type name(atype a, btype b, ctype c, dtype d, etype e)	\
-{							\
-  register long __res __asm__("er0");			\
-  register long _a __asm__("er1");			\
-  register long _b __asm__("er2");			\
-  register long _c __asm__("er3");			\
-  register long _d __asm__("er4");			\
-  register long _e __asm__("er5");			\
-  _a = (long)a;                                       	\
-  _b = (long)b;                                       	\
-  _c = (long)c;                                       	\
-  _d = (long)d;                                       	\
-  _e = (long)e;                                       	\
-  __asm__ __volatile__ ("mov.l	%1,er0\n\t"		\
-                        "trapa	#0\n\t"			\
-			: "=r" (__res)			\
-			: "g" (__NR_##name),		\
-			  "g" (_a),			\
-			  "g" (_b),			\
-			  "g" (_c),			\
-			  "g" (_d),			\
-			  "g" (_e)			\
-			: "cc", "memory");		\
-  __syscall_return(type, __res);			\
-}
-
-#define _syscall6(type, name, atype, a, btype, b,	\
-                  ctype, c, dtype, d, etype, e, ftype, f)	\
-type name(atype a, btype b, ctype c, dtype d, etype e, ftype f)	\
-{							\
-  register long __res __asm__("er0");			\
-  register long _a __asm__("er1");			\
-  register long _b __asm__("er2");			\
-  register long _c __asm__("er3");			\
-  register long _d __asm__("er4");			\
-  register long _e __asm__("er5");			\
-  register long _f __asm__("er6");			\
-  _a = (long)a;                                       	\
-  _b = (long)b;                                       	\
-  _c = (long)c;                                       	\
-  _d = (long)d;                                       	\
-  _e = (long)e;                                       	\
-  _f = (long)f;                                       	\
-  __asm__ __volatile__ ("mov.l	%1,er0\n\t"		\
-                        "trapa	#0\n\t"			\
-			: "=r" (__res)			\
-			: "g" (__NR_##name),		\
-			  "g" (_a),			\
-			  "g" (_b),			\
-			  "g" (_c),			\
-			  "g" (_d),			\
-			  "g" (_e)			\
-			  "g" (_f)			\
-			: "cc", "memory");		\
-  __syscall_return(type, __res);			\
-}
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
diff --git a/include/asm-i386/Kbuild b/include/asm-i386/Kbuild
index 147e4ac..5ae93af 100644
--- a/include/asm-i386/Kbuild
+++ b/include/asm-i386/Kbuild
@@ -7,5 +7,4 @@
 header-y += ucontext.h
 
 unifdef-y += mtrr.h
-unifdef-y += setup.h
 unifdef-y += vm86.h
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index 6aab7a1..c57441b 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -14,7 +14,7 @@
  * on us. We need to use _exactly_ the address the user gave us,
  * not some alias that contains the same information.
  */
-typedef struct { volatile int counter; } atomic_t;
+typedef struct { int counter; } atomic_t;
 
 #define ATOMIC_INIT(i)	{ (i) }
 
diff --git a/include/asm-i386/dma-mapping.h b/include/asm-i386/dma-mapping.h
index 81999a3..183eebe 100644
--- a/include/asm-i386/dma-mapping.h
+++ b/include/asm-i386/dma-mapping.h
@@ -156,10 +156,10 @@
 	return (1 << INTERNODE_CACHE_SHIFT);
 }
 
-#define dma_is_consistent(d)	(1)
+#define dma_is_consistent(d, h)	(1)
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
 {
 	flush_write_buffers();
diff --git a/include/asm-i386/futex.h b/include/asm-i386/futex.h
index 946d97c..438ef0e 100644
--- a/include/asm-i386/futex.h
+++ b/include/asm-i386/futex.h
@@ -56,7 +56,7 @@
 	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
 		return -EFAULT;
 
-	inc_preempt_count();
+	pagefault_disable();
 
 	if (op == FUTEX_OP_SET)
 		__futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
@@ -88,7 +88,7 @@
 		}
 	}
 
-	dec_preempt_count();
+	pagefault_enable();
 
 	if (!ret) {
 		switch (cmp) {
diff --git a/include/asm-i386/mmzone.h b/include/asm-i386/mmzone.h
index 61b0733..3503ad6 100644
--- a/include/asm-i386/mmzone.h
+++ b/include/asm-i386/mmzone.h
@@ -120,13 +120,26 @@
 	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_low_pages(x) \
 	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0)
-#define alloc_bootmem_node(ignore, x) \
-	__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_pages_node(ignore, x) \
-	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_low_pages_node(ignore, x) \
-	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0)
-
+#define alloc_bootmem_node(pgdat, x)					\
+({									\
+	struct pglist_data  __attribute__ ((unused))			\
+				*__alloc_bootmem_node__pgdat = (pgdat);	\
+	__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES,	\
+						__pa(MAX_DMA_ADDRESS));	\
+})
+#define alloc_bootmem_pages_node(pgdat, x)				\
+({									\
+	struct pglist_data  __attribute__ ((unused))			\
+				*__alloc_bootmem_node__pgdat = (pgdat);	\
+	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE,		\
+						__pa(MAX_DMA_ADDRESS));	\
+})
+#define alloc_bootmem_low_pages_node(pgdat, x)				\
+({									\
+	struct pglist_data  __attribute__ ((unused))			\
+				*__alloc_bootmem_node__pgdat = (pgdat);	\
+	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0);		\
+})
 #endif /* CONFIG_NEED_MULTIPLE_NODES */
 
 #endif /* _ASM_MMZONE_H_ */
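
The rewritten alloc_bootmem_*_node() wrappers use GCC statement expressions so that the pgdat argument is still evaluated and type-checked even though the result is always taken from NODE_DATA(0); the old versions dropped it on the floor, letting bogus call sites slip through unnoticed. A reduced, standalone illustration of the idiom (names invented):

#include <stdio.h>

/* evaluate (and type-check) an argument the expansion otherwise ignores;
 * the value of the ({ ... }) block is its last expression statement */
#define pick_second(checked_arg, x)                                     \
({                                                                      \
        int __attribute__((unused)) __pick_second__tmp = (checked_arg); \
        (x);                                                            \
})

int main(void)
{
        printf("%d\n", pick_second(2 * 3, 42));         /* prints 42 */
        return 0;
}
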
diff --git a/include/asm-i386/param.h b/include/asm-i386/param.h
index 745dc5b..21b3246 100644
--- a/include/asm-i386/param.h
+++ b/include/asm-i386/param.h
@@ -18,6 +18,5 @@
 #endif
 
 #define MAXHOSTNAMELEN	64	/* max length of hostname */
-#define COMMAND_LINE_SIZE 256
 
 #endif
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index b4a301f..e6a4723 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -35,14 +35,14 @@
 #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
 extern unsigned long empty_zero_page[1024];
 extern pgd_t swapper_pg_dir[1024];
-extern kmem_cache_t *pgd_cache;
-extern kmem_cache_t *pmd_cache;
+extern struct kmem_cache *pgd_cache;
+extern struct kmem_cache *pmd_cache;
 extern spinlock_t pgd_lock;
 extern struct page *pgd_list;
 
-void pmd_ctor(void *, kmem_cache_t *, unsigned long);
-void pgd_ctor(void *, kmem_cache_t *, unsigned long);
-void pgd_dtor(void *, kmem_cache_t *, unsigned long);
+void pmd_ctor(void *, struct kmem_cache *, unsigned long);
+void pgd_ctor(void *, struct kmem_cache *, unsigned long);
+void pgd_dtor(void *, struct kmem_cache *, unsigned long);
 void pgtable_cache_init(void);
 void paging_init(void);
 
diff --git a/include/asm-i386/rwsem.h b/include/asm-i386/rwsem.h
index bc598d6..041906f 100644
--- a/include/asm-i386/rwsem.h
+++ b/include/asm-i386/rwsem.h
@@ -75,8 +75,8 @@
 
 
 #define __RWSEM_INITIALIZER(name) \
-{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \
-	__RWSEM_DEP_MAP_INIT(name) }
+{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
+  LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
 
 #define DECLARE_RWSEM(name) \
 	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
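
__RWSEM_INITIALIZER() now seeds wait_lock with __SPIN_LOCK_UNLOCKED((name).wait_lock) instead of the bare SPIN_LOCK_UNLOCKED, so statically initialised rwsems stay friendly to lockdep rather than leaning on the deprecated shared initializer. The same rule applies to any statically initialised lock; a short sketch (struct my_ctx is invented):

#include <linux/spinlock.h>

/* simplest form: DEFINE_SPINLOCK names the lock for lockdep */
static DEFINE_SPINLOCK(my_list_lock);

/* open-coded form, as used inside larger static initializers */
struct my_ctx {
        spinlock_t      lock;
        int             count;
};

static struct my_ctx my_ctx = {
        .lock   = __SPIN_LOCK_UNLOCKED(my_ctx.lock),
        .count  = 0,
};
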
diff --git a/include/asm-i386/setup.h b/include/asm-i386/setup.h
index 9930c5a..67659db 100644
--- a/include/asm-i386/setup.h
+++ b/include/asm-i386/setup.h
@@ -6,6 +6,8 @@
 #ifndef _i386_SETUP_H
 #define _i386_SETUP_H
 
+#define COMMAND_LINE_SIZE 256
+
 #ifdef __KERNEL__
 #include <linux/pfn.h>
 
@@ -14,10 +16,8 @@
  */
 #define MAXMEM_PFN	PFN_DOWN(MAXMEM)
 #define MAX_NONPAE_PFN	(1 << 20)
-#endif
 
 #define PARAM_SIZE 4096
-#define COMMAND_LINE_SIZE 256
 
 #define OLD_CL_MAGIC_ADDR	0x90020
 #define OLD_CL_MAGIC		0xA33F
@@ -79,4 +79,6 @@
 
 #endif /* __ASSEMBLY__ */
 
+#endif  /*  __KERNEL__  */
+
 #endif /* _i386_SETUP_H */
diff --git a/include/asm-i386/spinlock_types.h b/include/asm-i386/spinlock_types.h
index 59efe84..4da9345 100644
--- a/include/asm-i386/spinlock_types.h
+++ b/include/asm-i386/spinlock_types.h
@@ -6,13 +6,13 @@
 #endif
 
 typedef struct {
-	volatile unsigned int slock;
+	unsigned int slock;
 } raw_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED	{ 1 }
 
 typedef struct {
-	volatile unsigned int lock;
+	unsigned int lock;
 } raw_rwlock_t;
 
 #define __RAW_RW_LOCK_UNLOCKED		{ RW_LOCK_BIAS }
diff --git a/include/asm-i386/suspend.h b/include/asm-i386/suspend.h
index 3036152..8dbaafe 100644
--- a/include/asm-i386/suspend.h
+++ b/include/asm-i386/suspend.h
@@ -6,18 +6,7 @@
 #include <asm/desc.h>
 #include <asm/i387.h>
 
-static inline int
-arch_prepare_suspend(void)
-{
-	/* If you want to make non-PSE machine work, turn off paging
-           in swsusp_arch_suspend. swsusp_pg_dir should have identity mapping, so
-           it could work...  */
-	if (!cpu_has_pse) {
-		printk(KERN_ERR "PSE is required for swsusp.\n");
-		return -EPERM;
-	}
-	return 0;
-}
+static inline int arch_prepare_suspend(void) { return 0; }
 
 /* image of the saved processor state */
 struct saved_context {
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index beeeaf6..833fa17 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -329,104 +329,6 @@
 #ifdef __KERNEL__
 
 #define NR_syscalls 320
-#include <linux/err.h>
-
-/*
- * user-visible error numbers are in the range -1 - -MAX_ERRNO: see
- * <asm-i386/errno.h>
- */
-#define __syscall_return(type, res) \
-do { \
-	if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \
-		errno = -(res); \
-		res = -1; \
-	} \
-	return (type) (res); \
-} while (0)
-
-/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */
-#define _syscall0(type,name) \
-type name(void) \
-{ \
-long __res; \
-__asm__ volatile ("int $0x80" \
-	: "=a" (__res) \
-	: "0" (__NR_##name)); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall1(type,name,type1,arg1) \
-type name(type1 arg1) \
-{ \
-long __res; \
-__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \
-	: "=a" (__res) \
-	: "0" (__NR_##name),"ri" ((long)(arg1)) : "memory"); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2) \
-type name(type1 arg1,type2 arg2) \
-{ \
-long __res; \
-__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \
-	: "=a" (__res) \
-	: "0" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)) \
-	: "memory"); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
-type name(type1 arg1,type2 arg2,type3 arg3) \
-{ \
-long __res; \
-__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \
-	: "=a" (__res) \
-	: "0" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)), \
-		  "d" ((long)(arg3)) : "memory"); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
-{ \
-long __res; \
-__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \
-	: "=a" (__res) \
-	: "0" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)), \
-	  "d" ((long)(arg3)),"S" ((long)(arg4)) : "memory"); \
-__syscall_return(type,__res); \
-} 
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
-	  type5,arg5) \
-type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
-{ \
-long __res; \
-__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; movl %1,%%eax ; " \
-                  "int $0x80 ; pop %%ebx" \
-	: "=a" (__res) \
-	: "i" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)), \
-	  "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)) \
-	: "memory"); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
-	  type5,arg5,type6,arg6) \
-type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,type6 arg6) \
-{ \
-long __res; \
-  struct { long __a1; long __a6; } __s = { (long)arg1, (long)arg6 }; \
-__asm__ volatile ("push %%ebp ; push %%ebx ; movl 4(%2),%%ebp ; " \
-                  "movl 0(%2),%%ebx ; movl %1,%%eax ; int $0x80 ; " \
-                  "pop %%ebx ;  pop %%ebp" \
-	: "=a" (__res) \
-	: "i" (__NR_##name),"0" ((long)(&__s)),"c" ((long)(arg2)), \
-	  "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)) \
-	: "memory"); \
-__syscall_return(type,__res); \
-}
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
diff --git a/include/asm-ia64/Kbuild b/include/asm-ia64/Kbuild
index 15818a1..4a1e48b 100644
--- a/include/asm-ia64/Kbuild
+++ b/include/asm-ia64/Kbuild
@@ -10,7 +10,6 @@
 header-y += perfmon_default_smpl.h
 header-y += ptrace_offsets.h
 header-y += rse.h
-header-y += setup.h
 header-y += ucontext.h
 
 unifdef-y += perfmon.h
diff --git a/include/asm-ia64/dma-mapping.h b/include/asm-ia64/dma-mapping.h
index 99a8f8e..ebd5887 100644
--- a/include/asm-ia64/dma-mapping.h
+++ b/include/asm-ia64/dma-mapping.h
@@ -50,7 +50,8 @@
 extern int dma_get_cache_alignment(void);
 
 static inline void
-dma_cache_sync (void *vaddr, size_t size, enum dma_data_direction dir)
+dma_cache_sync (struct device *dev, void *vaddr, size_t size,
+	enum dma_data_direction dir)
 {
 	/*
 	 * IA-64 is cache-coherent, so this is mostly a no-op.  However, we do need to
@@ -59,6 +60,6 @@
 	mb();
 }
 
-#define dma_is_consistent(dma_handle)	(1)	/* all we do is coherent memory... */
+#define dma_is_consistent(d, h)	(1)	/* all we do is coherent memory... */
 
 #endif /* _ASM_IA64_DMA_MAPPING_H */
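
Across the architectures touched below, dma_cache_sync() and dma_is_consistent() now take the struct device that owns the mapping, matching the rest of the DMA API. A minimal caller-side sketch of the new prototypes; dev, buf, handle and len are hypothetical driver state, not taken from this patch:

#include <linux/dma-mapping.h>

/* Sketch of a caller updated for the new signatures. */
static void sync_descriptor(struct device *dev, void *buf,
			    dma_addr_t handle, size_t len)
{
	if (!dma_is_consistent(dev, handle))
		dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);
}
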
diff --git a/include/asm-ia64/futex.h b/include/asm-ia64/futex.h
index 07d77f3..8a98a26 100644
--- a/include/asm-ia64/futex.h
+++ b/include/asm-ia64/futex.h
@@ -59,7 +59,7 @@
 	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
 		return -EFAULT;
 
-	inc_preempt_count();
+	pagefault_disable();
 
 	switch (op) {
 	case FUTEX_OP_SET:
@@ -83,7 +83,7 @@
 		ret = -ENOSYS;
 	}
 
-	dec_preempt_count();
+	pagefault_enable();
 
 	if (!ret) {
 		switch (cmp) {
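
The open-coded inc_preempt_count()/dec_preempt_count() pairs around the in-atomic user accesses are replaced by pagefault_disable()/pagefault_enable() from <linux/uaccess.h>, which state the intent (suppress page-fault handling) rather than its implementation. A minimal sketch of the converted pattern, using a hypothetical helper rather than the futex code itself:

#include <linux/uaccess.h>

/* Hypothetical helper: page faults (and with them preemption) are
 * disabled around an in-atomic user-space access.
 */
static int peek_user_int(int __user *uaddr, int *val)
{
	int ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(val, uaddr, sizeof(*val));
	pagefault_enable();

	return ret ? -EFAULT : 0;
}
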
diff --git a/include/asm-ia64/pgalloc.h b/include/asm-ia64/pgalloc.h
index 9cb68e9..393e04c 100644
--- a/include/asm-ia64/pgalloc.h
+++ b/include/asm-ia64/pgalloc.h
@@ -60,7 +60,7 @@
 static inline void pgtable_quicklist_free(void *pgtable_entry)
 {
 #ifdef CONFIG_NUMA
-	unsigned long nid = page_to_nid(virt_to_page(pgtable_entry));
+	int nid = page_to_nid(virt_to_page(pgtable_entry));
 
 	if (unlikely(nid != numa_node_id())) {
 		free_page((unsigned long)pgtable_entry);
diff --git a/include/asm-m32r/setup.h b/include/asm-m32r/setup.h
index 52f4fa2..6a0b322 100644
--- a/include/asm-m32r/setup.h
+++ b/include/asm-m32r/setup.h
@@ -1,6 +1,11 @@
 /*
  * This is set up by the setup-routine at boot-time
  */
+
+#define COMMAND_LINE_SIZE       512
+
+#ifdef __KERNEL__
+
 #define PARAM			((unsigned char *)empty_zero_page)
 
 #define MOUNT_ROOT_RDONLY	(*(unsigned long *) (PARAM+0x000))
@@ -18,8 +23,6 @@
 
 #define SCREEN_INFO		(*(struct screen_info *) (PARAM+0x200))
 
-#define COMMAND_LINE_SIZE	(512)
-
 #define RAMDISK_IMAGE_START_MASK	(0x07FF)
 #define RAMDISK_PROMPT_FLAG		(0x8000)
 #define RAMDISK_LOAD_FLAG		(0x4000)
@@ -27,3 +30,5 @@
 extern unsigned long memory_start;
 extern unsigned long memory_end;
 
+#endif  /*  __KERNEL__  */
+
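
COMMAND_LINE_SIZE moves above the #ifdef __KERNEL__ guard so the constant remains visible in the header exported to user space, while the boot-parameter layout stays kernel-only. A hypothetical user-space sketch of why that matters, reading /proc/cmdline into a correctly sized buffer instead of hard-coding 512:

#include <stdio.h>
#include <asm/setup.h>	/* exported COMMAND_LINE_SIZE */

int main(void)
{
	char cmdline[COMMAND_LINE_SIZE];
	FILE *f = fopen("/proc/cmdline", "r");

	if (f && fgets(cmdline, sizeof(cmdline), f))
		printf("%s", cmdline);
	if (f)
		fclose(f);
	return 0;
}
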
diff --git a/include/asm-m32r/unistd.h b/include/asm-m32r/unistd.h
index 95aa342..5b66bd3 100644
--- a/include/asm-m32r/unistd.h
+++ b/include/asm-m32r/unistd.h
@@ -296,117 +296,6 @@
 #ifdef __KERNEL__
 
 #define NR_syscalls 285
-#include <linux/err.h>
-
-/* user-visible error numbers are in the range -1 - -MAX_ERRNO: see
- * <asm-m32r/errno.h>
- */
-
-#include <asm/syscall.h>	/* SYSCALL_* */
-
-#define __syscall_return(type, res) \
-do { \
-	if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \
-	/* Avoid using "res" which is declared to be in register r0; \
-	   errno might expand to a function call and clobber it.  */ \
-		int __err = -(res); \
-		errno = __err; \
-		res = -1; \
-	} \
-	return (type) (res); \
-} while (0)
-
-#define _syscall0(type,name) \
-type name(void) \
-{ \
-register long __scno __asm__ ("r7") = __NR_##name; \
-register long __res __asm__("r0"); \
-__asm__ __volatile__ (\
-	"trap #" SYSCALL_VECTOR "|| nop"\
-	: "=r" (__res) \
-	: "r" (__scno) \
-	: "memory"); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall1(type,name,type1,arg1) \
-type name(type1 arg1) \
-{ \
-register long __scno __asm__ ("r7") = __NR_##name; \
-register long __res __asm__ ("r0") = (long)(arg1); \
-__asm__ __volatile__ (\
-	"trap #" SYSCALL_VECTOR "|| nop"\
-	: "=r" (__res) \
-	: "r" (__scno), "0" (__res) \
-	: "memory"); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2) \
-type name(type1 arg1,type2 arg2) \
-{ \
-register long __scno __asm__ ("r7") = __NR_##name; \
-register long __arg2 __asm__ ("r1") = (long)(arg2); \
-register long __res __asm__ ("r0") = (long)(arg1); \
-__asm__ __volatile__ (\
-	"trap #" SYSCALL_VECTOR "|| nop"\
-	: "=r" (__res) \
-	: "r" (__scno), "0" (__res), "r" (__arg2) \
-	: "memory"); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
-type name(type1 arg1,type2 arg2,type3 arg3) \
-{ \
-register long __scno __asm__ ("r7") = __NR_##name; \
-register long __arg3 __asm__ ("r2") = (long)(arg3); \
-register long __arg2 __asm__ ("r1") = (long)(arg2); \
-register long __res __asm__ ("r0") = (long)(arg1); \
-__asm__ __volatile__ (\
-	"trap #" SYSCALL_VECTOR "|| nop"\
-	: "=r" (__res) \
-	: "r" (__scno), "0" (__res), "r" (__arg2), \
-		"r" (__arg3) \
-	: "memory"); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
-type name(type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
-{ \
-register long __scno __asm__ ("r7") = __NR_##name; \
-register long __arg4 __asm__ ("r3") = (long)(arg4); \
-register long __arg3 __asm__ ("r2") = (long)(arg3); \
-register long __arg2 __asm__ ("r1") = (long)(arg2); \
-register long __res __asm__ ("r0") = (long)(arg1); \
-__asm__ __volatile__ (\
-	"trap #" SYSCALL_VECTOR "|| nop"\
-	: "=r" (__res) \
-	: "r" (__scno), "0" (__res), "r" (__arg2), \
-		"r" (__arg3), "r" (__arg4) \
-	: "memory"); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
-	type5,arg5) \
-type name(type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
-{ \
-register long __scno __asm__ ("r7") = __NR_##name; \
-register long __arg5 __asm__ ("r4") = (long)(arg5); \
-register long __arg4 __asm__ ("r3") = (long)(arg4); \
-register long __arg3 __asm__ ("r2") = (long)(arg3); \
-register long __arg2 __asm__ ("r1") = (long)(arg2); \
-register long __res __asm__ ("r0") = (long)(arg1); \
-__asm__ __volatile__ (\
-	"trap #" SYSCALL_VECTOR "|| nop"\
-	: "=r" (__res) \
-	: "r" (__scno), "0" (__res), "r" (__arg2), \
-		"r" (__arg3), "r" (__arg4), "r" (__arg5) \
-	: "memory"); \
-__syscall_return(type,__res); \
-}
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_STAT64
diff --git a/include/asm-m68k/dma-mapping.h b/include/asm-m68k/dma-mapping.h
index d90d841..00259ed 100644
--- a/include/asm-m68k/dma-mapping.h
+++ b/include/asm-m68k/dma-mapping.h
@@ -21,7 +21,7 @@
 	return 1 << L1_CACHE_SHIFT;
 }
 
-static inline int dma_is_consistent(dma_addr_t dma_addr)
+static inline int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
 {
 	return 0;
 }
@@ -41,7 +41,7 @@
 {
 	dma_free_coherent(dev, size, addr, handle);
 }
-static inline void dma_cache_sync(void *vaddr, size_t size,
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 				  enum dma_data_direction dir)
 {
 	/* we use coherent allocation, so not much to do here. */
diff --git a/include/asm-m68k/setup.h b/include/asm-m68k/setup.h
index 7facc9a..2a8853c 100644
--- a/include/asm-m68k/setup.h
+++ b/include/asm-m68k/setup.h
@@ -41,8 +41,12 @@
 #define MACH_Q40     10
 #define MACH_SUN3X   11
 
+#define COMMAND_LINE_SIZE 256
+
 #ifdef __KERNEL__
 
+#define CL_SIZE COMMAND_LINE_SIZE
+
 #ifndef __ASSEMBLY__
 extern unsigned long m68k_machtype;
 #endif /* !__ASSEMBLY__ */
@@ -355,8 +359,6 @@
      */
 
 #define NUM_MEMINFO	4
-#define CL_SIZE		256
-#define COMMAND_LINE_SIZE	CL_SIZE
 
 #ifndef __ASSEMBLY__
 struct mem_info {
diff --git a/include/asm-m68k/unistd.h b/include/asm-m68k/unistd.h
index ad43480..fdbb60e 100644
--- a/include/asm-m68k/unistd.h
+++ b/include/asm-m68k/unistd.h
@@ -317,103 +317,6 @@
 #ifdef __KERNEL__
 
 #define NR_syscalls		311
-#include <linux/err.h>
-
-/* user-visible error numbers are in the range -1 - -MAX_ERRNO: see
-   <asm-m68k/errno.h> */
-
-#define __syscall_return(type, res) \
-do { \
-	if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \
-	/* avoid using res which is declared to be in register d0; \
-	   errno might expand to a function call and clobber it.  */ \
-		int __err = -(res); \
-		errno = __err; \
-		res = -1; \
-	} \
-	return (type) (res); \
-} while (0)
-
-#define _syscall0(type,name) \
-type name(void) \
-{ \
-register long __res __asm__ ("%d0") = __NR_##name; \
-__asm__ __volatile__ ("trap  #0" \
-                      : "+d" (__res) ); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall1(type,name,atype,a) \
-type name(atype a) \
-{ \
-register long __res __asm__ ("%d0") = __NR_##name; \
-register long __a __asm__ ("%d1") = (long)(a); \
-__asm__ __volatile__ ("trap  #0" \
-		      : "+d" (__res) \
-		      : "d" (__a)  ); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall2(type,name,atype,a,btype,b) \
-type name(atype a,btype b) \
-{ \
-register long __res __asm__ ("%d0") = __NR_##name; \
-register long __a __asm__ ("%d1") = (long)(a); \
-register long __b __asm__ ("%d2") = (long)(b); \
-__asm__ __volatile__ ("trap  #0" \
-		      : "+d" (__res) \
-                      : "d" (__a), "d" (__b) \
-		     ); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall3(type,name,atype,a,btype,b,ctype,c) \
-type name(atype a,btype b,ctype c) \
-{ \
-register long __res __asm__ ("%d0") = __NR_##name; \
-register long __a __asm__ ("%d1") = (long)(a); \
-register long __b __asm__ ("%d2") = (long)(b); \
-register long __c __asm__ ("%d3") = (long)(c); \
-__asm__ __volatile__ ("trap  #0" \
-		      : "+d" (__res) \
-                      : "d" (__a), "d" (__b), \
-			"d" (__c) \
-		     ); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall4(type,name,atype,a,btype,b,ctype,c,dtype,d) \
-type name (atype a, btype b, ctype c, dtype d) \
-{ \
-register long __res __asm__ ("%d0") = __NR_##name; \
-register long __a __asm__ ("%d1") = (long)(a); \
-register long __b __asm__ ("%d2") = (long)(b); \
-register long __c __asm__ ("%d3") = (long)(c); \
-register long __d __asm__ ("%d4") = (long)(d); \
-__asm__ __volatile__ ("trap  #0" \
-                      : "+d" (__res) \
-                      : "d" (__a), "d" (__b), \
-			"d" (__c), "d" (__d)  \
-		     ); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall5(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e) \
-type name (atype a,btype b,ctype c,dtype d,etype e) \
-{ \
-register long __res __asm__ ("%d0") = __NR_##name; \
-register long __a __asm__ ("%d1") = (long)(a); \
-register long __b __asm__ ("%d2") = (long)(b); \
-register long __c __asm__ ("%d3") = (long)(c); \
-register long __d __asm__ ("%d4") = (long)(d); \
-register long __e __asm__ ("%d5") = (long)(e); \
-__asm__ __volatile__ ("trap  #0" \
-		      : "+d" (__res) \
-		      : "d" (__a), "d" (__b), \
-			"d" (__c), "d" (__d), "d" (__e)  \
-                     ); \
-__syscall_return(type,__res); \
-}
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
diff --git a/include/asm-m68knommu/irq.h b/include/asm-m68knommu/irq.h
index 45e7a2fd..7b8f874 100644
--- a/include/asm-m68knommu/irq.h
+++ b/include/asm-m68knommu/irq.h
@@ -86,5 +86,6 @@
 #define enable_irq(x)	do { } while (0)
 #define disable_irq(x)	do { } while (0)
 #define disable_irq_nosync(x)	disable_irq(x)
+#define irq_canonicalize(irq)	(irq)
 
 #endif /* _M68K_IRQ_H_ */
diff --git a/include/asm-m68knommu/rtc.h b/include/asm-m68knommu/rtc.h
new file mode 100644
index 0000000..eaf18ec
--- /dev/null
+++ b/include/asm-m68knommu/rtc.h
@@ -0,0 +1 @@
+#include <asm-m68k/rtc.h>
diff --git a/include/asm-m68knommu/setup.h b/include/asm-m68knommu/setup.h
index d2b0fcc..fb86bb2 100644
--- a/include/asm-m68knommu/setup.h
+++ b/include/asm-m68knommu/setup.h
@@ -1,5 +1,10 @@
+#ifdef __KERNEL__
+
 #include <asm-m68k/setup.h>
 
 /* We have a bigger command line buffer. */
 #undef COMMAND_LINE_SIZE
+
+#endif  /*  __KERNEL__  */
+
 #define COMMAND_LINE_SIZE	512
diff --git a/include/asm-m68knommu/ucontext.h b/include/asm-m68knommu/ucontext.h
index 5d570ce..713a27f 100644
--- a/include/asm-m68knommu/ucontext.h
+++ b/include/asm-m68knommu/ucontext.h
@@ -5,21 +5,17 @@
 #define NGREG 18
 typedef greg_t gregset_t[NGREG];
 
-#ifdef CONFIG_FPU
 typedef struct fpregset {
 	int f_pcr;
 	int f_psr;
 	int f_fpiaddr;
 	int f_fpregs[8][3];
 } fpregset_t;
-#endif
 
 struct mcontext {
 	int version;
 	gregset_t gregs;
-#ifdef CONFIG_FPU
 	fpregset_t fpregs;
-#endif
 };
 
 #define MCONTEXT_VERSION 2
@@ -29,9 +25,7 @@
 	struct ucontext  *uc_link;
 	stack_t		  uc_stack;
 	struct mcontext	  uc_mcontext;
-#ifdef CONFIG_FPU
 	unsigned long	  uc_filler[80];
-#endif
 	sigset_t	  uc_sigmask;	/* mask last for extensibility */
 };
 
diff --git a/include/asm-m68knommu/unistd.h b/include/asm-m68knommu/unistd.h
index ebaf031..82e0319 100644
--- a/include/asm-m68knommu/unistd.h
+++ b/include/asm-m68knommu/unistd.h
@@ -318,156 +318,6 @@
 #ifdef __KERNEL__
 
 #define NR_syscalls		311
-#include <linux/err.h>
-
-/* user-visible error numbers are in the range -1 - -MAX_ERRNO: see
-   <asm-m68k/errno.h> */
-
-#define __syscall_return(type, res) \
-do { \
-	if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \
-	/* avoid using res which is declared to be in register d0; \
-	   errno might expand to a function call and clobber it.  */ \
-		int __err = -(res); \
-		errno = __err; \
-		res = -1; \
-	} \
-	return (type) (res); \
-} while (0)
-
-#define _syscall0(type, name)							\
-type name(void)									\
-{										\
-  long __res;									\
-  __asm__ __volatile__ ("movel	%1, %%d0\n\t"					\
-  			"trap	#0\n\t"						\
-  			"movel	%%d0, %0"					\
-			: "=g" (__res)						\
-			: "i" (__NR_##name)					\
-			: "cc", "%d0");						\
-  if ((unsigned long)(__res) >= (unsigned long)(-125)) {				\
-    errno = -__res;								\
-    __res = -1;									\
-  }										\
-  return (type)__res;								\
-}
-
-#define _syscall1(type, name, atype, a)						\
-type name(atype a)								\
-{										\
-  long __res;									\
-  __asm__ __volatile__ ("movel	%2, %%d1\n\t"					\
-  			"movel	%1, %%d0\n\t"					\
-  			"trap	#0\n\t"						\
-  			"movel	%%d0, %0"					\
-			: "=g" (__res)						\
-			: "i" (__NR_##name),					\
-			  "g" ((long)a)						\
-			: "cc", "%d0", "%d1");					\
-  if ((unsigned long)(__res) >= (unsigned long)(-125)) {				\
-    errno = -__res;								\
-    __res = -1;									\
-  }										\
-  return (type)__res;								\
-}
-
-#define _syscall2(type, name, atype, a, btype, b)				\
-type name(atype a, btype b)							\
-{										\
-  long __res;									\
-  __asm__ __volatile__ ("movel	%3, %%d2\n\t"					\
-  			"movel	%2, %%d1\n\t"					\
-			"movel	%1, %%d0\n\t"					\
-  			"trap	#0\n\t"						\
-  			"movel	%%d0, %0"					\
-			: "=g" (__res)						\
-			: "i" (__NR_##name),					\
-			  "a" ((long)a),					\
-			  "g" ((long)b)						\
-			: "cc", "%d0", "%d1", "%d2");				\
-  if ((unsigned long)(__res) >= (unsigned long)(-125)) {				\
-    errno = -__res;								\
-    __res = -1;									\
-  }										\
-  return (type)__res;								\
-}
-
-#define _syscall3(type, name, atype, a, btype, b, ctype, c)			\
-type name(atype a, btype b, ctype c)						\
-{										\
-  long __res;									\
-  __asm__ __volatile__ ("movel	%4, %%d3\n\t"					\
-			"movel	%3, %%d2\n\t"					\
-  			"movel	%2, %%d1\n\t"					\
-			"movel	%1, %%d0\n\t"					\
-  			"trap	#0\n\t"						\
-  			"movel	%%d0, %0"					\
-			: "=g" (__res)						\
-			: "i" (__NR_##name),					\
-			  "a" ((long)a),					\
-			  "a" ((long)b),					\
-			  "g" ((long)c)						\
-			: "cc", "%d0", "%d1", "%d2", "%d3");			\
-  if ((unsigned long)(__res) >= (unsigned long)(-125)) {				\
-    errno = -__res;								\
-    __res = -1;									\
-  }										\
-  return (type)__res;								\
-}
-
-#define _syscall4(type, name, atype, a, btype, b, ctype, c, dtype, d)		\
-type name(atype a, btype b, ctype c, dtype d)					\
-{										\
-  long __res;									\
-  __asm__ __volatile__ ("movel	%5, %%d4\n\t"					\
-			"movel	%4, %%d3\n\t"					\
-			"movel	%3, %%d2\n\t"					\
-  			"movel	%2, %%d1\n\t"					\
-			"movel	%1, %%d0\n\t"					\
-  			"trap	#0\n\t"						\
-  			"movel	%%d0, %0"					\
-			: "=g" (__res)						\
-			: "i" (__NR_##name),					\
-			  "a" ((long)a),					\
-			  "a" ((long)b),					\
-			  "a" ((long)c),					\
-			  "g" ((long)d)						\
-			: "cc", "%d0", "%d1", "%d2", "%d3",			\
-			  "%d4");						\
-  if ((unsigned long)(__res) >= (unsigned long)(-125)) {				\
-    errno = -__res;								\
-    __res = -1;									\
-  }										\
-  return (type)__res;								\
-}
-
-#define _syscall5(type, name, atype, a, btype, b, ctype, c, dtype, d, etype, e)	\
-type name(atype a, btype b, ctype c, dtype d, etype e)				\
-{										\
-  long __res;									\
-  __asm__ __volatile__ ("movel	%6, %%d5\n\t"					\
-			"movel	%5, %%d4\n\t"					\
-			"movel	%4, %%d3\n\t"					\
-			"movel	%3, %%d2\n\t"					\
-  			"movel	%2, %%d1\n\t"					\
-			"movel	%1, %%d0\n\t"					\
-  			"trap	#0\n\t"						\
-  			"movel	%%d0, %0"					\
-			: "=g" (__res)						\
-			: "i" (__NR_##name),					\
-			  "a" ((long)a),					\
-			  "a" ((long)b),					\
-			  "a" ((long)c),					\
-			  "a" ((long)d),					\
-			  "g" ((long)e)						\
-			: "cc", "%d0", "%d1", "%d2", "%d3",			\
-			  "%d4", "%d5");					\
-  if ((unsigned long)(__res) >= (unsigned long)(-125)) {				\
-    errno = -__res;								\
-    __res = -1;									\
-  }										\
-  return (type)__res;								\
-}
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
diff --git a/include/asm-mips/dma-mapping.h b/include/asm-mips/dma-mapping.h
index 4328863..236d1a4 100644
--- a/include/asm-mips/dma-mapping.h
+++ b/include/asm-mips/dma-mapping.h
@@ -63,9 +63,9 @@
 	return 128;
 }
 
-extern int dma_is_consistent(dma_addr_t dma_addr);
+extern int dma_is_consistent(struct device *dev, dma_addr_t dma_addr);
 
-extern void dma_cache_sync(void *vaddr, size_t size,
+extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction);
 
 #define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
diff --git a/include/asm-mips/futex.h b/include/asm-mips/futex.h
index 927a216..47e5679 100644
--- a/include/asm-mips/futex.h
+++ b/include/asm-mips/futex.h
@@ -88,7 +88,7 @@
 	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
 		return -EFAULT;
 
-	inc_preempt_count();
+	pagefault_disable();
 
 	switch (op) {
 	case FUTEX_OP_SET:
@@ -115,7 +115,7 @@
 		ret = -ENOSYS;
 	}
 
-	dec_preempt_count();
+	pagefault_enable();
 
 	if (!ret) {
 		switch (cmp) {
diff --git a/include/asm-mips/highmem.h b/include/asm-mips/highmem.h
index c976bfa..f8c8182 100644
--- a/include/asm-mips/highmem.h
+++ b/include/asm-mips/highmem.h
@@ -21,6 +21,7 @@
 
 #include <linux/init.h>
 #include <linux/interrupt.h>
+#include <linux/uaccess.h>
 #include <asm/kmap_types.h>
 
 /* undef for production */
@@ -70,11 +71,16 @@
 
 static inline void *kmap_atomic(struct page *page, enum km_type type)
 {
+	pagefault_disable();
 	return page_address(page);
 }
 
-static inline void kunmap_atomic(void *kvaddr, enum km_type type) { }
-#define kmap_atomic_pfn(pfn, idx)	page_address(pfn_to_page(pfn))
+static inline void kunmap_atomic(void *kvaddr, enum km_type type)
+{
+	pagefault_enable();
+}
+
+#define kmap_atomic_pfn(pfn, idx) kmap_atomic(pfn_to_page(pfn), (idx))
 
 #define kmap_atomic_to_page(ptr) virt_to_page(ptr)
 
diff --git a/include/asm-mips/i8259.h b/include/asm-mips/i8259.h
index 0214abe..4df8d8b 100644
--- a/include/asm-mips/i8259.h
+++ b/include/asm-mips/i8259.h
@@ -19,10 +19,31 @@
 
 #include <asm/io.h>
 
+/* i8259A PIC registers */
+#define PIC_MASTER_CMD		0x20
+#define PIC_MASTER_IMR		0x21
+#define PIC_MASTER_ISR		PIC_MASTER_CMD
+#define PIC_MASTER_POLL		PIC_MASTER_ISR
+#define PIC_MASTER_OCW3		PIC_MASTER_ISR
+#define PIC_SLAVE_CMD		0xa0
+#define PIC_SLAVE_IMR		0xa1
+
+/* i8259A PIC related value */
+#define PIC_CASCADE_IR		2
+#define MASTER_ICW4_DEFAULT	0x01
+#define SLAVE_ICW4_DEFAULT	0x01
+#define PIC_ICW4_AEOI		2
+
 extern spinlock_t i8259A_lock;
 
+extern void init_8259A(int auto_eoi);
+extern void enable_8259A_irq(unsigned int irq);
+extern void disable_8259A_irq(unsigned int irq);
+
 extern void init_i8259_irqs(void);
 
+#define I8259A_IRQ_BASE	0
+
 /*
  * Do the traditional i8259 interrupt polling thing.  This is for the few
  * cases where no better interrupt acknowledge method is available and we
@@ -35,15 +56,15 @@
 	spin_lock(&i8259A_lock);
 
 	/* Perform an interrupt acknowledge cycle on controller 1. */
-	outb(0x0C, 0x20);		/* prepare for poll */
-	irq = inb(0x20) & 7;
-	if (irq == 2) {
+	outb(0x0C, PIC_MASTER_CMD);		/* prepare for poll */
+	irq = inb(PIC_MASTER_CMD) & 7;
+	if (irq == PIC_CASCADE_IR) {
 		/*
 		 * Interrupt is cascaded so perform interrupt
 		 * acknowledge on controller 2.
 		 */
-		outb(0x0C, 0xA0);		/* prepare for poll */
-		irq = (inb(0xA0) & 7) + 8;
+		outb(0x0C, PIC_SLAVE_CMD);		/* prepare for poll */
+		irq = (inb(PIC_SLAVE_CMD) & 7) + 8;
 	}
 
 	if (unlikely(irq == 7)) {
@@ -54,14 +75,14 @@
 		 * significant bit is not set then there is no valid
 		 * interrupt.
 		 */
-		outb(0x0B, 0x20);		/* ISR register */
-		if(~inb(0x20) & 0x80)
+		outb(0x0B, PIC_MASTER_ISR);		/* ISR register */
+		if(~inb(PIC_MASTER_ISR) & 0x80)
 			irq = -1;
 	}
 
 	spin_unlock(&i8259A_lock);
 
-	return irq;
+	return likely(irq >= 0) ? irq + I8259A_IRQ_BASE : irq;
 }
 
 #endif /* _ASM_I8259_H */
diff --git a/include/asm-mips/pgtable-32.h b/include/asm-mips/pgtable-32.h
index d20f2e9..2fbd47e 100644
--- a/include/asm-mips/pgtable-32.h
+++ b/include/asm-mips/pgtable-32.h
@@ -156,9 +156,9 @@
 #define __pte_offset(address)						\
 	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 #define pte_offset(dir, address)					\
-	((pte_t *) (pmd_page_vaddr(*dir)) + __pte_offset(address))
-#define pte_offset_kernel(dir, address) \
-	((pte_t *) pmd_page_vaddr(*(dir)) +  __pte_offset(address))
+	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
+#define pte_offset_kernel(dir, address)					\
+	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
 
 #define pte_offset_map(dir, address)                                    \
 	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
diff --git a/include/asm-mips/pgtable-64.h b/include/asm-mips/pgtable-64.h
index b9b1e86..a5b1871 100644
--- a/include/asm-mips/pgtable-64.h
+++ b/include/asm-mips/pgtable-64.h
@@ -212,9 +212,9 @@
 #define __pte_offset(address)						\
 	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 #define pte_offset(dir, address)					\
-	((pte_t *) (pmd_page_vaddr(*dir)) + __pte_offset(address))
+	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
 #define pte_offset_kernel(dir, address)					\
-	((pte_t *) pmd_page_vaddr(*(dir)) +  __pte_offset(address))
+	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
 #define pte_offset_map(dir, address)					\
 	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
 #define pte_offset_map_nested(dir, address)				\
diff --git a/include/asm-mips/setup.h b/include/asm-mips/setup.h
index 737fa4a..70009a9 100644
--- a/include/asm-mips/setup.h
+++ b/include/asm-mips/setup.h
@@ -1,8 +1,6 @@
-#ifdef __KERNEL__
 #ifndef _MIPS_SETUP_H
 #define _MIPS_SETUP_H
 
 #define COMMAND_LINE_SIZE	256
 
 #endif /* __SETUP_H */
-#endif /* __KERNEL__ */
diff --git a/include/asm-mips/unistd.h b/include/asm-mips/unistd.h
index ec56aa5..696cff3 100644
--- a/include/asm-mips/unistd.h
+++ b/include/asm-mips/unistd.h
@@ -933,268 +933,6 @@
 
 #ifndef __ASSEMBLY__
 
-/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */
-#define _syscall0(type,name) \
-type name(void) \
-{ \
-	register unsigned long __a3 asm("$7"); \
-	unsigned long __v0; \
-	\
-	__asm__ volatile ( \
-	".set\tnoreorder\n\t" \
-	"li\t$2, %2\t\t\t# " #name "\n\t" \
-	"syscall\n\t" \
-	"move\t%0, $2\n\t" \
-	".set\treorder" \
-	: "=&r" (__v0), "=r" (__a3) \
-	: "i" (__NR_##name) \
-	: "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
-	  "memory"); \
-	\
-	if (__a3 == 0) \
-		return (type) __v0; \
-	errno = __v0; \
-	return (type) -1; \
-}
-
-/*
- * DANGER: This macro isn't usable for the pipe(2) call
- * which has a unusual return convention.
- */
-#define _syscall1(type,name,atype,a) \
-type name(atype a) \
-{ \
-	register unsigned long __a0 asm("$4") = (unsigned long) a; \
-	register unsigned long __a3 asm("$7"); \
-	unsigned long __v0; \
-	\
-	__asm__ volatile ( \
-	".set\tnoreorder\n\t" \
-	"li\t$2, %3\t\t\t# " #name "\n\t" \
-	"syscall\n\t" \
-	"move\t%0, $2\n\t" \
-	".set\treorder" \
-	: "=&r" (__v0), "=r" (__a3) \
-	: "r" (__a0), "i" (__NR_##name) \
-	: "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
-	  "memory"); \
-	\
-	if (__a3 == 0) \
-		return (type) __v0; \
-	errno = __v0; \
-	return (type) -1; \
-}
-
-#define _syscall2(type,name,atype,a,btype,b) \
-type name(atype a, btype b) \
-{ \
-	register unsigned long __a0 asm("$4") = (unsigned long) a; \
-	register unsigned long __a1 asm("$5") = (unsigned long) b; \
-	register unsigned long __a3 asm("$7"); \
-	unsigned long __v0; \
-	\
-	__asm__ volatile ( \
-	".set\tnoreorder\n\t" \
-	"li\t$2, %4\t\t\t# " #name "\n\t" \
-	"syscall\n\t" \
-	"move\t%0, $2\n\t" \
-	".set\treorder" \
-	: "=&r" (__v0), "=r" (__a3) \
-	: "r" (__a0), "r" (__a1), "i" (__NR_##name) \
-	: "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
-	  "memory"); \
-	\
-	if (__a3 == 0) \
-		return (type) __v0; \
-	errno = __v0; \
-	return (type) -1; \
-}
-
-#define _syscall3(type,name,atype,a,btype,b,ctype,c) \
-type name(atype a, btype b, ctype c) \
-{ \
-	register unsigned long __a0 asm("$4") = (unsigned long) a; \
-	register unsigned long __a1 asm("$5") = (unsigned long) b; \
-	register unsigned long __a2 asm("$6") = (unsigned long) c; \
-	register unsigned long __a3 asm("$7"); \
-	unsigned long __v0; \
-	\
-	__asm__ volatile ( \
-	".set\tnoreorder\n\t" \
-	"li\t$2, %5\t\t\t# " #name "\n\t" \
-	"syscall\n\t" \
-	"move\t%0, $2\n\t" \
-	".set\treorder" \
-	: "=&r" (__v0), "=r" (__a3) \
-	: "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_##name) \
-	: "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
-	  "memory"); \
-	\
-	if (__a3 == 0) \
-		return (type) __v0; \
-	errno = __v0; \
-	return (type) -1; \
-}
-
-#define _syscall4(type,name,atype,a,btype,b,ctype,c,dtype,d) \
-type name(atype a, btype b, ctype c, dtype d) \
-{ \
-	register unsigned long __a0 asm("$4") = (unsigned long) a; \
-	register unsigned long __a1 asm("$5") = (unsigned long) b; \
-	register unsigned long __a2 asm("$6") = (unsigned long) c; \
-	register unsigned long __a3 asm("$7") = (unsigned long) d; \
-	unsigned long __v0; \
-	\
-	__asm__ volatile ( \
-	".set\tnoreorder\n\t" \
-	"li\t$2, %5\t\t\t# " #name "\n\t" \
-	"syscall\n\t" \
-	"move\t%0, $2\n\t" \
-	".set\treorder" \
-	: "=&r" (__v0), "+r" (__a3) \
-	: "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_##name) \
-	: "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
-	  "memory"); \
-	\
-	if (__a3 == 0) \
-		return (type) __v0; \
-	errno = __v0; \
-	return (type) -1; \
-}
-
-#if (_MIPS_SIM == _MIPS_SIM_ABI32)
-
-/*
- * Using those means your brain needs more than an oil change ;-)
- */
-
-#define _syscall5(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e) \
-type name(atype a, btype b, ctype c, dtype d, etype e) \
-{ \
-	register unsigned long __a0 asm("$4") = (unsigned long) a; \
-	register unsigned long __a1 asm("$5") = (unsigned long) b; \
-	register unsigned long __a2 asm("$6") = (unsigned long) c; \
-	register unsigned long __a3 asm("$7") = (unsigned long) d; \
-	unsigned long __v0; \
-	\
-	__asm__ volatile ( \
-	".set\tnoreorder\n\t" \
-	"lw\t$2, %6\n\t" \
-	"subu\t$29, 32\n\t" \
-	"sw\t$2, 16($29)\n\t" \
-	"li\t$2, %5\t\t\t# " #name "\n\t" \
-	"syscall\n\t" \
-	"move\t%0, $2\n\t" \
-	"addiu\t$29, 32\n\t" \
-	".set\treorder" \
-	: "=&r" (__v0), "+r" (__a3) \
-	: "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_##name), \
-	  "m" ((unsigned long)e) \
-	: "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
-	  "memory"); \
-	\
-	if (__a3 == 0) \
-		return (type) __v0; \
-	errno = __v0; \
-	return (type) -1; \
-}
-
-#define _syscall6(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e,ftype,f) \
-type name(atype a, btype b, ctype c, dtype d, etype e, ftype f) \
-{ \
-	register unsigned long __a0 asm("$4") = (unsigned long) a; \
-	register unsigned long __a1 asm("$5") = (unsigned long) b; \
-	register unsigned long __a2 asm("$6") = (unsigned long) c; \
-	register unsigned long __a3 asm("$7") = (unsigned long) d; \
-	unsigned long __v0; \
-	\
-	__asm__ volatile ( \
-	".set\tnoreorder\n\t" \
-	"lw\t$2, %6\n\t" \
-	"lw\t$8, %7\n\t" \
-	"subu\t$29, 32\n\t" \
-	"sw\t$2, 16($29)\n\t" \
-	"sw\t$8, 20($29)\n\t" \
-	"li\t$2, %5\t\t\t# " #name "\n\t" \
-	"syscall\n\t" \
-	"move\t%0, $2\n\t" \
-	"addiu\t$29, 32\n\t" \
-	".set\treorder" \
-	: "=&r" (__v0), "+r" (__a3) \
-	: "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_##name), \
-	  "m" ((unsigned long)e), "m" ((unsigned long)f) \
-	: "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
-	  "memory"); \
-	\
-	if (__a3 == 0) \
-		return (type) __v0; \
-	errno = __v0; \
-	return (type) -1; \
-}
-
-#endif /* (_MIPS_SIM == _MIPS_SIM_ABI32) */
-
-#if (_MIPS_SIM == _MIPS_SIM_NABI32) || (_MIPS_SIM == _MIPS_SIM_ABI64)
-
-#define _syscall5(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e) \
-type name (atype a,btype b,ctype c,dtype d,etype e) \
-{ \
-	register unsigned long __a0 asm("$4") = (unsigned long) a; \
-	register unsigned long __a1 asm("$5") = (unsigned long) b; \
-	register unsigned long __a2 asm("$6") = (unsigned long) c; \
-	register unsigned long __a3 asm("$7") = (unsigned long) d; \
-	register unsigned long __a4 asm("$8") = (unsigned long) e; \
-	unsigned long __v0; \
-	\
-	__asm__ volatile ( \
-	".set\tnoreorder\n\t" \
-	"li\t$2, %6\t\t\t# " #name "\n\t" \
-	"syscall\n\t" \
-	"move\t%0, $2\n\t" \
-	".set\treorder" \
-	: "=&r" (__v0), "+r" (__a3) \
-	: "r" (__a0), "r" (__a1), "r" (__a2), "r" (__a4), "i" (__NR_##name) \
-	: "$2", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
-	  "memory"); \
-	\
-	if (__a3 == 0) \
-		return (type) __v0; \
-	errno = __v0; \
-	return (type) -1; \
-}
-
-#define _syscall6(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e,ftype,f) \
-type name (atype a,btype b,ctype c,dtype d,etype e,ftype f) \
-{ \
-	register unsigned long __a0 asm("$4") = (unsigned long) a; \
-	register unsigned long __a1 asm("$5") = (unsigned long) b; \
-	register unsigned long __a2 asm("$6") = (unsigned long) c; \
-	register unsigned long __a3 asm("$7") = (unsigned long) d; \
-	register unsigned long __a4 asm("$8") = (unsigned long) e; \
-	register unsigned long __a5 asm("$9") = (unsigned long) f; \
-	unsigned long __v0; \
-	\
-	__asm__ volatile ( \
-	".set\tnoreorder\n\t" \
-	"li\t$2, %7\t\t\t# " #name "\n\t" \
-	"syscall\n\t" \
-	"move\t%0, $2\n\t" \
-	".set\treorder" \
-	: "=&r" (__v0), "+r" (__a3) \
-	: "r" (__a0), "r" (__a1), "r" (__a2), "r" (__a4), "r" (__a5), \
-	  "i" (__NR_##name) \
-	: "$2", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
-	  "memory"); \
-	\
-	if (__a3 == 0) \
-		return (type) __v0; \
-	errno = __v0; \
-	return (type) -1; \
-}
-
-#endif /* (_MIPS_SIM == _MIPS_SIM_NABI32) || (_MIPS_SIM == _MIPS_SIM_ABI64) */
-
-
 #define __ARCH_OMIT_COMPAT_SYS_GETDENTS64
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
diff --git a/include/asm-parisc/dma-mapping.h b/include/asm-parisc/dma-mapping.h
index 1e387e1..66f0b40 100644
--- a/include/asm-parisc/dma-mapping.h
+++ b/include/asm-parisc/dma-mapping.h
@@ -191,13 +191,13 @@
 }
 
 static inline int
-dma_is_consistent(dma_addr_t dma_addr)
+dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
 {
 	return (hppa_dma_ops->dma_sync_single_for_cpu == NULL);
 }
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
 {
 	if(hppa_dma_ops->dma_sync_single_for_cpu)
diff --git a/include/asm-parisc/futex.h b/include/asm-parisc/futex.h
index d84bbb2..dbee6e6 100644
--- a/include/asm-parisc/futex.h
+++ b/include/asm-parisc/futex.h
@@ -21,7 +21,7 @@
 	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
 		return -EFAULT;
 
-	inc_preempt_count();
+	pagefault_disable();
 
 	switch (op) {
 	case FUTEX_OP_SET:
@@ -33,7 +33,7 @@
 		ret = -ENOSYS;
 	}
 
-	dec_preempt_count();
+	pagefault_enable();
 
 	if (!ret) {
 		switch (cmp) {
diff --git a/include/asm-powerpc/dma-mapping.h b/include/asm-powerpc/dma-mapping.h
index 7e38b5f..7c7de87 100644
--- a/include/asm-powerpc/dma-mapping.h
+++ b/include/asm-powerpc/dma-mapping.h
@@ -342,9 +342,9 @@
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 #ifdef CONFIG_NOT_COHERENT_CACHE
-#define dma_is_consistent(d)	(0)
+#define dma_is_consistent(d, h)	(0)
 #else
-#define dma_is_consistent(d)	(1)
+#define dma_is_consistent(d, h)	(1)
 #endif
 
 static inline int dma_get_cache_alignment(void)
@@ -378,7 +378,7 @@
 	dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
 }
 
-static inline void dma_cache_sync(void *vaddr, size_t size,
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		enum dma_data_direction direction)
 {
 	BUG_ON(direction == DMA_NONE);
diff --git a/include/asm-powerpc/elf.h b/include/asm-powerpc/elf.h
index b543664..d36426c 100644
--- a/include/asm-powerpc/elf.h
+++ b/include/asm-powerpc/elf.h
@@ -124,12 +124,10 @@
 # define ELF_DATA	ELFDATA2MSB
   typedef elf_greg_t64 elf_greg_t;
   typedef elf_gregset_t64 elf_gregset_t;
-# define elf_addr_t unsigned long
 #else
   /* Assumption: ELF_ARCH == EM_PPC and ELF_CLASS == ELFCLASS32 */
   typedef elf_greg_t32 elf_greg_t;
   typedef elf_gregset_t32 elf_gregset_t;
-# define elf_addr_t __u32
 #endif /* ELF_ARCH */
 
 /* Floating point registers */
diff --git a/include/asm-powerpc/futex.h b/include/asm-powerpc/futex.h
index 936422e..3f3673f 100644
--- a/include/asm-powerpc/futex.h
+++ b/include/asm-powerpc/futex.h
@@ -43,7 +43,7 @@
 	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
 		return -EFAULT;
 
-	inc_preempt_count();
+	pagefault_disable();
 
 	switch (op) {
 	case FUTEX_OP_SET:
@@ -65,7 +65,7 @@
 		ret = -ENOSYS;
 	}
 
-	dec_preempt_count();
+	pagefault_enable();
 
 	if (!ret) {
 		switch (cmp) {
diff --git a/include/asm-powerpc/pgalloc.h b/include/asm-powerpc/pgalloc.h
index ae63db7..b0830db 100644
--- a/include/asm-powerpc/pgalloc.h
+++ b/include/asm-powerpc/pgalloc.h
@@ -11,7 +11,7 @@
 #include <linux/cpumask.h>
 #include <linux/percpu.h>
 
-extern kmem_cache_t *pgtable_cache[];
+extern struct kmem_cache *pgtable_cache[];
 
 #ifdef CONFIG_PPC_64K_PAGES
 #define PTE_CACHE_NUM	0
diff --git a/include/asm-powerpc/setup.h b/include/asm-powerpc/setup.h
index 3d9740a..817fac0 100644
--- a/include/asm-powerpc/setup.h
+++ b/include/asm-powerpc/setup.h
@@ -1,9 +1,6 @@
 #ifndef _ASM_POWERPC_SETUP_H
 #define _ASM_POWERPC_SETUP_H
 
-#ifdef __KERNEL__
-
 #define COMMAND_LINE_SIZE	512
 
-#endif	/* __KERNEL__ */
 #endif	/* _ASM_POWERPC_SETUP_H */
diff --git a/include/asm-powerpc/unistd.h b/include/asm-powerpc/unistd.h
index 04b6c17..0ae954e 100644
--- a/include/asm-powerpc/unistd.h
+++ b/include/asm-powerpc/unistd.h
@@ -334,115 +334,6 @@
 
 #ifndef __ASSEMBLY__
 
-/* On powerpc a system call basically clobbers the same registers like a
- * function call, with the exception of LR (which is needed for the
- * "sc; bnslr" sequence) and CR (where only CR0.SO is clobbered to signal
- * an error return status).
- */
-
-#define __syscall_nr(nr, type, name, args...)				\
-	unsigned long __sc_ret, __sc_err;				\
-	{								\
-		register unsigned long __sc_0  __asm__ ("r0");		\
-		register unsigned long __sc_3  __asm__ ("r3");		\
-		register unsigned long __sc_4  __asm__ ("r4");		\
-		register unsigned long __sc_5  __asm__ ("r5");		\
-		register unsigned long __sc_6  __asm__ ("r6");		\
-		register unsigned long __sc_7  __asm__ ("r7");		\
-		register unsigned long __sc_8  __asm__ ("r8");		\
-									\
-		__sc_loadargs_##nr(name, args);				\
-		__asm__ __volatile__					\
-			("sc           \n\t"				\
-			 "mfcr %0      "				\
-			: "=&r" (__sc_0),				\
-			  "=&r" (__sc_3),  "=&r" (__sc_4),		\
-			  "=&r" (__sc_5),  "=&r" (__sc_6),		\
-			  "=&r" (__sc_7),  "=&r" (__sc_8)		\
-			: __sc_asm_input_##nr				\
-			: "cr0", "ctr", "memory",			\
-			  "r9", "r10","r11", "r12");			\
-		__sc_ret = __sc_3;					\
-		__sc_err = __sc_0;					\
-	}								\
-	if (__sc_err & 0x10000000)					\
-	{								\
-		errno = __sc_ret;					\
-		__sc_ret = -1;						\
-	}								\
-	return (type) __sc_ret
-
-#define __sc_loadargs_0(name, dummy...)					\
-	__sc_0 = __NR_##name
-#define __sc_loadargs_1(name, arg1)					\
-	__sc_loadargs_0(name);						\
-	__sc_3 = (unsigned long) (arg1)
-#define __sc_loadargs_2(name, arg1, arg2)				\
-	__sc_loadargs_1(name, arg1);					\
-	__sc_4 = (unsigned long) (arg2)
-#define __sc_loadargs_3(name, arg1, arg2, arg3)				\
-	__sc_loadargs_2(name, arg1, arg2);				\
-	__sc_5 = (unsigned long) (arg3)
-#define __sc_loadargs_4(name, arg1, arg2, arg3, arg4)			\
-	__sc_loadargs_3(name, arg1, arg2, arg3);			\
-	__sc_6 = (unsigned long) (arg4)
-#define __sc_loadargs_5(name, arg1, arg2, arg3, arg4, arg5)		\
-	__sc_loadargs_4(name, arg1, arg2, arg3, arg4);			\
-	__sc_7 = (unsigned long) (arg5)
-#define __sc_loadargs_6(name, arg1, arg2, arg3, arg4, arg5, arg6)	\
-	__sc_loadargs_5(name, arg1, arg2, arg3, arg4, arg5);		\
-	__sc_8 = (unsigned long) (arg6)
-
-#define __sc_asm_input_0 "0" (__sc_0)
-#define __sc_asm_input_1 __sc_asm_input_0, "1" (__sc_3)
-#define __sc_asm_input_2 __sc_asm_input_1, "2" (__sc_4)
-#define __sc_asm_input_3 __sc_asm_input_2, "3" (__sc_5)
-#define __sc_asm_input_4 __sc_asm_input_3, "4" (__sc_6)
-#define __sc_asm_input_5 __sc_asm_input_4, "5" (__sc_7)
-#define __sc_asm_input_6 __sc_asm_input_5, "6" (__sc_8)
-
-#define _syscall0(type,name)						\
-type name(void)								\
-{									\
-	__syscall_nr(0, type, name);					\
-}
-
-#define _syscall1(type,name,type1,arg1)					\
-type name(type1 arg1)							\
-{									\
-	__syscall_nr(1, type, name, arg1);				\
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2)			\
-type name(type1 arg1, type2 arg2)					\
-{									\
-	__syscall_nr(2, type, name, arg1, arg2);			\
-}
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)		\
-type name(type1 arg1, type2 arg2, type3 arg3)				\
-{									\
-	__syscall_nr(3, type, name, arg1, arg2, arg3);			\
-}
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4)		\
-{									\
-	__syscall_nr(4, type, name, arg1, arg2, arg3, arg4);		\
-}
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5)	\
-{									\
-	__syscall_nr(5, type, name, arg1, arg2, arg3, arg4, arg5);	\
-}
-#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5,type6,arg6) \
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) \
-{									\
-	__syscall_nr(6, type, name, arg1, arg2, arg3, arg4, arg5, arg6); \
-}
-
-
 #include <linux/types.h>
 #include <linux/compiler.h>
 #include <linux/linkage.h>
diff --git a/include/asm-ppc/highmem.h b/include/asm-ppc/highmem.h
index 1d2c4ef..f7b21ee 100644
--- a/include/asm-ppc/highmem.h
+++ b/include/asm-ppc/highmem.h
@@ -79,7 +79,7 @@
 	unsigned long vaddr;
 
 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-	inc_preempt_count();
+	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
 
@@ -101,8 +101,7 @@
 	unsigned int idx = type + KM_TYPE_NR*smp_processor_id();
 
 	if (vaddr < KMAP_FIX_BEGIN) { // FIXME
-		dec_preempt_count();
-		preempt_check_resched();
+		pagefault_enable();
 		return;
 	}
 
@@ -115,8 +114,7 @@
 	pte_clear(&init_mm, vaddr, kmap_pte+idx);
 	flush_tlb_page(NULL, vaddr);
 #endif
-	dec_preempt_count();
-	preempt_check_resched();
+	pagefault_enable();
 }
 
 static inline struct page *kmap_atomic_to_page(void *ptr)
diff --git a/include/asm-s390/setup.h b/include/asm-s390/setup.h
index 7664bac..9574fe8 100644
--- a/include/asm-s390/setup.h
+++ b/include/asm-s390/setup.h
@@ -8,12 +8,13 @@
 #ifndef _ASM_S390_SETUP_H
 #define _ASM_S390_SETUP_H
 
+#define COMMAND_LINE_SIZE 	896
+
 #ifdef __KERNEL__
 
 #include <asm/types.h>
 
 #define PARMAREA		0x10400
-#define COMMAND_LINE_SIZE 	896
 #define MEMORY_CHUNKS		16	/* max 0x7fff */
 #define IPL_PARMBLOCK_ORIGIN	0x2000
 
diff --git a/include/asm-s390/unistd.h b/include/asm-s390/unistd.h
index 71d3c21..fb6fef9 100644
--- a/include/asm-s390/unistd.h
+++ b/include/asm-s390/unistd.h
@@ -345,160 +345,6 @@
 
 #ifdef __KERNEL__
 
-#include <linux/err.h>
-
-#define __syscall_return(type, res)			     \
-do {							     \
-	if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \
-		errno = -(res);				     \
-		res = -1;				     \
-	}						     \
-	return (type) (res);				     \
-} while (0)
-
-#define _svc_clobber "1", "cc", "memory"
-
-#define _syscall0(type,name)					\
-type name(void) {						\
-	register long __svcres asm("2");			\
-	long __res;						\
-	asm volatile(						\
-		"	.if	%1 < 256\n"			\
-		"	svc	%b1\n"				\
-		"	.else\n"				\
-		"	la	%%r1,%1\n"			\
-		"	svc	0\n"				\
-		"	.endif"					\
-		: "=d" (__svcres)				\
-		: "i" (__NR_##name)				\
-		: _svc_clobber);				\
-	__res = __svcres;					\
-	__syscall_return(type,__res);				\
-}
-
-#define _syscall1(type,name,type1,arg1)				\
-type name(type1 arg1) {						\
-	register type1 __arg1 asm("2") = arg1;			\
-	register long __svcres asm("2");			\
-	long __res;						\
-	asm volatile(						\
-		"	.if	%1 < 256\n"			\
-		"	svc	%b1\n"				\
-		"	.else\n"				\
-		"	la	%%r1,%1\n"			\
-		"	svc	0\n"				\
-		"	.endif"					\
-		: "=d" (__svcres)				\
-		: "i" (__NR_##name),				\
-		  "0" (__arg1)					\
-		: _svc_clobber);				\
-	__res = __svcres;					\
-	__syscall_return(type,__res);				\
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2)		\
-type name(type1 arg1, type2 arg2) {				\
-	register type1 __arg1 asm("2") = arg1;			\
-	register type2 __arg2 asm("3") = arg2;			\
-	register long __svcres asm("2");			\
-	long __res;						\
-	asm volatile(						\
-		"	.if	%1 < 256\n"			\
-		"	svc	%b1\n"				\
-		"	.else\n"				\
-		"	la	%%r1,%1\n"			\
-		"	svc	0\n"				\
-		"	.endif"					\
-		: "=d" (__svcres)				\
-		: "i" (__NR_##name),				\
-		  "0" (__arg1),					\
-		  "d" (__arg2)					\
-		: _svc_clobber );				\
-	__res = __svcres;					\
-	__syscall_return(type,__res);				\
-}
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
-type name(type1 arg1, type2 arg2, type3 arg3) {			\
-	register type1 __arg1 asm("2") = arg1;			\
-	register type2 __arg2 asm("3") = arg2;			\
-	register type3 __arg3 asm("4") = arg3;			\
-	register long __svcres asm("2");			\
-	long __res;						\
-	asm volatile(						\
-		"	.if	%1 < 256\n"			\
-		"	svc	%b1\n"				\
-		"	.else\n"				\
-		"	la	%%r1,%1\n"			\
-		"	svc	0\n"				\
-		"	.endif"					\
-		: "=d" (__svcres)				\
-		: "i" (__NR_##name),				\
-		  "0" (__arg1),					\
-		  "d" (__arg2),					\
-		  "d" (__arg3)					\
-		: _svc_clobber);				\
-	__res = __svcres;					\
-	__syscall_return(type,__res);				\
-}
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,	\
-		  type4,name4)					\
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) {	\
-	register type1 __arg1 asm("2") = arg1;			\
-	register type2 __arg2 asm("3") = arg2;			\
-	register type3 __arg3 asm("4") = arg3;			\
-	register type4 __arg4 asm("5") = arg4;			\
-	register long __svcres asm("2");			\
-	long __res;						\
-	asm volatile(						\
-		"	.if	%1 < 256\n"			\
-		"	svc	%b1\n"				\
-		"	.else\n"				\
-		"	la	%%r1,%1\n"			\
-		"	svc	0\n"				\
-		"	.endif"					\
-		: "=d" (__svcres)				\
-		: "i" (__NR_##name),				\
-		  "0" (__arg1),					\
-		  "d" (__arg2),					\
-		  "d" (__arg3),					\
-		  "d" (__arg4)					\
-		: _svc_clobber);				\
-	__res = __svcres;					\
-	__syscall_return(type,__res);				\
-}
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,	\
-		  type4,name4,type5,name5)			\
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4,	\
-	  type5 arg5) {						\
-	register type1 __arg1 asm("2") = arg1;			\
-	register type2 __arg2 asm("3") = arg2;			\
-	register type3 __arg3 asm("4") = arg3;			\
-	register type4 __arg4 asm("5") = arg4;			\
-	register type5 __arg5 asm("6") = arg5;			\
-	register long __svcres asm("2");			\
-	long __res;						\
-	asm volatile(						\
-		"	.if	%1 < 256\n"			\
-		"	svc	%b1\n"				\
-		"	.else\n"				\
-		"	la	%%r1,%1\n"			\
-		"	svc	0\n"				\
-		"	.endif"					\
-		: "=d" (__svcres)				\
-		: "i" (__NR_##name),				\
-		  "0" (__arg1),					\
-		  "d" (__arg2),					\
-		  "d" (__arg3),					\
-		  "d" (__arg4),					\
-		  "d" (__arg5)					\
-		: _svc_clobber);				\
-	__res = __svcres;					\
-	__syscall_return(type,__res);				\
-}
-
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_SYS_ALARM
diff --git a/include/asm-sh/atomic.h b/include/asm-sh/atomic.h
index 8bdc1ba..28305c3 100644
--- a/include/asm-sh/atomic.h
+++ b/include/asm-sh/atomic.h
@@ -28,11 +28,11 @@
 	unsigned long tmp;
 
 	__asm__ __volatile__ (
-"1:	movli.l @%3, %0		! atomic_add	\n"
-"	add	%2, %0				\n"
-"	movco.l	%0, @%3				\n"
+"1:	movli.l @%2, %0		! atomic_add	\n"
+"	add	%1, %0				\n"
+"	movco.l	%0, @%2				\n"
 "	bf	1b				\n"
-	: "=&z" (tmp), "=r" (&v->counter)
+	: "=&z" (tmp)
 	: "r" (i), "r" (&v->counter)
 	: "t");
 #else
@@ -50,11 +50,11 @@
 	unsigned long tmp;
 
 	__asm__ __volatile__ (
-"1:	movli.l @%3, %0		! atomic_sub	\n"
-"	sub	%2, %0				\n"
-"	movco.l	%0, @%3				\n"
+"1:	movli.l @%2, %0		! atomic_sub	\n"
+"	sub	%1, %0				\n"
+"	movco.l	%0, @%2				\n"
 "	bf	1b				\n"
-	: "=&z" (tmp), "=r" (&v->counter)
+	: "=&z" (tmp)
 	: "r" (i), "r" (&v->counter)
 	: "t");
 #else
@@ -80,12 +80,12 @@
 
 #ifdef CONFIG_CPU_SH4A
 	__asm__ __volatile__ (
-"1:	movli.l @%3, %0		! atomic_add_return	\n"
-"	add	%2, %0					\n"
-"	movco.l	%0, @%3					\n"
+"1:	movli.l @%2, %0		! atomic_add_return	\n"
+"	add	%1, %0					\n"
+"	movco.l	%0, @%2					\n"
 "	bf	1b					\n"
 "	synco						\n"
-	: "=&z" (temp), "=r" (&v->counter)
+	: "=&z" (temp)
 	: "r" (i), "r" (&v->counter)
 	: "t");
 #else
@@ -109,12 +109,12 @@
 
 #ifdef CONFIG_CPU_SH4A
 	__asm__ __volatile__ (
-"1:	movli.l @%3, %0		! atomic_sub_return	\n"
-"	sub	%2, %0					\n"
-"	movco.l	%0, @%3					\n"
+"1:	movli.l @%2, %0		! atomic_sub_return	\n"
+"	sub	%1, %0					\n"
+"	movco.l	%0, @%2					\n"
 "	bf	1b					\n"
 "	synco						\n"
-	: "=&z" (temp), "=r" (&v->counter)
+	: "=&z" (temp)
 	: "r" (i), "r" (&v->counter)
 	: "t");
 #else
@@ -186,11 +186,11 @@
 	unsigned long tmp;
 
 	__asm__ __volatile__ (
-"1:	movli.l @%3, %0		! atomic_clear_mask	\n"
-"	and	%2, %0					\n"
-"	movco.l	%0, @%3					\n"
+"1:	movli.l @%2, %0		! atomic_clear_mask	\n"
+"	and	%1, %0					\n"
+"	movco.l	%0, @%2					\n"
 "	bf	1b					\n"
-	: "=&z" (tmp), "=r" (&v->counter)
+	: "=&z" (tmp)
 	: "r" (~mask), "r" (&v->counter)
 	: "t");
 #else
@@ -208,11 +208,11 @@
 	unsigned long tmp;
 
 	__asm__ __volatile__ (
-"1:	movli.l @%3, %0		! atomic_set_mask	\n"
-"	or	%2, %0					\n"
-"	movco.l	%0, @%3					\n"
+"1:	movli.l @%2, %0		! atomic_set_mask	\n"
+"	or	%1, %0					\n"
+"	movco.l	%0, @%2					\n"
 "	bf	1b					\n"
-	: "=&z" (tmp), "=r" (&v->counter)
+	: "=&z" (tmp)
 	: "r" (mask), "r" (&v->counter)
 	: "t");
 #else
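
The SH4A paths listed "=r" (&v->counter) as an asm output, but the address of the counter is not an lvalue and is never written by the template; the fix keeps the address purely as an input and renumbers the operands. A generic illustration of the rule, written as hypothetical x86 rather than SH code:

/* Hypothetical x86 illustration: the object being modified is expressed
 * as a read-modify-write memory operand, and only real lvalues appear in
 * the output list; the address itself is never an output.
 */
static inline void add_mem(int *counter, int i)
{
	__asm__ __volatile__(
		"addl	%1, %0"
		: "+m" (*counter)	/* the word being modified */
		: "ir" (i)		/* the value to add        */
		: "cc");
}
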
diff --git a/include/asm-sh/bugs.h b/include/asm-sh/bugs.h
index beeea40..795047d 100644
--- a/include/asm-sh/bugs.h
+++ b/include/asm-sh/bugs.h
@@ -23,16 +23,20 @@
 	cpu_data->loops_per_jiffy = loops_per_jiffy;
 
 	switch (cpu_data->type) {
-	case CPU_SH7604:
+	case CPU_SH7604 ... CPU_SH7619:
 		*p++ = '2';
 		break;
+	case CPU_SH7206:
+		*p++ = '2';
+		*p++ = 'a';
+		break;
 	case CPU_SH7705 ... CPU_SH7300:
 		*p++ = '3';
 		break;
 	case CPU_SH7750 ... CPU_SH4_501:
 		*p++ = '4';
 		break;
-	case CPU_SH7770 ... CPU_SH7781:
+	case CPU_SH7770 ... CPU_SH7785:
 		*p++ = '4';
 		*p++ = 'a';
 		break;
diff --git a/include/asm-sh/clock.h b/include/asm-sh/clock.h
index fdfb75b..1df9280 100644
--- a/include/asm-sh/clock.h
+++ b/include/asm-sh/clock.h
@@ -4,6 +4,7 @@
 #include <linux/kref.h>
 #include <linux/list.h>
 #include <linux/seq_file.h>
+#include <linux/clk.h>
 
 struct clk;
 
@@ -18,7 +19,7 @@
 struct clk {
 	struct list_head	node;
 	const char		*name;
-
+	int			id;
 	struct module		*owner;
 
 	struct clk		*parent;
@@ -40,22 +41,13 @@
 int clk_init(void);
 
 int __clk_enable(struct clk *);
-int clk_enable(struct clk *);
-
 void __clk_disable(struct clk *);
-void clk_disable(struct clk *);
 
-int clk_set_rate(struct clk *, unsigned long rate);
-unsigned long clk_get_rate(struct clk *);
 void clk_recalc_rate(struct clk *);
 
-struct clk *clk_get(const char *id);
-void clk_put(struct clk *);
-
 int clk_register(struct clk *);
 void clk_unregister(struct clk *);
 
 int show_clocks(struct seq_file *m);
 
 #endif /* __ASM_SH_CLOCK_H */
-
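
The SH-private declarations of clk_get(), clk_put(), clk_enable(), clk_disable() and the rate accessors are dropped in favour of the generic API now pulled in through <linux/clk.h>; only the registration and recalculation hooks stay SH-specific. A minimal consumer sketch against the generic API; the clock name "sci" and the surrounding driver are assumptions, not taken from this patch:

#include <linux/clk.h>
#include <linux/err.h>

/* Hypothetical consumer of the generic clk API. */
static unsigned long probe_module_clock(struct device *dev)
{
	struct clk *clk = clk_get(dev, "sci");
	unsigned long rate = 0;

	if (IS_ERR(clk))
		return 0;

	if (!clk_enable(clk)) {
		rate = clk_get_rate(clk);
		clk_disable(clk);
	}
	clk_put(clk);

	return rate;
}
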
diff --git a/include/asm-sh/cpu-sh2/cache.h b/include/asm-sh/cpu-sh2/cache.h
index cd96402..20b9796 100644
--- a/include/asm-sh/cpu-sh2/cache.h
+++ b/include/asm-sh/cpu-sh2/cache.h
@@ -12,6 +12,7 @@
 
 #define L1_CACHE_SHIFT	4
 
+#if defined(CONFIG_CPU_SUBTYPE_SH7604)
 #define CCR		0xfffffe92	/* Address of Cache Control Register */
 
 #define CCR_CACHE_CE	0x01	/* Cache enable */
@@ -27,5 +28,26 @@
 #define CCR_CACHE_ORA		CCR_CACHE_TW
 #define CCR_CACHE_WT		0x00	/* SH-2 is _always_ write-through */
 
+#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
+#define CCR1		0xffffffec
+#define CCR		CCR1
+
+#define CCR_CACHE_CE	0x01	/* Cache enable */
+#define CCR_CACHE_WT	0x06    /* CCR[bit1=1,bit2=1] */
+				/* 0x00000000-0x7fffffff: Write-through  */
+				/* 0x80000000-0x9fffffff: Write-back     */
+                                /* 0xc0000000-0xdfffffff: Write-through  */
+#define CCR_CACHE_CB	0x00    /* CCR[bit1=0,bit2=0] */
+				/* 0x00000000-0x7fffffff: Write-back     */
+				/* 0x80000000-0x9fffffff: Write-through  */
+                                /* 0xc0000000-0xdfffffff: Write-back     */
+#define CCR_CACHE_CF	0x08	/* Cache invalidate */
+
+#define CACHE_OC_ADDRESS_ARRAY	0xf0000000
+#define CACHE_OC_DATA_ARRAY	0xf1000000
+
+#define CCR_CACHE_ENABLE	CCR_CACHE_CE
+#define CCR_CACHE_INVALIDATE	CCR_CACHE_CF
+#endif
 #endif /* __ASM_CPU_SH2_CACHE_H */
 
diff --git a/include/asm-sh/cpu-sh2/freq.h b/include/asm-sh/cpu-sh2/freq.h
new file mode 100644
index 0000000..31de475
--- /dev/null
+++ b/include/asm-sh/cpu-sh2/freq.h
@@ -0,0 +1,18 @@
+/*
+ * include/asm-sh/cpu-sh2/freq.h
+ *
+ * Copyright (C) 2006  Yoshinori Sato
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef __ASM_CPU_SH2_FREQ_H
+#define __ASM_CPU_SH2_FREQ_H
+
+#if defined(CONFIG_CPU_SUBTYPE_SH7619)
+#define FREQCR	0xf815ff80
+#endif
+
+#endif /* __ASM_CPU_SH2_FREQ_H */
+
diff --git a/include/asm-sh/cpu-sh2/mmu_context.h b/include/asm-sh/cpu-sh2/mmu_context.h
new file mode 100644
index 0000000..beeb299
--- /dev/null
+++ b/include/asm-sh/cpu-sh2/mmu_context.h
@@ -0,0 +1,16 @@
+/*
+ * include/asm-sh/cpu-sh2/mmu_context.h
+ *
+ * Copyright (C) 2003  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef __ASM_CPU_SH2_MMU_CONTEXT_H
+#define __ASM_CPU_SH2_MMU_CONTEXT_H
+
+/* No MMU */
+
+#endif /* __ASM_CPU_SH2_MMU_CONTEXT_H */
+
diff --git a/include/asm-sh/cpu-sh2/timer.h b/include/asm-sh/cpu-sh2/timer.h
new file mode 100644
index 0000000..a39c241
--- /dev/null
+++ b/include/asm-sh/cpu-sh2/timer.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_CPU_SH2_TIMER_H
+#define __ASM_CPU_SH2_TIMER_H
+
+/* Nothing needed yet */
+
+#endif /* __ASM_CPU_SH2_TIMER_H */
diff --git a/include/asm-sh/cpu-sh2a/addrspace.h b/include/asm-sh/cpu-sh2a/addrspace.h
new file mode 100644
index 0000000..3d2e9aa
--- /dev/null
+++ b/include/asm-sh/cpu-sh2a/addrspace.h
@@ -0,0 +1 @@
+#include <asm/cpu-sh2/addrspace.h>
diff --git a/include/asm-sh/cpu-sh2a/cache.h b/include/asm-sh/cpu-sh2a/cache.h
new file mode 100644
index 0000000..3e4b9e4
--- /dev/null
+++ b/include/asm-sh/cpu-sh2a/cache.h
@@ -0,0 +1,39 @@
+/*
+ * include/asm-sh/cpu-sh2a/cache.h
+ *
+ * Copyright (C) 2004 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef __ASM_CPU_SH2A_CACHE_H
+#define __ASM_CPU_SH2A_CACHE_H
+
+#define L1_CACHE_SHIFT	4
+
+#define CCR1		0xfffc1000
+#define CCR2		0xfffc1004
+
+/* CCR1 behaves more like the traditional CCR */
+#define CCR		CCR1
+
+/*
+ * Most of the SH-2A CCR1 definitions resemble the SH-4 ones. All others not
+ * listed here are reserved.
+ */
+#define CCR_CACHE_CB	0x0000	/* Hack */
+#define CCR_CACHE_OCE	0x0001
+#define CCR_CACHE_WT	0x0002
+#define CCR_CACHE_OCI	0x0008	/* OCF */
+#define CCR_CACHE_ICE	0x0100
+#define CCR_CACHE_ICI	0x0800	/* ICF */
+
+#define CACHE_IC_ADDRESS_ARRAY	0xf0000000
+#define CACHE_OC_ADDRESS_ARRAY	0xf0800000
+
+#define CCR_CACHE_ENABLE	(CCR_CACHE_OCE | CCR_CACHE_ICE)
+#define CCR_CACHE_INVALIDATE	(CCR_CACHE_OCI | CCR_CACHE_ICI)
+
+#endif /* __ASM_CPU_SH2A_CACHE_H */
+
diff --git a/include/asm-sh/cpu-sh2a/cacheflush.h b/include/asm-sh/cpu-sh2a/cacheflush.h
new file mode 100644
index 0000000..fa3186c
--- /dev/null
+++ b/include/asm-sh/cpu-sh2a/cacheflush.h
@@ -0,0 +1 @@
+#include <asm/cpu-sh2/cacheflush.h>
diff --git a/include/asm-sh/cpu-sh2a/dma.h b/include/asm-sh/cpu-sh2a/dma.h
new file mode 100644
index 0000000..0d5ad85
--- /dev/null
+++ b/include/asm-sh/cpu-sh2a/dma.h
@@ -0,0 +1 @@
+#include <asm/cpu-sh2/dma.h>
diff --git a/include/asm-sh/cpu-sh2a/freq.h b/include/asm-sh/cpu-sh2a/freq.h
new file mode 100644
index 0000000..e518fff
--- /dev/null
+++ b/include/asm-sh/cpu-sh2a/freq.h
@@ -0,0 +1,18 @@
+/*
+ * include/asm-sh/cpu-sh2a/freq.h
+ *
+ * Copyright (C) 2006  Yoshinori Sato
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef __ASM_CPU_SH2A_FREQ_H
+#define __ASM_CPU_SH2A_FREQ_H
+
+#if defined(CONFIG_CPU_SUBTYPE_SH7206)
+#define FREQCR	0xfffe0010
+#endif
+
+#endif /* __ASM_CPU_SH2A_FREQ_H */
+
diff --git a/include/asm-sh/cpu-sh2a/mmu_context.h b/include/asm-sh/cpu-sh2a/mmu_context.h
new file mode 100644
index 0000000..cd2387f
--- /dev/null
+++ b/include/asm-sh/cpu-sh2a/mmu_context.h
@@ -0,0 +1 @@
+#include <asm/cpu-sh2/mmu_context.h>
diff --git a/include/asm-sh/cpu-sh2a/timer.h b/include/asm-sh/cpu-sh2a/timer.h
new file mode 100644
index 0000000..fee504a
--- /dev/null
+++ b/include/asm-sh/cpu-sh2a/timer.h
@@ -0,0 +1 @@
+#include <asm/cpu-sh2/timer.h>
diff --git a/include/asm-sh/cpu-sh2a/ubc.h b/include/asm-sh/cpu-sh2a/ubc.h
new file mode 100644
index 0000000..cf28062
--- /dev/null
+++ b/include/asm-sh/cpu-sh2a/ubc.h
@@ -0,0 +1 @@
+#include <asm/cpu-sh2/ubc.h>
diff --git a/include/asm-sh/cpu-sh2a/watchdog.h b/include/asm-sh/cpu-sh2a/watchdog.h
new file mode 100644
index 0000000..c1b3e24
--- /dev/null
+++ b/include/asm-sh/cpu-sh2a/watchdog.h
@@ -0,0 +1 @@
+#include <asm/cpu-sh2/watchdog.h>
diff --git a/include/asm-sh/dma-mapping.h b/include/asm-sh/dma-mapping.h
index 56cd4b9..37ab0c1 100644
--- a/include/asm-sh/dma-mapping.h
+++ b/include/asm-sh/dma-mapping.h
@@ -53,7 +53,7 @@
 	consistent_free(vaddr, size);
 }
 
-static inline void dma_cache_sync(void *vaddr, size_t size,
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 				  enum dma_data_direction dir)
 {
 	consistent_sync(vaddr, size, (int)dir);
diff --git a/include/asm-sh/dma.h b/include/asm-sh/dma.h
index d9daa02..faf3051 100644
--- a/include/asm-sh/dma.h
+++ b/include/asm-sh/dma.h
@@ -14,9 +14,7 @@
 #include <linux/spinlock.h>
 #include <linux/wait.h>
 #include <linux/sysdev.h>
-#include <linux/device.h>
 #include <asm/cpu/dma.h>
-#include <asm/semaphore.h>
 
 /* The maximum address that we can perform a DMA transfer to on this platform */
 /* Don't define MAX_DMA_ADDRESS; it's useless on the SuperH and any
@@ -46,16 +44,21 @@
  * DMAC (dma_info) flags
  */
 enum {
-	DMAC_CHANNELS_CONFIGURED	= 0x00,
-	DMAC_CHANNELS_TEI_CAPABLE	= 0x01,
+	DMAC_CHANNELS_CONFIGURED	= 0x01,
+	DMAC_CHANNELS_TEI_CAPABLE	= 0x02,	/* Transfer end interrupt */
 };
 
 /*
  * DMA channel capabilities / flags
  */
 enum {
-	DMA_TEI_CAPABLE			= 0x01,
-	DMA_CONFIGURED			= 0x02,
+	DMA_CONFIGURED			= 0x01,
+
+	/*
+	 * Transfer end interrupt, inherited from DMAC.
+	 * wait_queue used in dma_wait_for_completion.
+	 */
+	DMA_TEI_CAPABLE			= 0x02,
 };
 
 extern spinlock_t dma_spin_lock;
@@ -68,28 +71,31 @@
 
 	int (*get_residue)(struct dma_channel *chan);
 	int (*xfer)(struct dma_channel *chan);
-	void (*configure)(struct dma_channel *chan, unsigned long flags);
+	int (*configure)(struct dma_channel *chan, unsigned long flags);
+	int (*extend)(struct dma_channel *chan, unsigned long op, void *param);
 };
 
 struct dma_channel {
-	char dev_id[16];
+	char dev_id[16];		/* channel name, unique per DMAC */
 
-	unsigned int chan;		/* Physical channel number */
+	unsigned int chan;		/* DMAC channel number */
 	unsigned int vchan;		/* Virtual channel number */
+
 	unsigned int mode;
 	unsigned int count;
 
 	unsigned long sar;
 	unsigned long dar;
 
+	const char **caps;
+
 	unsigned long flags;
 	atomic_t busy;
 
-	struct semaphore sem;
 	wait_queue_head_t wait_queue;
 
 	struct sys_device dev;
-	char *name;
+	void *priv_data;
 };
 
 struct dma_info {
@@ -103,6 +109,12 @@
 	struct dma_channel *channels;
 
 	struct list_head list;
+	int first_channel_nr;
+};
+
+struct dma_chan_caps {
+	int ch_num;
+	const char **caplist;
 };
 
 #define to_dma_channel(channel) container_of(channel, struct dma_channel, dev)
@@ -121,6 +133,8 @@
 #define dma_read_page(chan, from, to)	\
 	dma_read(chan, from, to, PAGE_SIZE)
 
+extern int request_dma_bycap(const char **dmac, const char **caps,
+			     const char *dev_id);
 extern int request_dma(unsigned int chan, const char *dev_id);
 extern void free_dma(unsigned int chan);
 extern int get_dma_residue(unsigned int chan);
@@ -131,6 +145,10 @@
 
 extern int register_dmac(struct dma_info *info);
 extern void unregister_dmac(struct dma_info *info);
+extern struct dma_info *get_dma_info_by_name(const char *dmac_name);
+
+extern int dma_extend(unsigned int chan, unsigned long op, void *param);
+extern int register_chan_caps(const char *dmac, struct dma_chan_caps *capslist);
 
 #ifdef CONFIG_SYSFS
 /* arch/sh/drivers/dma/dma-sysfs.c */
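
The additions above extend the SH DMA core with capability-based channel lookup (request_dma_bycap()) and a per-channel extension hook (dma_extend()). A rough usage sketch; the DMAC name, capability string, opcode, and the assumption that request_dma_bycap() returns a channel number (or a negative error) are all illustrative rather than taken from the patch:

	#include <asm/dma.h>

	static const char *example_dmacs[] = { "sh-dmac", NULL };	/* assumed DMAC name */
	static const char *example_caps[]  = { "memcpy", NULL };	/* assumed capability */

	static int example_request_by_cap(void)
	{
		int chan = request_dma_bycap(example_dmacs, example_caps, "example");

		if (chan < 0)
			return chan;		/* no channel with that capability */

		/* DMAC-specific operation; the opcode and parameter are driver-defined. */
		dma_extend(chan, 0, NULL);

		free_dma(chan);
		return 0;
	}
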
diff --git a/include/asm-sh/elf.h b/include/asm-sh/elf.h
index fc050fd..43ca244 100644
--- a/include/asm-sh/elf.h
+++ b/include/asm-sh/elf.h
@@ -74,7 +74,7 @@
 #define ELF_ARCH	EM_SH
 
 #define USE_ELF_CORE_DUMP
-#define ELF_EXEC_PAGESIZE	4096
+#define ELF_EXEC_PAGESIZE	PAGE_SIZE
 
 /* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
    use of this is to invoke "./ld.so someprog" to test out a new version of
diff --git a/include/asm-sh/entry-macros.S b/include/asm-sh/entry-macros.S
new file mode 100644
index 0000000..500030e
--- /dev/null
+++ b/include/asm-sh/entry-macros.S
@@ -0,0 +1,33 @@
+! entry.S macro definitions
+	
+	.macro	cli
+	stc	sr, r0
+	or	#0xf0, r0
+	ldc	r0, sr
+	.endm
+
+	.macro	sti
+	mov	#0xf0, r11
+	extu.b	r11, r11
+	not	r11, r11
+	stc	sr, r10
+	and	r11, r10
+#ifdef CONFIG_HAS_SR_RB
+	stc	k_g_imask, r11
+	or	r11, r10
+#endif
+	ldc	r10, sr
+	.endm
+
+	.macro	get_current_thread_info, ti, tmp
+#ifdef CONFIG_HAS_SR_RB
+	stc	r7_bank, \ti
+#else
+	mov	#((THREAD_SIZE - 1) >> 10) ^ 0xff, \tmp
+	shll8	\tmp
+	shll2	\tmp
+	mov	r15, \ti
+	and	\tmp, \ti
+#endif	
+	.endm
+
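
In the non-CONFIG_HAS_SR_RB path of get_current_thread_info above, the 8-bit immediate #((THREAD_SIZE - 1) >> 10) ^ 0xff is sign-extended and then shifted left ten bits (shll8 + shll2), which yields ~(THREAD_SIZE - 1) for the usual power-of-two THREAD_SIZE; masking the stack pointer with it locates the thread_info at the base of the kernel stack. A C-level sketch of the same computation (helper name is hypothetical):

	#include <linux/thread_info.h>

	/* thread_info sits at the bottom of the current kernel stack. */
	static inline struct thread_info *example_ti_from_sp(unsigned long sp)
	{
		return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
	}
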
diff --git a/include/asm-sh/irq-sh73180.h b/include/asm-sh/irq-sh73180.h
deleted file mode 100644
index b28af9a..0000000
--- a/include/asm-sh/irq-sh73180.h
+++ /dev/null
@@ -1,314 +0,0 @@
-#ifndef __ASM_SH_IRQ_SH73180_H
-#define __ASM_SH_IRQ_SH73180_H
-
-/*
- * linux/include/asm-sh/irq-sh73180.h
- *
- * Copyright (C) 2004 Takashi SHUDO <shudo@hitachi-ul.co.jp>
- */
-
-#undef INTC_IPRA
-#undef INTC_IPRB
-#undef INTC_IPRC
-#undef INTC_IPRD
-
-#undef DMTE0_IRQ
-#undef DMTE1_IRQ
-#undef DMTE2_IRQ
-#undef DMTE3_IRQ
-#undef DMTE4_IRQ
-#undef DMTE5_IRQ
-#undef DMTE6_IRQ
-#undef DMTE7_IRQ
-#undef DMAE_IRQ
-#undef DMA_IPR_ADDR
-#undef DMA_IPR_POS
-#undef DMA_PRIORITY
-
-#undef INTC_IMCR0
-#undef INTC_IMCR1
-#undef INTC_IMCR2
-#undef INTC_IMCR3
-#undef INTC_IMCR4
-#undef INTC_IMCR5
-#undef INTC_IMCR6
-#undef INTC_IMCR7
-#undef INTC_IMCR8
-#undef INTC_IMCR9
-#undef INTC_IMCR10
-
-
-#define INTC_IPRA  	0xA4080000UL
-#define INTC_IPRB  	0xA4080004UL
-#define INTC_IPRC  	0xA4080008UL
-#define INTC_IPRD  	0xA408000CUL
-#define INTC_IPRE  	0xA4080010UL
-#define INTC_IPRF  	0xA4080014UL
-#define INTC_IPRG  	0xA4080018UL
-#define INTC_IPRH  	0xA408001CUL
-#define INTC_IPRI  	0xA4080020UL
-#define INTC_IPRJ  	0xA4080024UL
-#define INTC_IPRK  	0xA4080028UL
-
-#define INTC_IMR0	0xA4080080UL
-#define INTC_IMR1	0xA4080084UL
-#define INTC_IMR2	0xA4080088UL
-#define INTC_IMR3	0xA408008CUL
-#define INTC_IMR4	0xA4080090UL
-#define INTC_IMR5	0xA4080094UL
-#define INTC_IMR6	0xA4080098UL
-#define INTC_IMR7	0xA408009CUL
-#define INTC_IMR8	0xA40800A0UL
-#define INTC_IMR9	0xA40800A4UL
-#define INTC_IMR10	0xA40800A8UL
-#define INTC_IMR11	0xA40800ACUL
-
-#define INTC_IMCR0	0xA40800C0UL
-#define INTC_IMCR1	0xA40800C4UL
-#define INTC_IMCR2	0xA40800C8UL
-#define INTC_IMCR3	0xA40800CCUL
-#define INTC_IMCR4	0xA40800D0UL
-#define INTC_IMCR5	0xA40800D4UL
-#define INTC_IMCR6	0xA40800D8UL
-#define INTC_IMCR7	0xA40800DCUL
-#define INTC_IMCR8	0xA40800E0UL
-#define INTC_IMCR9	0xA40800E4UL
-#define INTC_IMCR10	0xA40800E8UL
-#define INTC_IMCR11	0xA40800ECUL
-
-#define INTC_ICR0	0xA4140000UL
-#define INTC_ICR1	0xA414001CUL
-
-#define INTMSK0		0xa4140044
-#define INTMSKCLR0	0xa4140064
-#define INTC_INTPRI0	0xa4140010
-
-/*
-  NOTE:
-
-  *_IRQ = (INTEVT2 - 0x200)/0x20
-*/
-
-/* TMU0 */
-#define TMU0_IRQ	16
-#define TMU0_IPR_ADDR	INTC_IPRA
-#define TMU0_IPR_POS	 3
-#define TMU0_PRIORITY	 2
-
-#define TIMER_IRQ       16
-#define TIMER_IPR_ADDR  INTC_IPRA
-#define TIMER_IPR_POS    3
-#define TIMER_PRIORITY   2
-
-/* TMU1 */
-#define TMU1_IRQ	17
-#define TMU1_IPR_ADDR	INTC_IPRA
-#define TMU1_IPR_POS	 2
-#define TMU1_PRIORITY	 2
-
-/* TMU2 */
-#define TMU2_IRQ	18
-#define TMU2_IPR_ADDR	INTC_IPRA
-#define TMU2_IPR_POS	 1
-#define TMU2_PRIORITY	 2
-
-/* LCDC */
-#define LCDC_IRQ	28
-#define LCDC_IPR_ADDR	INTC_IPRB
-#define LCDC_IPR_POS	 2
-#define LCDC_PRIORITY	 2
-
-/* VIO (Video I/O) */
-#define CEU_IRQ		52
-#define BEU_IRQ		53
-#define VEU_IRQ		54
-#define VOU_IRQ		55
-#define VIO_IPR_ADDR	INTC_IPRE
-#define VIO_IPR_POS	 2
-#define VIO_PRIORITY	 2
-
-/* MFI (Multi Functional Interface) */
-#define MFI_IRQ		56
-#define MFI_IPR_ADDR	INTC_IPRE
-#define MFI_IPR_POS	 1
-#define MFI_PRIORITY	 2
-
-/* VPU (Video Processing Unit) */
-#define VPU_IRQ		60
-#define VPU_IPR_ADDR	INTC_IPRE
-#define VPU_IPR_POS	 0
-#define VPU_PRIORITY	 2
-
-/* 3DG */
-#define TDG_IRQ		63
-#define TDG_IPR_ADDR	INTC_IPRJ
-#define TDG_IPR_POS	 2
-#define TDG_PRIORITY	 2
-
-/* DMAC(1) */
-#define DMTE0_IRQ	48
-#define DMTE1_IRQ	49
-#define DMTE2_IRQ	50
-#define DMTE3_IRQ	51
-#define DMA1_IPR_ADDR	INTC_IPRE
-#define DMA1_IPR_POS	3
-#define DMA1_PRIORITY	7
-
-/* DMAC(2) */
-#define DMTE4_IRQ	76
-#define DMTE5_IRQ	77
-#define DMA2_IPR_ADDR	INTC_IPRF
-#define DMA2_IPR_POS	2
-#define DMA2_PRIORITY	7
-
-/* SCIF0 */
-#define SCIF_ERI_IRQ	80
-#define SCIF_RXI_IRQ	81
-#define SCIF_BRI_IRQ	82
-#define SCIF_TXI_IRQ	83
-#define SCIF_IPR_ADDR	INTC_IPRG
-#define SCIF_IPR_POS	3
-#define SCIF_PRIORITY	3
-
-/* SIOF0 */
-#define SIOF0_IRQ	84
-#define SIOF0_IPR_ADDR	INTC_IPRH
-#define SIOF0_IPR_POS	3
-#define SIOF0_PRIORITY	3
-
-/* FLCTL (Flash Memory Controller) */
-#define FLSTE_IRQ	92
-#define FLTEND_IRQ	93
-#define FLTRQ0_IRQ	94
-#define FLTRQ1_IRQ	95
-#define FLCTL_IPR_ADDR	INTC_IPRH
-#define FLCTL_IPR_POS	1
-#define FLCTL_PRIORITY	3
-
-/* IIC(0) (IIC Bus Interface) */
-#define IIC0_ALI_IRQ	96
-#define IIC0_TACKI_IRQ	97
-#define IIC0_WAITI_IRQ	98
-#define IIC0_DTEI_IRQ	99
-#define IIC0_IPR_ADDR	INTC_IPRH
-#define IIC0_IPR_POS	0
-#define IIC0_PRIORITY	3
-
-/* IIC(1) (IIC Bus Interface) */
-#define IIC1_ALI_IRQ	44
-#define IIC1_TACKI_IRQ	45
-#define IIC1_WAITI_IRQ	46
-#define IIC1_DTEI_IRQ	47
-#define IIC1_IPR_ADDR	INTC_IPRG
-#define IIC1_IPR_POS	0
-#define IIC1_PRIORITY	3
-
-/* SIO0 */
-#define SIO0_IRQ	88
-#define SIO0_IPR_ADDR	INTC_IPRI
-#define SIO0_IPR_POS	3
-#define SIO0_PRIORITY	3
-
-/* SDHI */
-#define SDHI_SDHII0_IRQ	100
-#define SDHI_SDHII1_IRQ	101
-#define SDHI_SDHII2_IRQ	102
-#define SDHI_SDHII3_IRQ	103
-#define SDHI_IPR_ADDR	INTC_IPRK
-#define SDHI_IPR_POS	0
-#define SDHI_PRIORITY	3
-
-/* SIU (Sound Interface Unit) */
-#define SIU_IRQ		108
-#define SIU_IPR_ADDR	INTC_IPRJ
-#define SIU_IPR_POS	1
-#define SIU_PRIORITY	3
-
-#define PORT_PACR	0xA4050100UL
-#define PORT_PBCR	0xA4050102UL
-#define PORT_PCCR	0xA4050104UL
-#define PORT_PDCR	0xA4050106UL
-#define PORT_PECR	0xA4050108UL
-#define PORT_PFCR	0xA405010AUL
-#define PORT_PGCR	0xA405010CUL
-#define PORT_PHCR	0xA405010EUL
-#define PORT_PJCR	0xA4050110UL
-#define PORT_PKCR	0xA4050112UL
-#define PORT_PLCR	0xA4050114UL
-#define PORT_SCPCR	0xA4050116UL
-#define PORT_PMCR	0xA4050118UL
-#define PORT_PNCR	0xA405011AUL
-#define PORT_PQCR	0xA405011CUL
-#define PORT_PRCR	0xA405011EUL
-#define PORT_PTCR	0xA405014CUL
-#define PORT_PUCR	0xA405014EUL
-#define PORT_PVCR	0xA4050150UL
-
-#define PORT_PSELA	0xA4050140UL
-#define PORT_PSELB	0xA4050142UL
-#define PORT_PSELC	0xA4050144UL
-#define PORT_PSELE	0xA4050158UL
-
-#define PORT_HIZCRA	0xA4050146UL
-#define PORT_HIZCRB	0xA4050148UL
-#define PORT_DRVCR	0xA405014AUL
-
-#define PORT_PADR  	0xA4050120UL
-#define PORT_PBDR  	0xA4050122UL
-#define PORT_PCDR  	0xA4050124UL
-#define PORT_PDDR  	0xA4050126UL
-#define PORT_PEDR  	0xA4050128UL
-#define PORT_PFDR  	0xA405012AUL
-#define PORT_PGDR  	0xA405012CUL
-#define PORT_PHDR  	0xA405012EUL
-#define PORT_PJDR  	0xA4050130UL
-#define PORT_PKDR  	0xA4050132UL
-#define PORT_PLDR  	0xA4050134UL
-#define PORT_SCPDR  	0xA4050136UL
-#define PORT_PMDR  	0xA4050138UL
-#define PORT_PNDR  	0xA405013AUL
-#define PORT_PQDR  	0xA405013CUL
-#define PORT_PRDR  	0xA405013EUL
-#define PORT_PTDR  	0xA405016CUL
-#define PORT_PUDR  	0xA405016EUL
-#define PORT_PVDR  	0xA4050170UL
-
-#define IRQ0_IRQ	32
-#define IRQ1_IRQ	33
-#define IRQ2_IRQ	34
-#define IRQ3_IRQ	35
-#define IRQ4_IRQ	36
-#define IRQ5_IRQ	37
-#define IRQ6_IRQ	38
-#define IRQ7_IRQ	39
-
-#define INTPRI00	0xA4140010UL
-
-#define IRQ0_IPR_ADDR	INTPRI00
-#define IRQ1_IPR_ADDR	INTPRI00
-#define IRQ2_IPR_ADDR	INTPRI00
-#define IRQ3_IPR_ADDR	INTPRI00
-#define IRQ4_IPR_ADDR	INTPRI00
-#define IRQ5_IPR_ADDR	INTPRI00
-#define IRQ6_IPR_ADDR	INTPRI00
-#define IRQ7_IPR_ADDR	INTPRI00
-
-#define IRQ0_IPR_POS	7
-#define IRQ1_IPR_POS	6
-#define IRQ2_IPR_POS	5
-#define IRQ3_IPR_POS	4
-#define IRQ4_IPR_POS	3
-#define IRQ5_IPR_POS	2
-#define IRQ6_IPR_POS	1
-#define IRQ7_IPR_POS	0
-
-#define IRQ0_PRIORITY	1
-#define IRQ1_PRIORITY	1
-#define IRQ2_PRIORITY	1
-#define IRQ3_PRIORITY	1
-#define IRQ4_PRIORITY	1
-#define IRQ5_PRIORITY	1
-#define IRQ6_PRIORITY	1
-#define IRQ7_PRIORITY	1
-
-#endif /* __ASM_SH_IRQ_SH73180_H */
diff --git a/include/asm-sh/irq-sh7343.h b/include/asm-sh/irq-sh7343.h
deleted file mode 100644
index 5d15419..0000000
--- a/include/asm-sh/irq-sh7343.h
+++ /dev/null
@@ -1,317 +0,0 @@
-#ifndef __ASM_SH_IRQ_SH7343_H
-#define __ASM_SH_IRQ_SH7343_H
-
-/*
- * linux/include/asm-sh/irq-sh7343.h
- *
- * Copyright (C) 2006 Kenati Technologies Inc.
- * Andre Mccurdy <andre@kenati.com>
- * Ranjit Deshpande <ranjit@kenati.com>
- */
-
-#undef INTC_IPRA
-#undef INTC_IPRB
-#undef INTC_IPRC
-#undef INTC_IPRD
-
-#undef DMTE0_IRQ
-#undef DMTE1_IRQ
-#undef DMTE2_IRQ
-#undef DMTE3_IRQ
-#undef DMTE4_IRQ
-#undef DMTE5_IRQ
-#undef DMTE6_IRQ
-#undef DMTE7_IRQ
-#undef DMAE_IRQ
-#undef DMA_IPR_ADDR
-#undef DMA_IPR_POS
-#undef DMA_PRIORITY
-
-#undef INTC_IMCR0
-#undef INTC_IMCR1
-#undef INTC_IMCR2
-#undef INTC_IMCR3
-#undef INTC_IMCR4
-#undef INTC_IMCR5
-#undef INTC_IMCR6
-#undef INTC_IMCR7
-#undef INTC_IMCR8
-#undef INTC_IMCR9
-#undef INTC_IMCR10
-
-
-#define INTC_IPRA  	0xA4080000UL
-#define INTC_IPRB  	0xA4080004UL
-#define INTC_IPRC  	0xA4080008UL
-#define INTC_IPRD  	0xA408000CUL
-#define INTC_IPRE  	0xA4080010UL
-#define INTC_IPRF  	0xA4080014UL
-#define INTC_IPRG  	0xA4080018UL
-#define INTC_IPRH  	0xA408001CUL
-#define INTC_IPRI  	0xA4080020UL
-#define INTC_IPRJ  	0xA4080024UL
-#define INTC_IPRK  	0xA4080028UL
-#define INTC_IPRL  	0xA408002CUL
-
-#define INTC_IMR0	0xA4080080UL
-#define INTC_IMR1	0xA4080084UL
-#define INTC_IMR2	0xA4080088UL
-#define INTC_IMR3	0xA408008CUL
-#define INTC_IMR4	0xA4080090UL
-#define INTC_IMR5	0xA4080094UL
-#define INTC_IMR6	0xA4080098UL
-#define INTC_IMR7	0xA408009CUL
-#define INTC_IMR8	0xA40800A0UL
-#define INTC_IMR9	0xA40800A4UL
-#define INTC_IMR10	0xA40800A8UL
-#define INTC_IMR11	0xA40800ACUL
-
-#define INTC_IMCR0	0xA40800C0UL
-#define INTC_IMCR1	0xA40800C4UL
-#define INTC_IMCR2	0xA40800C8UL
-#define INTC_IMCR3	0xA40800CCUL
-#define INTC_IMCR4	0xA40800D0UL
-#define INTC_IMCR5	0xA40800D4UL
-#define INTC_IMCR6	0xA40800D8UL
-#define INTC_IMCR7	0xA40800DCUL
-#define INTC_IMCR8	0xA40800E0UL
-#define INTC_IMCR9	0xA40800E4UL
-#define INTC_IMCR10	0xA40800E8UL
-#define INTC_IMCR11	0xA40800ECUL
-
-#define INTC_ICR0	0xA4140000UL
-#define INTC_ICR1	0xA414001CUL
-
-#define INTMSK0		0xa4140044
-#define INTMSKCLR0	0xa4140064
-#define INTC_INTPRI0	0xa4140010
-
-/*
-  NOTE:
-
-  *_IRQ = (INTEVT2 - 0x200)/0x20
-*/
-
-/* TMU0 */
-#define TMU0_IRQ	16
-#define TMU0_IPR_ADDR	INTC_IPRA
-#define TMU0_IPR_POS	 3
-#define TMU0_PRIORITY	 2
-
-#define TIMER_IRQ       16
-#define TIMER_IPR_ADDR  INTC_IPRA
-#define TIMER_IPR_POS    3
-#define TIMER_PRIORITY   2
-
-/* TMU1 */
-#define TMU1_IRQ	17
-#define TMU1_IPR_ADDR	INTC_IPRA
-#define TMU1_IPR_POS	 2
-#define TMU1_PRIORITY	 2
-
-/* TMU2 */
-#define TMU2_IRQ	18
-#define TMU2_IPR_ADDR	INTC_IPRA
-#define TMU2_IPR_POS	 1
-#define TMU2_PRIORITY	 2
-
-/* LCDC */
-#define LCDC_IRQ	28
-#define LCDC_IPR_ADDR	INTC_IPRB
-#define LCDC_IPR_POS	 2
-#define LCDC_PRIORITY	 2
-
-/* VIO (Video I/O) */
-#define CEU_IRQ		52
-#define BEU_IRQ		53
-#define VEU_IRQ		54
-#define VOU_IRQ		55
-#define VIO_IPR_ADDR	INTC_IPRE
-#define VIO_IPR_POS	 2
-#define VIO_PRIORITY	 2
-
-/* MFI (Multi Functional Interface) */
-#define MFI_IRQ		56
-#define MFI_IPR_ADDR	INTC_IPRE
-#define MFI_IPR_POS	 1
-#define MFI_PRIORITY	 2
-
-/* VPU (Video Processing Unit) */
-#define VPU_IRQ		60
-#define VPU_IPR_ADDR	INTC_IPRE
-#define VPU_IPR_POS	 0
-#define VPU_PRIORITY	 2
-
-/* 3DG */
-#define TDG_IRQ		63
-#define TDG_IPR_ADDR	INTC_IPRJ
-#define TDG_IPR_POS	 2
-#define TDG_PRIORITY	 2
-
-/* DMAC(1) */
-#define DMTE0_IRQ	48
-#define DMTE1_IRQ	49
-#define DMTE2_IRQ	50
-#define DMTE3_IRQ	51
-#define DMA1_IPR_ADDR	INTC_IPRE
-#define DMA1_IPR_POS	3
-#define DMA1_PRIORITY	7
-
-/* DMAC(2) */
-#define DMTE4_IRQ	76
-#define DMTE5_IRQ	77
-#define DMA2_IPR_ADDR	INTC_IPRF
-#define DMA2_IPR_POS	2
-#define DMA2_PRIORITY	7
-
-/* SCIF0 */
-#define SCIF_ERI_IRQ	80
-#define SCIF_RXI_IRQ	81
-#define SCIF_BRI_IRQ	82
-#define SCIF_TXI_IRQ	83
-#define SCIF_IPR_ADDR	INTC_IPRG
-#define SCIF_IPR_POS	3
-#define SCIF_PRIORITY	3
-
-/* SIOF0 */
-#define SIOF0_IRQ	84
-#define SIOF0_IPR_ADDR	INTC_IPRH
-#define SIOF0_IPR_POS	3
-#define SIOF0_PRIORITY	3
-
-/* FLCTL (Flash Memory Controller) */
-#define FLSTE_IRQ	92
-#define FLTEND_IRQ	93
-#define FLTRQ0_IRQ	94
-#define FLTRQ1_IRQ	95
-#define FLCTL_IPR_ADDR	INTC_IPRH
-#define FLCTL_IPR_POS	1
-#define FLCTL_PRIORITY	3
-
-/* IIC(0) (IIC Bus Interface) */
-#define IIC0_ALI_IRQ	96
-#define IIC0_TACKI_IRQ	97
-#define IIC0_WAITI_IRQ	98
-#define IIC0_DTEI_IRQ	99
-#define IIC0_IPR_ADDR	INTC_IPRH
-#define IIC0_IPR_POS	0
-#define IIC0_PRIORITY	3
-
-/* IIC(1) (IIC Bus Interface) */
-#define IIC1_ALI_IRQ	44
-#define IIC1_TACKI_IRQ	45
-#define IIC1_WAITI_IRQ	46
-#define IIC1_DTEI_IRQ	47
-#define IIC1_IPR_ADDR	INTC_IPRI
-#define IIC1_IPR_POS	0
-#define IIC1_PRIORITY	3
-
-/* SIO0 */
-#define SIO0_IRQ	88
-#define SIO0_IPR_ADDR	INTC_IPRI
-#define SIO0_IPR_POS	3
-#define SIO0_PRIORITY	3
-
-/* SDHI */
-#define SDHI_SDHII0_IRQ	100
-#define SDHI_SDHII1_IRQ	101
-#define SDHI_SDHII2_IRQ	102
-#define SDHI_SDHII3_IRQ	103
-#define SDHI_IPR_ADDR	INTC_IPRK
-#define SDHI_IPR_POS	0
-#define SDHI_PRIORITY	3
-
-/* SIU (Sound Interface Unit) */
-#define SIU_IRQ		108
-#define SIU_IPR_ADDR	INTC_IPRJ
-#define SIU_IPR_POS	1
-#define SIU_PRIORITY	3
-
-#define PORT_PACR	0xA4050100UL
-#define PORT_PBCR	0xA4050102UL
-#define PORT_PCCR	0xA4050104UL
-#define PORT_PDCR	0xA4050106UL
-#define PORT_PECR	0xA4050108UL
-#define PORT_PFCR	0xA405010AUL
-#define PORT_PGCR	0xA405010CUL
-#define PORT_PHCR	0xA405010EUL
-#define PORT_PJCR	0xA4050110UL
-#define PORT_PKCR	0xA4050112UL
-#define PORT_PLCR	0xA4050114UL
-#define PORT_SCPCR	0xA4050116UL
-#define PORT_PMCR	0xA4050118UL
-#define PORT_PNCR	0xA405011AUL
-#define PORT_PQCR	0xA405011CUL
-#define PORT_PRCR	0xA405011EUL
-#define PORT_PTCR	0xA405014CUL
-#define PORT_PUCR	0xA405014EUL
-#define PORT_PVCR	0xA4050150UL
-
-#define PORT_PSELA	0xA4050140UL
-#define PORT_PSELB	0xA4050142UL
-#define PORT_PSELC	0xA4050144UL
-#define PORT_PSELE	0xA4050158UL
-
-#define PORT_HIZCRA	0xA4050146UL
-#define PORT_HIZCRB	0xA4050148UL
-#define PORT_DRVCR	0xA405014AUL
-
-#define PORT_PADR  	0xA4050120UL
-#define PORT_PBDR  	0xA4050122UL
-#define PORT_PCDR  	0xA4050124UL
-#define PORT_PDDR  	0xA4050126UL
-#define PORT_PEDR  	0xA4050128UL
-#define PORT_PFDR  	0xA405012AUL
-#define PORT_PGDR  	0xA405012CUL
-#define PORT_PHDR  	0xA405012EUL
-#define PORT_PJDR  	0xA4050130UL
-#define PORT_PKDR  	0xA4050132UL
-#define PORT_PLDR  	0xA4050134UL
-#define PORT_SCPDR  	0xA4050136UL
-#define PORT_PMDR  	0xA4050138UL
-#define PORT_PNDR  	0xA405013AUL
-#define PORT_PQDR  	0xA405013CUL
-#define PORT_PRDR  	0xA405013EUL
-#define PORT_PTDR  	0xA405016CUL
-#define PORT_PUDR  	0xA405016EUL
-#define PORT_PVDR  	0xA4050170UL
-
-#define IRQ0_IRQ	32
-#define IRQ1_IRQ	33
-#define IRQ2_IRQ	34
-#define IRQ3_IRQ	35
-#define IRQ4_IRQ	36
-#define IRQ5_IRQ	37
-#define IRQ6_IRQ	38
-#define IRQ7_IRQ	39
-
-#define INTPRI00	0xA4140010UL
-
-#define IRQ0_IPR_ADDR	INTPRI00
-#define IRQ1_IPR_ADDR	INTPRI00
-#define IRQ2_IPR_ADDR	INTPRI00
-#define IRQ3_IPR_ADDR	INTPRI00
-#define IRQ4_IPR_ADDR	INTPRI00
-#define IRQ5_IPR_ADDR	INTPRI00
-#define IRQ6_IPR_ADDR	INTPRI00
-#define IRQ7_IPR_ADDR	INTPRI00
-
-#define IRQ0_IPR_POS	7
-#define IRQ1_IPR_POS	6
-#define IRQ2_IPR_POS	5
-#define IRQ3_IPR_POS	4
-#define IRQ4_IPR_POS	3
-#define IRQ5_IPR_POS	2
-#define IRQ6_IPR_POS	1
-#define IRQ7_IPR_POS	0
-
-#define IRQ0_PRIORITY	1
-#define IRQ1_PRIORITY	1
-#define IRQ2_PRIORITY	1
-#define IRQ3_PRIORITY	1
-#define IRQ4_PRIORITY	1
-#define IRQ5_PRIORITY	1
-#define IRQ6_PRIORITY	1
-#define IRQ7_PRIORITY	1
-
-#endif /* __ASM_SH_IRQ_SH7343_H */
diff --git a/include/asm-sh/irq-sh7780.h b/include/asm-sh/irq-sh7780.h
deleted file mode 100644
index 19912ae..0000000
--- a/include/asm-sh/irq-sh7780.h
+++ /dev/null
@@ -1,311 +0,0 @@
-#ifndef __ASM_SH_IRQ_SH7780_H
-#define __ASM_SH_IRQ_SH7780_H
-
-/*
- * linux/include/asm-sh/irq-sh7780.h
- *
- * Copyright (C) 2004 Takashi SHUDO <shudo@hitachi-ul.co.jp>
- */
-#define INTC_BASE	0xffd00000
-#define INTC_ICR0	(INTC_BASE+0x0)
-#define INTC_ICR1	(INTC_BASE+0x1c)
-#define INTC_INTPRI	(INTC_BASE+0x10)
-#define INTC_INTREQ	(INTC_BASE+0x24)
-#define INTC_INTMSK0	(INTC_BASE+0x44)
-#define INTC_INTMSK1	(INTC_BASE+0x48)
-#define INTC_INTMSK2	(INTC_BASE+0x40080)
-#define INTC_INTMSKCLR0	(INTC_BASE+0x64)
-#define INTC_INTMSKCLR1	(INTC_BASE+0x68)
-#define INTC_INTMSKCLR2	(INTC_BASE+0x40084)
-#define INTC_NMIFCR	(INTC_BASE+0xc0)
-#define INTC_USERIMASK	(INTC_BASE+0x30000)
-
-#define	INTC_INT2PRI0	(INTC_BASE+0x40000)
-#define	INTC_INT2PRI1	(INTC_BASE+0x40004)
-#define	INTC_INT2PRI2	(INTC_BASE+0x40008)
-#define	INTC_INT2PRI3	(INTC_BASE+0x4000c)
-#define	INTC_INT2PRI4	(INTC_BASE+0x40010)
-#define	INTC_INT2PRI5	(INTC_BASE+0x40014)
-#define	INTC_INT2PRI6	(INTC_BASE+0x40018)
-#define	INTC_INT2PRI7	(INTC_BASE+0x4001c)
-#define	INTC_INT2A0	(INTC_BASE+0x40030)
-#define	INTC_INT2A1	(INTC_BASE+0x40034)
-#define	INTC_INT2MSKR	(INTC_BASE+0x40038)
-#define	INTC_INT2MSKCR	(INTC_BASE+0x4003c)
-#define	INTC_INT2B0	(INTC_BASE+0x40040)
-#define	INTC_INT2B1	(INTC_BASE+0x40044)
-#define	INTC_INT2B2	(INTC_BASE+0x40048)
-#define	INTC_INT2B3	(INTC_BASE+0x4004c)
-#define	INTC_INT2B4	(INTC_BASE+0x40050)
-#define	INTC_INT2B5	(INTC_BASE+0x40054)
-#define	INTC_INT2B6	(INTC_BASE+0x40058)
-#define	INTC_INT2B7	(INTC_BASE+0x4005c)
-#define	INTC_INT2GPIC	(INTC_BASE+0x40090)
-/*
-  NOTE:
-  *_IRQ = (INTEVT2 - 0x200)/0x20
-*/
-/* IRQ 0-7 line external int*/
-#define IRQ0_IRQ	2
-#define IRQ0_IPR_ADDR	INTC_INTPRI
-#define IRQ0_IPR_POS	7
-#define IRQ0_PRIORITY	2
-
-#define IRQ1_IRQ	4
-#define IRQ1_IPR_ADDR	INTC_INTPRI
-#define IRQ1_IPR_POS	6
-#define IRQ1_PRIORITY	2
-
-#define IRQ2_IRQ	6
-#define IRQ2_IPR_ADDR	INTC_INTPRI
-#define IRQ2_IPR_POS	5
-#define IRQ2_PRIORITY	2
-
-#define IRQ3_IRQ	8
-#define IRQ3_IPR_ADDR	INTC_INTPRI
-#define IRQ3_IPR_POS	4
-#define IRQ3_PRIORITY	2
-
-#define IRQ4_IRQ	10
-#define IRQ4_IPR_ADDR	INTC_INTPRI
-#define IRQ4_IPR_POS	3
-#define IRQ4_PRIORITY	2
-
-#define IRQ5_IRQ	12
-#define IRQ5_IPR_ADDR	INTC_INTPRI
-#define IRQ5_IPR_POS	2
-#define IRQ5_PRIORITY	2
-
-#define IRQ6_IRQ	14
-#define IRQ6_IPR_ADDR	INTC_INTPRI
-#define IRQ6_IPR_POS	1
-#define IRQ6_PRIORITY	2
-
-#define IRQ7_IRQ	0
-#define IRQ7_IPR_ADDR	INTC_INTPRI
-#define IRQ7_IPR_POS	0
-#define IRQ7_PRIORITY	2
-
-/* TMU */
-/* ch0 */
-#define TMU_IRQ		28
-#define	TMU_IPR_ADDR	INTC_INT2PRI0
-#define	TMU_IPR_POS	3
-#define TMU_PRIORITY	2
-
-#define TIMER_IRQ	28
-#define	TIMER_IPR_ADDR	INTC_INT2PRI0
-#define	TIMER_IPR_POS	3
-#define TIMER_PRIORITY	2
-
-/* ch 1*/
-#define TMU_CH1_IRQ		29
-#define	TMU_CH1_IPR_ADDR	INTC_INT2PRI0
-#define	TMU_CH1_IPR_POS		2
-#define TMU_CH1_PRIORITY	2
-
-#define TIMER1_IRQ	29
-#define	TIMER1_IPR_ADDR	INTC_INT2PRI0
-#define	TIMER1_IPR_POS	2
-#define TIMER1_PRIORITY	2
-
-/* ch 2*/
-#define TMU_CH2_IRQ		30
-#define	TMU_CH2_IPR_ADDR	INTC_INT2PRI0
-#define	TMU_CH2_IPR_POS		1
-#define TMU_CH2_PRIORITY	2
-/* ch 2 Input capture */
-#define TMU_CH2IC_IRQ		31
-#define	TMU_CH2IC_IPR_ADDR	INTC_INT2PRI0
-#define	TMU_CH2IC_IPR_POS	0
-#define TMU_CH2IC_PRIORITY	2
-/* ch 3 */
-#define TMU_CH3_IRQ		96
-#define	TMU_CH3_IPR_ADDR	INTC_INT2PRI1
-#define	TMU_CH3_IPR_POS		3
-#define TMU_CH3_PRIORITY	2
-/* ch 4 */
-#define TMU_CH4_IRQ		97
-#define	TMU_CH4_IPR_ADDR	INTC_INT2PRI1
-#define	TMU_CH4_IPR_POS		2
-#define TMU_CH4_PRIORITY	2
-/* ch 5*/
-#define TMU_CH5_IRQ		98
-#define	TMU_CH5_IPR_ADDR	INTC_INT2PRI1
-#define	TMU_CH5_IPR_POS		1
-#define TMU_CH5_PRIORITY	2
-
-/* SCIF0 */
-#define SCIF0_ERI_IRQ	40
-#define SCIF0_RXI_IRQ	41
-#define SCIF0_BRI_IRQ	42
-#define SCIF0_TXI_IRQ	43
-#define	SCIF0_IPR_ADDR	INTC_INT2PRI2
-#define	SCIF0_IPR_POS	3
-#define SCIF0_PRIORITY	3
-
-/* SCIF1 */
-#define SCIF1_ERI_IRQ	76
-#define SCIF1_RXI_IRQ	77
-#define SCIF1_BRI_IRQ	78
-#define SCIF1_TXI_IRQ	79
-#define	SCIF1_IPR_ADDR	INTC_INT2PRI2
-#define	SCIF1_IPR_POS	2
-#define SCIF1_PRIORITY	3
-
-#define	WDT_IRQ		27
-#define	WDT_IPR_ADDR	INTC_INT2PRI2
-#define	WDT_IPR_POS	1
-#define	WDT_PRIORITY	2
-
-/* DMAC(0) */
-#define	DMINT0_IRQ	34
-#define	DMINT1_IRQ	35
-#define	DMINT2_IRQ	36
-#define	DMINT3_IRQ	37
-#define	DMINT4_IRQ	44
-#define	DMINT5_IRQ	45
-#define	DMINT6_IRQ	46
-#define	DMINT7_IRQ	47
-#define	DMAE_IRQ	38
-#define	DMA0_IPR_ADDR	INTC_INT2PRI3
-#define	DMA0_IPR_POS	2
-#define	DMA0_PRIORITY	7
-
-/* DMAC(1) */
-#define	DMINT8_IRQ	92
-#define	DMINT9_IRQ	93
-#define	DMINT10_IRQ	94
-#define	DMINT11_IRQ	95
-#define	DMA1_IPR_ADDR	INTC_INT2PRI3
-#define	DMA1_IPR_POS	1
-#define	DMA1_PRIORITY	7
-
-#define	DMTE0_IRQ	DMINT0_IRQ
-#define	DMTE4_IRQ	DMINT4_IRQ
-#define	DMA_IPR_ADDR	DMA0_IPR_ADDR
-#define	DMA_IPR_POS	DMA0_IPR_POS
-#define	DMA_PRIORITY	DMA0_PRIORITY
-
-/* CMT */
-#define	CMT_IRQ		56
-#define	CMT_IPR_ADDR	INTC_INT2PRI4
-#define	CMT_IPR_POS	3
-#define	CMT_PRIORITY	0
-
-/* HAC */
-#define	HAC_IRQ		60
-#define	HAC_IPR_ADDR	INTC_INT2PRI4
-#define	HAC_IPR_POS	2
-#define	CMT_PRIORITY	0
-
-/* PCIC(0) */
-#define	PCIC0_IRQ	64
-#define	PCIC0_IPR_ADDR	INTC_INT2PRI4
-#define	PCIC0_IPR_POS	1
-#define	PCIC0_PRIORITY	2
-
-/* PCIC(1) */
-#define	PCIC1_IRQ	65
-#define	PCIC1_IPR_ADDR	INTC_INT2PRI4
-#define	PCIC1_IPR_POS	0
-#define	PCIC1_PRIORITY	2
-
-/* PCIC(2) */
-#define	PCIC2_IRQ	66
-#define	PCIC2_IPR_ADDR	INTC_INT2PRI5
-#define	PCIC2_IPR_POS	3
-#define	PCIC2_PRIORITY	2
-
-/* PCIC(3) */
-#define	PCIC3_IRQ	67
-#define	PCIC3_IPR_ADDR	INTC_INT2PRI5
-#define	PCIC3_IPR_POS	2
-#define	PCIC3_PRIORITY	2
-
-/* PCIC(4) */
-#define	PCIC4_IRQ	68
-#define	PCIC4_IPR_ADDR	INTC_INT2PRI5
-#define	PCIC4_IPR_POS	1
-#define	PCIC4_PRIORITY	2
-
-/* PCIC(5) */
-#define	PCICERR_IRQ	69
-#define	PCICPWD3_IRQ	70
-#define	PCICPWD2_IRQ	71
-#define	PCICPWD1_IRQ	72
-#define	PCICPWD0_IRQ	73
-#define	PCIC5_IPR_ADDR	INTC_INT2PRI5
-#define	PCIC5_IPR_POS	0
-#define	PCIC5_PRIORITY	2
-
-/* SIOF */
-#define	SIOF_IRQ	80
-#define	SIOF_IPR_ADDR	INTC_INT2PRI6
-#define	SIOF_IPR_POS	3
-#define	SIOF_PRIORITY	3
-
-/* HSPI */
-#define	HSPI_IRQ	84
-#define	HSPI_IPR_ADDR	INTC_INT2PRI6
-#define	HSPI_IPR_POS	2
-#define	HSPI_PRIORITY	3
-
-/* MMCIF */
-#define	MMCIF_FSTAT_IRQ	88
-#define	MMCIF_TRAN_IRQ	89
-#define	MMCIF_ERR_IRQ	90
-#define	MMCIF_FRDY_IRQ	91
-#define	MMCIF_IPR_ADDR	INTC_INT2PRI6
-#define	MMCIF_IPR_POS	1
-#define	HSPI_PRIORITY	3
-
-/* SSI */
-#define	SSI_IRQ		100
-#define	SSI_IPR_ADDR	INTC_INT2PRI6
-#define	SSI_IPR_POS	0
-#define	SSI_PRIORITY	3
-
-/* FLCTL */
-#define	FLCTL_FLSTE_IRQ		104
-#define	FLCTL_FLTEND_IRQ	105
-#define	FLCTL_FLTRQ0_IRQ	106
-#define	FLCTL_FLTRQ1_IRQ	107
-#define	FLCTL_IPR_ADDR		INTC_INT2PRI7
-#define	FLCTL_IPR_POS		3
-#define	FLCTL_PRIORITY		3
-
-/* GPIO */
-#define	GPIO0_IRQ	108
-#define	GPIO1_IRQ	109
-#define	GPIO2_IRQ	110
-#define	GPIO3_IRQ	111
-#define	GPIO_IPR_ADDR	INTC_INT2PRI7
-#define	GPIO_IPR_POS	2
-#define	GPIO_PRIORITY	3
-
-#define	INTC_TMU0_MSK	0
-#define	INTC_TMU3_MSK	1
-#define	INTC_RTC_MSK	2
-#define	INTC_SCIF0_MSK	3
-#define	INTC_SCIF1_MSK	4
-#define	INTC_WDT_MSK	5
-#define	INTC_HUID_MSK	7
-#define	INTC_DMAC0_MSK	8
-#define	INTC_DMAC1_MSK	9
-#define	INTC_CMT_MSK	12
-#define	INTC_HAC_MSK	13
-#define	INTC_PCIC0_MSK	14
-#define	INTC_PCIC1_MSK	15
-#define	INTC_PCIC2_MSK	16
-#define	INTC_PCIC3_MSK	17
-#define	INTC_PCIC4_MSK	18
-#define	INTC_PCIC5_MSK	19
-#define	INTC_SIOF_MSK	20
-#define	INTC_HSPI_MSK	21
-#define	INTC_MMCIF_MSK	22
-#define	INTC_SSI_MSK	23
-#define	INTC_FLCTL_MSK	24
-#define	INTC_GPIO_MSK	25
-
-#endif /* __ASM_SH_IRQ_SH7780_H */
diff --git a/include/asm-sh/irq.h b/include/asm-sh/irq.h
index 6cd3e9e..fd57608 100644
--- a/include/asm-sh/irq.h
+++ b/include/asm-sh/irq.h
@@ -1,233 +1,9 @@
 #ifndef __ASM_SH_IRQ_H
 #define __ASM_SH_IRQ_H
 
-/*
- *
- * linux/include/asm-sh/irq.h
- *
- * Copyright (C) 1999  Niibe Yutaka & Takeshi Yaegashi
- * Copyright (C) 2000  Kazumoto Kojima
- * Copyright (C) 2003  Paul Mundt
- *
- */
-
 #include <asm/machvec.h>
 #include <asm/ptrace.h>		/* for pt_regs */
 
-#ifndef CONFIG_CPU_SUBTYPE_SH7780
-
-#define INTC_DMAC0_MSK	0
-
-#if defined(CONFIG_CPU_SH3)
-#define INTC_IPRA	0xfffffee2UL
-#define INTC_IPRB	0xfffffee4UL
-#elif defined(CONFIG_CPU_SH4)
-#define INTC_IPRA	0xffd00004UL
-#define INTC_IPRB	0xffd00008UL
-#define INTC_IPRC	0xffd0000cUL
-#define INTC_IPRD	0xffd00010UL
-#endif
-
-#define TIMER_IRQ	16
-#define TIMER_IPR_ADDR	INTC_IPRA
-#define TIMER_IPR_POS	 3
-#define TIMER_PRIORITY	 2
-
-#define TIMER1_IRQ	17
-#define TIMER1_IPR_ADDR	INTC_IPRA
-#define TIMER1_IPR_POS	 2
-#define TIMER1_PRIORITY	 4
-
-#define RTC_IRQ		22
-#define RTC_IPR_ADDR	INTC_IPRA
-#define RTC_IPR_POS	 0
-#define RTC_PRIORITY	TIMER_PRIORITY
-
-#if defined(CONFIG_CPU_SH3)
-#define DMTE0_IRQ	48
-#define DMTE1_IRQ	49
-#define DMTE2_IRQ	50
-#define DMTE3_IRQ	51
-#define DMA_IPR_ADDR	INTC_IPRE
-#define DMA_IPR_POS	3
-#define DMA_PRIORITY	7
-#if defined(CONFIG_CPU_SUBTYPE_SH7300)
-/* TMU2 */
-#define TIMER2_IRQ      18
-#define TIMER2_IPR_ADDR INTC_IPRA
-#define TIMER2_IPR_POS   1
-#define TIMER2_PRIORITY  2
-
-/* WDT */
-#define WDT_IRQ		27
-#define WDT_IPR_ADDR	INTC_IPRB
-#define WDT_IPR_POS	 3
-#define WDT_PRIORITY	 2
-
-/* SIM (SIM Card Module) */
-#define SIM_ERI_IRQ	23
-#define SIM_RXI_IRQ	24
-#define SIM_TXI_IRQ	25
-#define SIM_TEND_IRQ	26
-#define SIM_IPR_ADDR	INTC_IPRB
-#define SIM_IPR_POS	 1
-#define SIM_PRIORITY	 2
-
-/* VIO (Video I/O) */
-#define VIO_IRQ		52
-#define VIO_IPR_ADDR	INTC_IPRE
-#define VIO_IPR_POS	 2
-#define VIO_PRIORITY	 2
-
-/* MFI (Multi Functional Interface) */
-#define MFI_IRQ		56
-#define MFI_IPR_ADDR	INTC_IPRE
-#define MFI_IPR_POS	 1
-#define MFI_PRIORITY	 2
-
-/* VPU (Video Processing Unit) */
-#define VPU_IRQ		60
-#define VPU_IPR_ADDR	INTC_IPRE
-#define VPU_IPR_POS	 0
-#define VPU_PRIORITY	 2
-
-/* KEY (Key Scan Interface) */
-#define KEY_IRQ		79
-#define KEY_IPR_ADDR	INTC_IPRF
-#define KEY_IPR_POS	 3
-#define KEY_PRIORITY	 2
-
-/* CMT (Compare Match Timer) */
-#define CMT_IRQ		104
-#define CMT_IPR_ADDR	INTC_IPRF
-#define CMT_IPR_POS	 0
-#define CMT_PRIORITY	 2
-
-/* DMAC(1) */
-#define DMTE0_IRQ	48
-#define DMTE1_IRQ	49
-#define DMTE2_IRQ	50
-#define DMTE3_IRQ	51
-#define DMA1_IPR_ADDR	INTC_IPRE
-#define DMA1_IPR_POS	3
-#define DMA1_PRIORITY	7
-
-/* DMAC(2) */
-#define DMTE4_IRQ	76
-#define DMTE5_IRQ	77
-#define DMA2_IPR_ADDR	INTC_IPRF
-#define DMA2_IPR_POS	2
-#define DMA2_PRIORITY	7
-
-/* SIOF0 */
-#define SIOF0_IRQ	84
-#define SIOF0_IPR_ADDR	INTC_IPRH
-#define SIOF0_IPR_POS	3
-#define SIOF0_PRIORITY	3
-
-/* FLCTL (Flash Memory Controller) */
-#define FLSTE_IRQ	92
-#define FLTEND_IRQ	93
-#define FLTRQ0_IRQ	94
-#define FLTRQ1_IRQ	95
-#define FLCTL_IPR_ADDR	INTC_IPRH
-#define FLCTL_IPR_POS	1
-#define FLCTL_PRIORITY	3
-
-/* IIC (IIC Bus Interface) */
-#define IIC_ALI_IRQ	96
-#define IIC_TACKI_IRQ	97
-#define IIC_WAITI_IRQ	98
-#define IIC_DTEI_IRQ	99
-#define IIC_IPR_ADDR	INTC_IPRH
-#define IIC_IPR_POS	0
-#define IIC_PRIORITY	3
-
-/* SIO0 */
-#define SIO0_IRQ	88
-#define SIO0_IPR_ADDR	INTC_IPRI
-#define SIO0_IPR_POS	3
-#define SIO0_PRIORITY	3
-
-/* SIU (Sound Interface Unit) */
-#define SIU_IRQ		108
-#define SIU_IPR_ADDR	INTC_IPRJ
-#define SIU_IPR_POS	1
-#define SIU_PRIORITY	3
-
-#endif
-#elif defined(CONFIG_CPU_SH4)
-#define DMTE0_IRQ	34
-#define DMTE1_IRQ	35
-#define DMTE2_IRQ	36
-#define DMTE3_IRQ	37
-#define DMTE4_IRQ	44	/* 7751R only */
-#define DMTE5_IRQ	45	/* 7751R only */
-#define DMTE6_IRQ	46	/* 7751R only */
-#define DMTE7_IRQ	47	/* 7751R only */
-#define DMAE_IRQ	38
-#define DMA_IPR_ADDR	INTC_IPRC
-#define DMA_IPR_POS	2
-#define DMA_PRIORITY	7
-#endif
-
-#if defined (CONFIG_CPU_SUBTYPE_SH7707) || defined (CONFIG_CPU_SUBTYPE_SH7708) || \
-    defined (CONFIG_CPU_SUBTYPE_SH7709) || defined (CONFIG_CPU_SUBTYPE_SH7750) || \
-    defined (CONFIG_CPU_SUBTYPE_SH7751) || defined (CONFIG_CPU_SUBTYPE_SH7706)
-#define SCI_ERI_IRQ	23
-#define SCI_RXI_IRQ	24
-#define SCI_TXI_IRQ	25
-#define SCI_IPR_ADDR	INTC_IPRB
-#define SCI_IPR_POS	1
-#define SCI_PRIORITY	3
-#endif
-
-#if defined(CONFIG_CPU_SUBTYPE_SH7300)
-#define SCIF0_IRQ	80
-#define SCIF0_IPR_ADDR	INTC_IPRG
-#define SCIF0_IPR_POS	3
-#define SCIF0_PRIORITY	3
-#elif defined(CONFIG_CPU_SUBTYPE_SH7705) || \
-      defined(CONFIG_CPU_SUBTYPE_SH7706) || \
-      defined(CONFIG_CPU_SUBTYPE_SH7707) || \
-      defined(CONFIG_CPU_SUBTYPE_SH7709)
-#define SCIF_ERI_IRQ	56
-#define SCIF_RXI_IRQ	57
-#define SCIF_BRI_IRQ	58
-#define SCIF_TXI_IRQ	59
-#define SCIF_IPR_ADDR	INTC_IPRE
-#define SCIF_IPR_POS	1
-#define SCIF_PRIORITY	3
-
-#define IRDA_ERI_IRQ	52
-#define IRDA_RXI_IRQ	53
-#define IRDA_BRI_IRQ	54
-#define IRDA_TXI_IRQ	55
-#define IRDA_IPR_ADDR	INTC_IPRE
-#define IRDA_IPR_POS	2
-#define IRDA_PRIORITY	3
-#elif defined(CONFIG_CPU_SUBTYPE_SH7750) || defined(CONFIG_CPU_SUBTYPE_SH7751) || \
-      defined(CONFIG_CPU_SUBTYPE_ST40STB1) || defined(CONFIG_CPU_SUBTYPE_SH4_202)
-#define SCIF_ERI_IRQ	40
-#define SCIF_RXI_IRQ	41
-#define SCIF_BRI_IRQ	42
-#define SCIF_TXI_IRQ	43
-#define SCIF_IPR_ADDR	INTC_IPRC
-#define SCIF_IPR_POS	1
-#define SCIF_PRIORITY	3
-#if defined(CONFIG_CPU_SUBTYPE_ST40STB1)
-#define SCIF1_ERI_IRQ	23
-#define SCIF1_RXI_IRQ	24
-#define SCIF1_BRI_IRQ	25
-#define SCIF1_TXI_IRQ	26
-#define SCIF1_IPR_ADDR	INTC_IPRB
-#define SCIF1_IPR_POS	1
-#define SCIF1_PRIORITY	3
-#endif /* ST40STB1 */
-
-#endif /* 775x / SH4-202 / ST40STB1 */
-#endif /* 7780 */
-
 /* NR_IRQS is made from three components:
  *   1. ONCHIP_NR_IRQS - number of IRLS + on-chip peripherial modules
  *   2. PINT_NR_IRQS   - number of PINT interrupts
@@ -265,6 +41,10 @@
 # define ONCHIP_NR_IRQS 109
 #elif defined(CONFIG_CPU_SUBTYPE_SH7780)
 # define ONCHIP_NR_IRQS 111
+#elif defined(CONFIG_CPU_SUBTYPE_SH7206)
+# define ONCHIP_NR_IRQS 256
+#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
+# define ONCHIP_NR_IRQS 128
 #elif defined(CONFIG_SH_UNKNOWN)	/* Most be last */
 # define ONCHIP_NR_IRQS 144
 #endif
@@ -312,9 +92,11 @@
 /* NR_IRQS. 1+2+3 */
 #define NR_IRQS (ONCHIP_NR_IRQS + PINT_NR_IRQS + OFFCHIP_NR_IRQS)
 
-extern void disable_irq(unsigned int);
-extern void disable_irq_nosync(unsigned int);
-extern void enable_irq(unsigned int);
+/*
+ * Convert back and forth between INTEVT and IRQ values.
+ */
+#define evt2irq(evt)		(((evt) >> 5) - 16)
+#define irq2evt(irq)		(((irq) + 16) << 5)
 
 /*
  * Simple Mask Register Support
@@ -327,362 +109,36 @@
  */
 void init_IRQ_pint(void);
 
+/*
+ * The shift value is now the number of bits to shift, not the number of
+ * bits/4. This is to make it easier to read the value directly from the
+ * datasheets. The IPR address, addr, will be set from ipr_idx via the
+ * map_ipridx_to_addr function.
+ */
 struct ipr_data {
 	unsigned int irq;
-	unsigned int addr;	/* Address of Interrupt Priority Register */
-	int shift;		/* Shifts of the 16-bit data */
+	int ipr_idx;		/* Index for the IPR register */
+	int shift;		/* Number of bits to shift the data */
 	int priority;		/* The priority */
+	unsigned int addr;	/* Address of Interrupt Priority Register */
 };
 
 /*
+ * Given an IPR IDX, map the value to an IPR register address.
+ */
+unsigned int map_ipridx_to_addr(int idx);
+
+/*
+ * Enable individual interrupt mode for external IPR IRQs.
+ */
+void ipr_irq_enable_irlm(void);
+
+/*
  * Function for "on chip support modules".
  */
-extern void make_ipr_irq(struct ipr_data *table, unsigned int nr_irqs);
-extern void make_imask_irq(unsigned int irq);
-
-#if defined(CONFIG_CPU_SUBTYPE_SH7300)
-#undef INTC_IPRA
-#undef INTC_IPRB
-#define INTC_IPRA  	0xA414FEE2UL
-#define INTC_IPRB  	0xA414FEE4UL
-#define INTC_IPRC  	0xA4140016UL
-#define INTC_IPRD  	0xA4140018UL
-#define INTC_IPRE  	0xA414001AUL
-#define INTC_IPRF  	0xA4080000UL
-#define INTC_IPRG  	0xA4080002UL
-#define INTC_IPRH  	0xA4080004UL
-#define INTC_IPRI  	0xA4080006UL
-#define INTC_IPRJ  	0xA4080008UL
-
-#define INTC_IMR0	0xA4080040UL
-#define INTC_IMR1	0xA4080042UL
-#define INTC_IMR2	0xA4080044UL
-#define INTC_IMR3	0xA4080046UL
-#define INTC_IMR4	0xA4080048UL
-#define INTC_IMR5	0xA408004AUL
-#define INTC_IMR6	0xA408004CUL
-#define INTC_IMR7	0xA408004EUL
-#define INTC_IMR8	0xA4080050UL
-#define INTC_IMR9	0xA4080052UL
-#define INTC_IMR10	0xA4080054UL
-
-#define INTC_IMCR0	0xA4080060UL
-#define INTC_IMCR1	0xA4080062UL
-#define INTC_IMCR2	0xA4080064UL
-#define INTC_IMCR3	0xA4080066UL
-#define INTC_IMCR4	0xA4080068UL
-#define INTC_IMCR5	0xA408006AUL
-#define INTC_IMCR6	0xA408006CUL
-#define INTC_IMCR7	0xA408006EUL
-#define INTC_IMCR8	0xA4080070UL
-#define INTC_IMCR9	0xA4080072UL
-#define INTC_IMCR10	0xA4080074UL
-
-#define INTC_ICR0	0xA414FEE0UL
-#define INTC_ICR1	0xA4140010UL
-
-#define INTC_IRR0	0xA4140004UL
-
-#define PORT_PACR	0xA4050100UL
-#define PORT_PBCR	0xA4050102UL
-#define PORT_PCCR	0xA4050104UL
-#define PORT_PDCR	0xA4050106UL
-#define PORT_PECR	0xA4050108UL
-#define PORT_PFCR	0xA405010AUL
-#define PORT_PGCR	0xA405010CUL
-#define PORT_PHCR	0xA405010EUL
-#define PORT_PJCR	0xA4050110UL
-#define PORT_PKCR	0xA4050112UL
-#define PORT_PLCR	0xA4050114UL
-#define PORT_SCPCR	0xA4050116UL
-#define PORT_PMCR	0xA4050118UL
-#define PORT_PNCR	0xA405011AUL
-#define PORT_PQCR	0xA405011CUL
-
-#define PORT_PSELA	0xA4050140UL
-#define PORT_PSELB	0xA4050142UL
-#define PORT_PSELC	0xA4050144UL
-
-#define PORT_HIZCRA	0xA4050146UL
-#define PORT_HIZCRB	0xA4050148UL
-#define PORT_DRVCR	0xA4050150UL
-
-#define PORT_PADR  	0xA4050120UL
-#define PORT_PBDR  	0xA4050122UL
-#define PORT_PCDR  	0xA4050124UL
-#define PORT_PDDR  	0xA4050126UL
-#define PORT_PEDR  	0xA4050128UL
-#define PORT_PFDR  	0xA405012AUL
-#define PORT_PGDR  	0xA405012CUL
-#define PORT_PHDR  	0xA405012EUL
-#define PORT_PJDR  	0xA4050130UL
-#define PORT_PKDR  	0xA4050132UL
-#define PORT_PLDR  	0xA4050134UL
-#define PORT_SCPDR  	0xA4050136UL
-#define PORT_PMDR  	0xA4050138UL
-#define PORT_PNDR  	0xA405013AUL
-#define PORT_PQDR  	0xA405013CUL
-
-#define IRQ0_IRQ	32
-#define IRQ1_IRQ	33
-#define IRQ2_IRQ	34
-#define IRQ3_IRQ	35
-#define IRQ4_IRQ	36
-#define IRQ5_IRQ	37
-
-#define IRQ0_IPR_ADDR	INTC_IPRC
-#define IRQ1_IPR_ADDR	INTC_IPRC
-#define IRQ2_IPR_ADDR	INTC_IPRC
-#define IRQ3_IPR_ADDR	INTC_IPRC
-#define IRQ4_IPR_ADDR	INTC_IPRD
-#define IRQ5_IPR_ADDR	INTC_IPRD
-
-#define IRQ0_IPR_POS	0
-#define IRQ1_IPR_POS	1
-#define IRQ2_IPR_POS	2
-#define IRQ3_IPR_POS	3
-#define IRQ4_IPR_POS	0
-#define IRQ5_IPR_POS	1
-
-#define IRQ0_PRIORITY	1
-#define IRQ1_PRIORITY	1
-#define IRQ2_PRIORITY	1
-#define IRQ3_PRIORITY	1
-#define IRQ4_PRIORITY	1
-#define IRQ5_PRIORITY	1
-
-extern int ipr_irq_demux(int irq);
-#define __irq_demux(irq) ipr_irq_demux(irq)
-
-#elif defined(CONFIG_CPU_SUBTYPE_SH7604)
-#define INTC_IPRA	0xfffffee2UL
-#define INTC_IPRB	0xfffffe60UL
-
-#define INTC_VCRA	0xfffffe62UL
-#define INTC_VCRB	0xfffffe64UL
-#define INTC_VCRC	0xfffffe66UL
-#define INTC_VCRD	0xfffffe68UL
-
-#define INTC_VCRWDT	0xfffffee4UL
-#define INTC_VCRDIV	0xffffff0cUL
-#define INTC_VCRDMA0	0xffffffa0UL
-#define INTC_VCRDMA1	0xffffffa8UL
-
-#define INTC_ICR	0xfffffee0UL
-#elif defined(CONFIG_CPU_SUBTYPE_SH7705) || \
-      defined(CONFIG_CPU_SUBTYPE_SH7706) || \
-      defined(CONFIG_CPU_SUBTYPE_SH7707) || \
-      defined(CONFIG_CPU_SUBTYPE_SH7709) || \
-      defined(CONFIG_CPU_SUBTYPE_SH7710)
-#define INTC_IRR0	0xa4000004UL
-#define INTC_IRR1	0xa4000006UL
-#define INTC_IRR2	0xa4000008UL
-
-#define INTC_ICR0	0xfffffee0UL
-#define INTC_ICR1	0xa4000010UL
-#define INTC_ICR2	0xa4000012UL
-#define INTC_INTER	0xa4000014UL
-
-#define INTC_IPRC	0xa4000016UL
-#define INTC_IPRD	0xa4000018UL
-#define INTC_IPRE	0xa400001aUL
-#if defined(CONFIG_CPU_SUBTYPE_SH7707)
-#define INTC_IPRF	0xa400001cUL
-#elif defined(CONFIG_CPU_SUBTYPE_SH7705)
-#define INTC_IPRF	0xa4080000UL
-#define INTC_IPRG	0xa4080002UL
-#define INTC_IPRH	0xa4080004UL
-#elif defined(CONFIG_CPU_SUBTYPE_SH7710)
-/* Interrupt Controller Registers */
-#undef INTC_IPRA
-#undef INTC_IPRB
-#define INTC_IPRA  	0xA414FEE2UL
-#define INTC_IPRB  	0xA414FEE4UL
-#define INTC_IPRF  	0xA4080000UL
-#define INTC_IPRG  	0xA4080002UL
-#define INTC_IPRH  	0xA4080004UL
-#define INTC_IPRI  	0xA4080006UL
-
-#undef INTC_ICR0
-#undef INTC_ICR1
-#define INTC_ICR0	0xA414FEE0UL
-#define INTC_ICR1	0xA4140010UL
-
-#define INTC_IRR0	0xa4000004UL
-#define INTC_IRR1	0xa4000006UL
-#define INTC_IRR2	0xa4000008UL
-#define INTC_IRR3	0xa400000AUL
-#define INTC_IRR4	0xa400000CUL
-#define INTC_IRR5	0xa4080020UL
-#define INTC_IRR7	0xa4080024UL
-#define INTC_IRR8	0xa4080026UL
-
-/* Interrupt numbers */
-#define TIMER2_IRQ      18
-#define TIMER2_IPR_ADDR INTC_IPRA
-#define TIMER2_IPR_POS   1
-#define TIMER2_PRIORITY  2
-
-/* WDT */
-#define WDT_IRQ		27
-#define WDT_IPR_ADDR	INTC_IPRB
-#define WDT_IPR_POS	 3
-#define WDT_PRIORITY	 2
-
-#define SCIF0_ERI_IRQ	52
-#define SCIF0_RXI_IRQ	53
-#define SCIF0_BRI_IRQ	54
-#define SCIF0_TXI_IRQ	55
-#define SCIF0_IPR_ADDR	INTC_IPRE
-#define SCIF0_IPR_POS	2
-#define SCIF0_PRIORITY	3
-
-#define DMTE4_IRQ	76
-#define DMTE5_IRQ	77
-#define DMA2_IPR_ADDR	INTC_IPRF
-#define DMA2_IPR_POS	2
-#define DMA2_PRIORITY	7
-
-#define IPSEC_IRQ	79
-#define IPSEC_IPR_ADDR	INTC_IPRF
-#define IPSEC_IPR_POS	3
-#define IPSEC_PRIORITY	3
-
-/* EDMAC */
-#define EDMAC0_IRQ	80
-#define EDMAC0_IPR_ADDR	INTC_IPRG
-#define EDMAC0_IPR_POS	3
-#define EDMAC0_PRIORITY	3
-
-#define EDMAC1_IRQ	81
-#define EDMAC1_IPR_ADDR	INTC_IPRG
-#define EDMAC1_IPR_POS	2
-#define EDMAC1_PRIORITY	3
-
-#define EDMAC2_IRQ	82
-#define EDMAC2_IPR_ADDR	INTC_IPRG
-#define EDMAC2_IPR_POS	1
-#define EDMAC2_PRIORITY	3
-
-/* SIOF */
-#define SIOF0_ERI_IRQ	96
-#define SIOF0_TXI_IRQ	97
-#define SIOF0_RXI_IRQ	98
-#define SIOF0_CCI_IRQ	99
-#define SIOF0_IPR_ADDR	INTC_IPRH
-#define SIOF0_IPR_POS	0
-#define SIOF0_PRIORITY	7
-
-#define SIOF1_ERI_IRQ	100
-#define SIOF1_TXI_IRQ	101
-#define SIOF1_RXI_IRQ	102
-#define SIOF1_CCI_IRQ	103
-#define SIOF1_IPR_ADDR	INTC_IPRI
-#define SIOF1_IPR_POS	1
-#define SIOF1_PRIORITY	7
-#endif /* CONFIG_CPU_SUBTYPE_SH7710 */
-
-#if defined(CONFIG_CPU_SUBTYPE_SH7710)
-#define PORT_PACR	0xa4050100UL
-#define PORT_PBCR	0xa4050102UL
-#define PORT_PCCR	0xa4050104UL
-#define PORT_PETCR	0xa4050106UL
-#define PORT_PADR  	0xa4050120UL
-#define PORT_PBDR  	0xa4050122UL
-#define PORT_PCDR  	0xa4050124UL
-#else
-#define PORT_PACR	0xa4000100UL
-#define PORT_PBCR	0xa4000102UL
-#define PORT_PCCR	0xa4000104UL
-#define PORT_PFCR	0xa400010aUL
-#define PORT_PADR  	0xa4000120UL
-#define PORT_PBDR  	0xa4000122UL
-#define PORT_PCDR  	0xa4000124UL
-#define PORT_PFDR  	0xa400012aUL
-#endif
-
-#define IRQ0_IRQ	32
-#define IRQ1_IRQ	33
-#define IRQ2_IRQ	34
-#define IRQ3_IRQ	35
-#define IRQ4_IRQ	36
-#define IRQ5_IRQ	37
-
-#define IRQ0_IPR_ADDR	INTC_IPRC
-#define IRQ1_IPR_ADDR	INTC_IPRC
-#define IRQ2_IPR_ADDR	INTC_IPRC
-#define IRQ3_IPR_ADDR	INTC_IPRC
-#define IRQ4_IPR_ADDR	INTC_IPRD
-#define IRQ5_IPR_ADDR	INTC_IPRD
-
-#define IRQ0_IPR_POS	0
-#define IRQ1_IPR_POS	1
-#define IRQ2_IPR_POS	2
-#define IRQ3_IPR_POS	3
-#define IRQ4_IPR_POS	0
-#define IRQ5_IPR_POS	1
-
-#define IRQ0_PRIORITY	1
-#define IRQ1_PRIORITY	1
-#define IRQ2_PRIORITY	1
-#define IRQ3_PRIORITY	1
-#define IRQ4_PRIORITY	1
-#define IRQ5_PRIORITY	1
-
-#define PINT0_IRQ	40
-#define PINT8_IRQ	41
-
-#define PINT0_IPR_ADDR	INTC_IPRD
-#define PINT8_IPR_ADDR	INTC_IPRD
-
-#define PINT0_IPR_POS	3
-#define PINT8_IPR_POS	2
-#define PINT0_PRIORITY	2
-#define PINT8_PRIORITY	2
-
-extern int ipr_irq_demux(int irq);
-#define __irq_demux(irq) ipr_irq_demux(irq)
-#endif /* CONFIG_CPU_SUBTYPE_SH7707 || CONFIG_CPU_SUBTYPE_SH7709 */
-
-#if defined(CONFIG_CPU_SUBTYPE_SH7750) || defined(CONFIG_CPU_SUBTYPE_SH7751) || \
-    defined(CONFIG_CPU_SUBTYPE_ST40STB1) || defined(CONFIG_CPU_SUBTYPE_SH4_202)
-#define INTC_ICR        0xffd00000
-#define INTC_ICR_NMIL	(1<<15)
-#define INTC_ICR_MAI	(1<<14)
-#define INTC_ICR_NMIB	(1<<9)
-#define INTC_ICR_NMIE	(1<<8)
-#define INTC_ICR_IRLM	(1<<7)
-#endif
-
-#ifdef CONFIG_CPU_SUBTYPE_SH7780
-#include <asm/irq-sh7780.h>
-#endif
-
-/* SH with INTC2-style interrupts */
-#ifdef CONFIG_CPU_HAS_INTC2_IRQ
-#if defined(CONFIG_CPU_SUBTYPE_ST40STB1)
-#define INTC2_BASE	0xfe080000
-#define INTC2_FIRST_IRQ 64
-#define INTC2_INTREQ_OFFSET	0x20
-#define INTC2_INTMSK_OFFSET	0x40
-#define INTC2_INTMSKCLR_OFFSET	0x60
-#define NR_INTC2_IRQS	25
-#elif defined(CONFIG_CPU_SUBTYPE_SH7760)
-#define INTC2_BASE	0xfe080000
-#define INTC2_FIRST_IRQ 48	/* INTEVT 0x800 */
-#define INTC2_INTREQ_OFFSET	0x20
-#define INTC2_INTMSK_OFFSET	0x40
-#define INTC2_INTMSKCLR_OFFSET	0x60
-#define NR_INTC2_IRQS	64
-#elif defined(CONFIG_CPU_SUBTYPE_SH7780)
-#define INTC2_BASE	0xffd40000
-#define INTC2_FIRST_IRQ	21
-#define INTC2_INTMSK_OFFSET	(0x38)
-#define INTC2_INTMSKCLR_OFFSET	(0x3c)
-#define NR_INTC2_IRQS	60
-#endif
-
-#define INTC2_INTPRI_OFFSET	0x00
+void make_ipr_irq(struct ipr_data *table, unsigned int nr_irqs);
+void make_imask_irq(unsigned int irq);
+void init_IRQ_ipr(void);
 
 struct intc2_data {
 	unsigned short irq;
@@ -693,20 +149,14 @@
 
 void make_intc2_irq(struct intc2_data *, unsigned int nr_irqs);
 void init_IRQ_intc2(void);
-#endif
-
-extern int shmse_irq_demux(int irq);
 
 static inline int generic_irq_demux(int irq)
 {
 	return irq;
 }
 
-#ifndef __irq_demux
-#define __irq_demux(irq)	(irq)
-#endif
 #define irq_canonicalize(irq)	(irq)
-#define irq_demux(irq)		__irq_demux(sh_mv.mv_irq_demux(irq))
+#define irq_demux(irq)		sh_mv.mv_irq_demux(irq)
 
 #ifdef CONFIG_4KSTACKS
 extern void irq_ctx_init(int cpu);
@@ -717,12 +167,4 @@
 # define irq_ctx_exit(cpu) do { } while (0)
 #endif
 
-#if defined(CONFIG_CPU_SUBTYPE_SH73180)
-#include <asm/irq-sh73180.h>
-#endif
-
-#if defined(CONFIG_CPU_SUBTYPE_SH7343)
-#include <asm/irq-sh7343.h>
-#endif
-
 #endif /* __ASM_SH_IRQ_H */
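
The evt2irq()/irq2evt() macros added above encode the same relation the deleted per-CPU headers documented as *_IRQ = (INTEVT2 - 0x200)/0x20, since ((evt) >> 5) - 16 equals (evt - 0x200) / 0x20 for aligned INTEVT codes. A quick round-trip sketch (hypothetical function, values cross-checked against the removed tables):

	#include <asm/irq.h>

	static void example_evt_roundtrip(void)
	{
		/* INTEVT 0x400 maps to IRQ (0x400 >> 5) - 16 = 16, i.e. TMU0. */
		unsigned int irq = evt2irq(0x400);	/* 16 */
		unsigned int evt = irq2evt(irq);	/* back to 0x400 */

		(void)irq;
		(void)evt;
	}
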
diff --git a/include/asm-sh/irqflags.h b/include/asm-sh/irqflags.h
new file mode 100644
index 0000000..9dedc1b
--- /dev/null
+++ b/include/asm-sh/irqflags.h
@@ -0,0 +1,123 @@
+#ifndef __ASM_SH_IRQFLAGS_H
+#define __ASM_SH_IRQFLAGS_H
+
+static inline void raw_local_irq_enable(void)
+{
+	unsigned long __dummy0, __dummy1;
+
+	__asm__ __volatile__ (
+		"stc	sr, %0\n\t"
+		"and	%1, %0\n\t"
+#ifdef CONFIG_CPU_HAS_SR_RB
+		"stc	r6_bank, %1\n\t"
+		"or	%1, %0\n\t"
+#endif
+		"ldc	%0, sr\n\t"
+		: "=&r" (__dummy0), "=r" (__dummy1)
+		: "1" (~0x000000f0)
+		: "memory"
+	);
+}
+
+static inline void raw_local_irq_disable(void)
+{
+	unsigned long flags;
+
+	__asm__ __volatile__ (
+		"stc	sr, %0\n\t"
+		"or	#0xf0, %0\n\t"
+		"ldc	%0, sr\n\t"
+		: "=&z" (flags)
+		: /* no inputs */
+		: "memory"
+	);
+}
+
+static inline void set_bl_bit(void)
+{
+	unsigned long __dummy0, __dummy1;
+
+	__asm__ __volatile__ (
+		"stc	sr, %0\n\t"
+		"or	%2, %0\n\t"
+		"and	%3, %0\n\t"
+		"ldc	%0, sr\n\t"
+		: "=&r" (__dummy0), "=r" (__dummy1)
+		: "r" (0x10000000), "r" (0xffffff0f)
+		: "memory"
+	);
+}
+
+static inline void clear_bl_bit(void)
+{
+	unsigned long __dummy0, __dummy1;
+
+	__asm__ __volatile__ (
+		"stc	sr, %0\n\t"
+		"and	%2, %0\n\t"
+		"ldc	%0, sr\n\t"
+		: "=&r" (__dummy0), "=r" (__dummy1)
+		: "1" (~0x10000000)
+		: "memory"
+	);
+}
+
+static inline unsigned long __raw_local_save_flags(void)
+{
+	unsigned long flags;
+
+	__asm__ __volatile__ (
+		"stc	sr, %0\n\t"
+		"and	#0xf0, %0\n\t"
+		: "=&z" (flags)
+		: /* no inputs */
+		: "memory"
+	);
+
+	return flags;
+}
+
+#define raw_local_save_flags(flags) \
+		do { (flags) = __raw_local_save_flags(); } while (0)
+
+static inline int raw_irqs_disabled_flags(unsigned long flags)
+{
+	return (flags != 0);
+}
+
+static inline int raw_irqs_disabled(void)
+{
+	unsigned long flags = __raw_local_save_flags();
+
+	return raw_irqs_disabled_flags(flags);
+}
+
+static inline unsigned long __raw_local_irq_save(void)
+{
+	unsigned long flags, __dummy;
+
+	__asm__ __volatile__ (
+		"stc	sr, %1\n\t"
+		"mov	%1, %0\n\t"
+		"or	#0xf0, %0\n\t"
+		"ldc	%0, sr\n\t"
+		"mov	%1, %0\n\t"
+		"and	#0xf0, %0\n\t"
+		: "=&z" (flags), "=&r" (__dummy)
+		: /* no inputs */
+		: "memory"
+	);
+
+	return flags;
+}
+
+#define raw_local_irq_save(flags) \
+		do { (flags) = __raw_local_irq_save(); } while (0)
+
+static inline void raw_local_irq_restore(unsigned long flags)
+{
+	if ((flags & 0xf0) != 0xf0)
+		raw_local_irq_enable();
+}
+
+#endif /* __ASM_SH_IRQFLAGS_H */
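
In the helpers above, the saved flags value carries only the SR.IMASK field (masked with 0xf0), so raw_irqs_disabled_flags() can simply test for non-zero and raw_local_irq_restore() only ever re-enables. A minimal save/restore usage sketch; in practice these are normally reached through the generic local_irq_save()/local_irq_restore() wrappers:

	#include <asm/irqflags.h>

	static void example_critical_section(void)
	{
		unsigned long flags;

		raw_local_irq_save(flags);	/* mask interrupts, remember the old IMASK */
		/* ... work that must not be interrupted ... */
		raw_local_irq_restore(flags);	/* re-enable only if they were enabled before */
	}
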
diff --git a/include/asm-sh/mmu_context.h b/include/asm-sh/mmu_context.h
index c7088ef..46f04e2 100644
--- a/include/asm-sh/mmu_context.h
+++ b/include/asm-sh/mmu_context.h
@@ -10,7 +10,6 @@
 
 #include <asm/cpu/mmu_context.h>
 #include <asm/tlbflush.h>
-#include <asm/pgalloc.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 
@@ -42,10 +41,8 @@
 /*
  * Get MMU context if needed.
  */
-static __inline__ void
-get_mmu_context(struct mm_struct *mm)
+static inline void get_mmu_context(struct mm_struct *mm)
 {
-	extern void flush_tlb_all(void);
 	unsigned long mc = mmu_context_cache;
 
 	/* Check if we have old version of context. */
@@ -61,6 +58,7 @@
 		 * Flush all TLB and start new cycle.
 		 */
 		flush_tlb_all();
+
 		/*
 		 * Fix version; Note that we avoid version #0
 		 * to distingush NO_CONTEXT.
@@ -75,11 +73,10 @@
  * Initialize the context related info for a new mm_struct
  * instance.
  */
-static __inline__ int init_new_context(struct task_struct *tsk,
+static inline int init_new_context(struct task_struct *tsk,
 				       struct mm_struct *mm)
 {
 	mm->context.id = NO_CONTEXT;
-
 	return 0;
 }
 
@@ -87,12 +84,12 @@
  * Destroy context related info for an mm_struct that is about
  * to be put to rest.
  */
-static __inline__ void destroy_context(struct mm_struct *mm)
+static inline void destroy_context(struct mm_struct *mm)
 {
 	/* Do nothing */
 }
 
-static __inline__ void set_asid(unsigned long asid)
+static inline void set_asid(unsigned long asid)
 {
 	unsigned long __dummy;
 
@@ -105,7 +102,7 @@
 			        "r" (0xffffff00));
 }
 
-static __inline__ unsigned long get_asid(void)
+static inline unsigned long get_asid(void)
 {
 	unsigned long asid;
 
@@ -120,24 +117,29 @@
  * After we have set current->mm to a new value, this activates
  * the context for the new mm so we see the new mappings.
  */
-static __inline__ void activate_context(struct mm_struct *mm)
+static inline void activate_context(struct mm_struct *mm)
 {
 	get_mmu_context(mm);
 	set_asid(mm->context.id & MMU_CONTEXT_ASID_MASK);
 }
 
-/* MMU_TTB can be used for optimizing the fault handling.
-   (Currently not used) */
-static __inline__ void switch_mm(struct mm_struct *prev,
-				 struct mm_struct *next,
-				 struct task_struct *tsk)
+/* MMU_TTB is used for optimizing the fault handling. */
+static inline void set_TTB(pgd_t *pgd)
+{
+	ctrl_outl((unsigned long)pgd, MMU_TTB);
+}
+
+static inline pgd_t *get_TTB(void)
+{
+	return (pgd_t *)ctrl_inl(MMU_TTB);
+}
+
+static inline void switch_mm(struct mm_struct *prev,
+			     struct mm_struct *next,
+			     struct task_struct *tsk)
 {
 	if (likely(prev != next)) {
-		unsigned long __pgdir = (unsigned long)next->pgd;
-
-		__asm__ __volatile__("mov.l	%0, %1"
-				     : /* no output */
-				     : "r" (__pgdir), "m" (__m(MMU_TTB)));
+		set_TTB(next->pgd);
 		activate_context(next);
 	}
 }
@@ -147,7 +149,7 @@
 #define activate_mm(prev, next) \
 	switch_mm((prev),(next),NULL)
 
-static __inline__ void
+static inline void
 enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }
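
In get_mmu_context() and activate_context() above, context.id packs a generation counter in its upper bits and the hardware ASID in the low bits, which is why activate_context() masks with MMU_CONTEXT_ASID_MASK before calling set_asid(). A small illustrative split (the helper name is hypothetical; only macros already used in this hunk are assumed to exist):

	#include <linux/sched.h>
	#include <asm/mmu_context.h>

	static void example_context_split(struct mm_struct *mm)
	{
		/* Low bits: hardware ASID programmed by set_asid(). */
		unsigned long asid = mm->context.id & MMU_CONTEXT_ASID_MASK;
		/* Remaining upper bits: generation used to detect stale contexts. */
		unsigned long generation = mm->context.id & ~MMU_CONTEXT_ASID_MASK;

		(void)asid;
		(void)generation;
	}
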
diff --git a/include/asm-sh/page.h b/include/asm-sh/page.h
index ca8b26d..380fd62 100644
--- a/include/asm-sh/page.h
+++ b/include/asm-sh/page.h
@@ -13,9 +13,16 @@
    [ P4 control   ]		0xE0000000
  */
 
-
 /* PAGE_SHIFT determines the page size */
-#define PAGE_SHIFT	12
+#if defined(CONFIG_PAGE_SIZE_4KB)
+# define PAGE_SHIFT	12
+#elif defined(CONFIG_PAGE_SIZE_8KB)
+# define PAGE_SHIFT	13
+#elif defined(CONFIG_PAGE_SIZE_64KB)
+# define PAGE_SHIFT	16
+#else
+# error "Bogus kernel page size?"
+#endif
 
 #ifdef __ASSEMBLY__
 #define PAGE_SIZE	(1 << PAGE_SHIFT)
@@ -28,8 +35,14 @@
 
 #if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
 #define HPAGE_SHIFT	16
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
+#define HPAGE_SHIFT	18
 #elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
 #define HPAGE_SHIFT	20
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
+#define HPAGE_SHIFT	22
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB)
+#define HPAGE_SHIFT	26
 #endif
 
 #ifdef CONFIG_HUGETLB_PAGE
@@ -69,15 +82,25 @@
 /*
  * These are used to make use of C type-checking..
  */
-typedef struct { unsigned long pte; } pte_t;
-typedef struct { unsigned long pgd; } pgd_t;
+#ifdef CONFIG_X2TLB
+typedef struct { unsigned long pte_low, pte_high; } pte_t;
+typedef struct { unsigned long long pgprot; } pgprot_t;
+#define pte_val(x) \
+	((x).pte_low | ((unsigned long long)(x).pte_high << 32))
+#define __pte(x) \
+	({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
+#else
+typedef struct { unsigned long pte_low; } pte_t;
 typedef struct { unsigned long pgprot; } pgprot_t;
+#define pte_val(x)	((x).pte_low)
+#define __pte(x) ((pte_t) { (x) } )
+#endif
 
-#define pte_val(x)	((x).pte)
+typedef struct { unsigned long pgd; } pgd_t;
+
 #define pgd_val(x)	((x).pgd)
 #define pgprot_val(x)	((x).pgprot)
 
-#define __pte(x) ((pte_t) { (x) } )
 #define __pgd(x) ((pgd_t) { (x) } )
 #define __pgprot(x)	((pgprot_t) { (x) } )
 
diff --git a/include/asm-sh/pgalloc.h b/include/asm-sh/pgalloc.h
index e841465..888e452 100644
--- a/include/asm-sh/pgalloc.h
+++ b/include/asm-sh/pgalloc.h
@@ -1,13 +1,16 @@
 #ifndef __ASM_SH_PGALLOC_H
 #define __ASM_SH_PGALLOC_H
 
-#define pmd_populate_kernel(mm, pmd, pte) \
-		set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
+				       pte_t *pte)
+{
+	set_pmd(pmd, __pmd((unsigned long)pte));
+}
 
 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 				struct page *pte)
 {
-	set_pmd(pmd, __pmd(_PAGE_TABLE + page_to_phys(pte)));
+	set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
 }
 
 /*
@@ -15,7 +18,16 @@
  */
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	return (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
+	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
+
+	if (pgd) {
+		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
+		memcpy(pgd + USER_PTRS_PER_PGD,
+		       swapper_pg_dir + USER_PTRS_PER_PGD,
+		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+	}
+
+	return pgd;
 }
 
 static inline void pgd_free(pgd_t *pgd)
diff --git a/include/asm-sh/pgtable-2level.h b/include/asm-sh/pgtable-2level.h
deleted file mode 100644
index b525db6..0000000
--- a/include/asm-sh/pgtable-2level.h
+++ /dev/null
@@ -1,70 +0,0 @@
-#ifndef __ASM_SH_PGTABLE_2LEVEL_H
-#define __ASM_SH_PGTABLE_2LEVEL_H
-
-/*
- * traditional two-level paging structure:
- */
-
-#define PGDIR_SHIFT	22
-#define PTRS_PER_PGD	1024
-
-/*
- * this is two-level, so we don't really have any
- * PMD directory physically.
- */
-#define PMD_SHIFT	22
-#define PTRS_PER_PMD	1
-
-#define PTRS_PER_PTE	1024
-
-#ifndef __ASSEMBLY__
-#define pte_ERROR(e) \
-	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
-#define pmd_ERROR(e) \
-	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
-#define pgd_ERROR(e) \
-	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
-
-/*
- * The "pgd_xxx()" functions here are trivial for a folded two-level
- * setup: the pgd is never bad, and a pmd always exists (as it's folded
- * into the pgd entry)
- */
-static inline int pgd_none(pgd_t pgd)		{ return 0; }
-static inline int pgd_bad(pgd_t pgd)		{ return 0; }
-static inline int pgd_present(pgd_t pgd)	{ return 1; }
-static inline void pgd_clear (pgd_t * pgdp) 	{ }
-
-/*
- * Certain architectures need to do special things when PTEs
- * within a page table are directly modified.  Thus, the following
- * hook is made available.
- */
-#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
-
-/*
- * (pmds are folded into pgds so this doesn't get actually called,
- * but the define is needed for a generic inline function.)
- */
-#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
-#define set_pgd(pgdptr, pgdval) (*(pgdptr) = pgdval)
-
-#define pgd_page_vaddr(pgd) \
-((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))
-
-#define pgd_page(pgd) \
-	(phys_to_page(pgd_val(pgd)))
-
-static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
-{
-	return (pmd_t *) dir;
-}
-
-#define pte_pfn(x)		((unsigned long)(((x).pte >> PAGE_SHIFT)))
-#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-#define pfn_pmd(pfn, prot)	__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-
-#endif /* !__ASSEMBLY__ */
-
-#endif /* __ASM_SH_PGTABLE_2LEVEL_H */
diff --git a/include/asm-sh/pgtable.h b/include/asm-sh/pgtable.h
index 2c8682a..c84901d 100644
--- a/include/asm-sh/pgtable.h
+++ b/include/asm-sh/pgtable.h
@@ -15,15 +15,10 @@
 #include <asm-generic/pgtable-nopmd.h>
 #include <asm/page.h>
 
-#define PTRS_PER_PGD		1024
-
 #ifndef __ASSEMBLY__
 #include <asm/addrspace.h>
 #include <asm/fixmap.h>
 
-extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-extern void paging_init(void);
-
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
@@ -33,15 +28,28 @@
 
 #endif /* !__ASSEMBLY__ */
 
-/* traditional two-level paging structure */
-#define PGDIR_SHIFT	22
-#define PTRS_PER_PMD	1
-#define PTRS_PER_PTE	1024
-#define PMD_SIZE	(1UL << PMD_SHIFT)
-#define PMD_MASK	(~(PMD_SIZE-1))
-#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
+/*
+ * traditional two-level paging structure
+ */
+/* PTE bits */
+#ifdef CONFIG_X2TLB
+# define PTE_MAGNITUDE	3	/* 64-bit PTEs on extended mode SH-X2 TLB */
+#else
+# define PTE_MAGNITUDE	2	/* 32-bit PTEs */
+#endif
+#define PTE_SHIFT	PAGE_SHIFT
+#define PTE_BITS	(PTE_SHIFT - PTE_MAGNITUDE)
+
+/* PGD bits */
+#define PGDIR_SHIFT	(PTE_SHIFT + PTE_BITS)
+#define PGDIR_BITS	(32 - PGDIR_SHIFT)
+#define PGDIR_SIZE	(1 << PGDIR_SHIFT)
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
 
+/* Entries per level */
+#define PTRS_PER_PTE	(PAGE_SIZE / 4)
+#define PTRS_PER_PGD	(PAGE_SIZE / 4)
+
 #define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
 #define FIRST_USER_ADDRESS	0
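
For the default configuration (4 KiB pages, 32-bit PTEs) the derived constants above reproduce the values hard-coded in the deleted pgtable-2level.h: PTE_SHIFT = 12 and PTE_MAGNITUDE = 2 give PTE_BITS = 10, hence PGDIR_SHIFT = 22 and PTRS_PER_PTE = PTRS_PER_PGD = 1024. A hypothetical compile-time cross-check, not part of the patch:

	#include <linux/kernel.h>
	#include <asm/pgtable.h>

	/* Verify the derived layout matches the old two-level constants. */
	static void example_layout_check(void)
	{
	#if defined(CONFIG_PAGE_SIZE_4KB) && !defined(CONFIG_X2TLB)
		BUILD_BUG_ON(PGDIR_SHIFT != 22);
		BUILD_BUG_ON(PTRS_PER_PTE != 1024);
		BUILD_BUG_ON(PTRS_PER_PGD != 1024);
	#endif
	}
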
 
@@ -49,7 +57,7 @@
 
 /*
  * First 1MB map is used by fixed purpose.
- * Currently only 4-enty (16kB) is used (see arch/sh/mm/cache.c)
+ * Currently only 4-entry (16kB) is used (see arch/sh/mm/cache.c)
  */
 #define VMALLOC_START	(P3SEG+0x00100000)
 #define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
@@ -57,7 +65,8 @@
 /*
  * Linux PTEL encoding.
  *
- * Hardware and software bit definitions for the PTEL value:
+ * Hardware and software bit definitions for the PTEL value (see below for
+ * notes on SH-X2 MMUs and 64-bit PTEs):
  *
  * - Bits 0 and 7 are reserved on SH-3 (_PAGE_WT and _PAGE_SZ1 on SH-4).
  *
@@ -76,20 +85,57 @@
  *
  * - Bits 31, 30, and 29 remain unused by everyone and can be used for future
  *   software flags, although care must be taken to update _PAGE_CLEAR_FLAGS.
+ *
+ * XXX: Leave the _PAGE_FILE and _PAGE_WT overhaul for a rainy day.
+ *
+ * SH-X2 MMUs and extended PTEs
+ *
+ * SH-X2 supports an extended mode TLB with split data arrays due to the
+ * number of bits needed for PR and SZ (now EPR and ESZ) encodings. The PR and
+ * SZ bit placeholders still exist in data array 1, but are implemented as
+ * reserved bits, with the real logic existing in data array 2.
+ *
+ * The downside to this is that we can no longer fit everything in to a 32-bit
+ * PTE encoding, so a 64-bit pte_t is necessary for these parts. On the plus
+ * side, this gives us quite a few spare bits to play with for future usage.
  */
+/* Legacy and compat mode bits */
 #define	_PAGE_WT	0x001		/* WT-bit on SH-4, 0 on SH-3 */
 #define _PAGE_HW_SHARED	0x002		/* SH-bit  : shared among processes */
 #define _PAGE_DIRTY	0x004		/* D-bit   : page changed */
 #define _PAGE_CACHABLE	0x008		/* C-bit   : cachable */
-#define _PAGE_SZ0	0x010		/* SZ0-bit : Size of page */
-#define _PAGE_RW	0x020		/* PR0-bit : write access allowed */
-#define _PAGE_USER	0x040		/* PR1-bit : user space access allowed */
-#define _PAGE_SZ1	0x080		/* SZ1-bit : Size of page (on SH-4) */
+#ifndef CONFIG_X2TLB
+# define _PAGE_SZ0	0x010		/* SZ0-bit : Size of page */
+# define _PAGE_RW	0x020		/* PR0-bit : write access allowed */
+# define _PAGE_USER	0x040		/* PR1-bit : user space access allowed*/
+# define _PAGE_SZ1	0x080		/* SZ1-bit : Size of page (on SH-4) */
+#endif
 #define _PAGE_PRESENT	0x100		/* V-bit   : page is valid */
 #define _PAGE_PROTNONE	0x200		/* software: if not present  */
 #define _PAGE_ACCESSED	0x400		/* software: page referenced */
 #define _PAGE_FILE	_PAGE_WT	/* software: pagecache or swap? */
 
+/* Extended mode bits */
+#define _PAGE_EXT_ESZ0		0x0010	/* ESZ0-bit: Size of page */
+#define _PAGE_EXT_ESZ1		0x0020	/* ESZ1-bit: Size of page */
+#define _PAGE_EXT_ESZ2		0x0040	/* ESZ2-bit: Size of page */
+#define _PAGE_EXT_ESZ3		0x0080	/* ESZ3-bit: Size of page */
+
+#define _PAGE_EXT_USER_EXEC	0x0100	/* EPR0-bit: User space executable */
+#define _PAGE_EXT_USER_WRITE	0x0200	/* EPR1-bit: User space writable */
+#define _PAGE_EXT_USER_READ	0x0400	/* EPR2-bit: User space readable */
+
+#define _PAGE_EXT_KERN_EXEC	0x0800	/* EPR3-bit: Kernel space executable */
+#define _PAGE_EXT_KERN_WRITE	0x1000	/* EPR4-bit: Kernel space writable */
+#define _PAGE_EXT_KERN_READ	0x2000	/* EPR5-bit: Kernel space readable */
+
+/* Wrapper for extended mode pgprot twiddling */
+#ifdef CONFIG_X2TLB
+# define _PAGE_EXT(x)		((unsigned long long)(x) << 32)
+#else
+# define _PAGE_EXT(x)		(0)
+#endif
+
 /* software: moves to PTEA.TC (Timing Control) */
 #define _PAGE_PCC_AREA5	0x00000000	/* use BSC registers for area5 */
 #define _PAGE_PCC_AREA6	0x80000000	/* use BSC registers for area6 */
@@ -114,37 +160,160 @@
 
 #define _PAGE_FLAGS_HARDWARE_MASK	(0x1fffffff & ~(_PAGE_CLEAR_FLAGS))
 
-/* Hardware flags: SZ0=1 (4k-byte) */
-#define _PAGE_FLAGS_HARD	_PAGE_SZ0
-
-#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
-#define _PAGE_SZHUGE	(_PAGE_SZ1)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
-#define _PAGE_SZHUGE	(_PAGE_SZ0 | _PAGE_SZ1)
+/* Hardware flags, page size encoding */
+#if defined(CONFIG_X2TLB)
+# if defined(CONFIG_PAGE_SIZE_4KB)
+#  define _PAGE_FLAGS_HARD	_PAGE_EXT(_PAGE_EXT_ESZ0)
+# elif defined(CONFIG_PAGE_SIZE_8KB)
+#  define _PAGE_FLAGS_HARD	_PAGE_EXT(_PAGE_EXT_ESZ1)
+# elif defined(CONFIG_PAGE_SIZE_64KB)
+#  define _PAGE_FLAGS_HARD	_PAGE_EXT(_PAGE_EXT_ESZ2)
+# endif
+#else
+# if defined(CONFIG_PAGE_SIZE_4KB)
+#  define _PAGE_FLAGS_HARD	_PAGE_SZ0
+# elif defined(CONFIG_PAGE_SIZE_64KB)
+#  define _PAGE_FLAGS_HARD	_PAGE_SZ1
+# endif
 #endif
 
-#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_CACHABLE | _PAGE_DIRTY)
+#if defined(CONFIG_X2TLB)
+# if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
+#  define _PAGE_SZHUGE	(_PAGE_EXT_ESZ2)
+# elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
+#  define _PAGE_SZHUGE	(_PAGE_EXT_ESZ0 | _PAGE_EXT_ESZ2)
+# elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
+#  define _PAGE_SZHUGE	(_PAGE_EXT_ESZ0 | _PAGE_EXT_ESZ1 | _PAGE_EXT_ESZ2)
+# elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
+#  define _PAGE_SZHUGE	(_PAGE_EXT_ESZ3)
+# elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB)
+#  define _PAGE_SZHUGE	(_PAGE_EXT_ESZ2 | _PAGE_EXT_ESZ3)
+# endif
+#else
+# if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
+#  define _PAGE_SZHUGE	(_PAGE_SZ1)
+# elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
+#  define _PAGE_SZHUGE	(_PAGE_SZ0 | _PAGE_SZ1)
+# endif
+#endif
+
+/*
+ * Stub out _PAGE_SZHUGE if we don't have a good definition for it,
+ * to make pte_mkhuge() happy.
+ */
+#ifndef _PAGE_SZHUGE
+# define _PAGE_SZHUGE	(_PAGE_FLAGS_HARD)
+#endif
+
+#define _PAGE_CHG_MASK \
+	(PTE_MASK | _PAGE_ACCESSED | _PAGE_CACHABLE | _PAGE_DIRTY)
 
 #ifndef __ASSEMBLY__
 
-#ifdef CONFIG_MMU
-#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE |_PAGE_ACCESSED | _PAGE_FLAGS_HARD)
-#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_CACHABLE |_PAGE_ACCESSED | _PAGE_FLAGS_HARD)
-#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_FLAGS_HARD)
-#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_FLAGS_HARD)
-#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_CACHABLE | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD)
+#if defined(CONFIG_X2TLB) /* SH-X2 TLB */
+#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE | \
+				 _PAGE_ACCESSED | _PAGE_FLAGS_HARD)
+
+#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
+				 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
+				 _PAGE_EXT(_PAGE_EXT_USER_READ | \
+					   _PAGE_EXT_USER_WRITE))
+
+#define PAGE_EXECREAD	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
+				 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
+				 _PAGE_EXT(_PAGE_EXT_USER_EXEC | \
+					   _PAGE_EXT_USER_READ))
+
+#define PAGE_COPY	PAGE_EXECREAD
+
+#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
+				 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
+				 _PAGE_EXT(_PAGE_EXT_USER_READ))
+
+#define PAGE_WRITEONLY	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
+				 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
+				 _PAGE_EXT(_PAGE_EXT_USER_WRITE))
+
+#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
+				 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
+				 _PAGE_EXT(_PAGE_EXT_USER_WRITE | \
+					   _PAGE_EXT_USER_READ  | \
+					   _PAGE_EXT_USER_EXEC))
+
+#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \
+				 _PAGE_DIRTY | _PAGE_ACCESSED | \
+				 _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | \
+				 _PAGE_EXT(_PAGE_EXT_KERN_READ | \
+					   _PAGE_EXT_KERN_WRITE | \
+					   _PAGE_EXT_KERN_EXEC))
+
 #define PAGE_KERNEL_NOCACHE \
-			__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD)
-#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD)
+			__pgprot(_PAGE_PRESENT | _PAGE_DIRTY | \
+				 _PAGE_ACCESSED | _PAGE_HW_SHARED | \
+				 _PAGE_FLAGS_HARD | \
+				 _PAGE_EXT(_PAGE_EXT_KERN_READ | \
+					   _PAGE_EXT_KERN_WRITE | \
+					   _PAGE_EXT_KERN_EXEC))
+
+#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \
+				 _PAGE_DIRTY | _PAGE_ACCESSED | \
+				 _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | \
+				 _PAGE_EXT(_PAGE_EXT_KERN_READ | \
+					   _PAGE_EXT_KERN_EXEC))
+
 #define PAGE_KERNEL_PCC(slot, type) \
-			__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_FLAGS_HARD | (slot ? _PAGE_PCC_AREA5 : _PAGE_PCC_AREA6) | (type))
+			__pgprot(_PAGE_PRESENT | _PAGE_DIRTY | \
+				 _PAGE_ACCESSED | _PAGE_FLAGS_HARD | \
+				 _PAGE_EXT(_PAGE_EXT_KERN_READ | \
+					   _PAGE_EXT_KERN_WRITE | \
+					   _PAGE_EXT_KERN_EXEC) | \
+				 (slot ? _PAGE_PCC_AREA5 : _PAGE_PCC_AREA6) | \
+				 (type))
+
+#elif defined(CONFIG_MMU) /* SH-X TLB */
+#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE | \
+				 _PAGE_ACCESSED | _PAGE_FLAGS_HARD)
+
+#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
+				 _PAGE_CACHABLE | _PAGE_ACCESSED | \
+				 _PAGE_FLAGS_HARD)
+
+#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | \
+				 _PAGE_ACCESSED | _PAGE_FLAGS_HARD)
+
+#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | \
+				 _PAGE_ACCESSED | _PAGE_FLAGS_HARD)
+
+#define PAGE_EXECREAD	PAGE_READONLY
+#define PAGE_RWX	PAGE_SHARED
+#define PAGE_WRITEONLY	PAGE_SHARED
+
+#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_CACHABLE | \
+				 _PAGE_DIRTY | _PAGE_ACCESSED | \
+				 _PAGE_HW_SHARED | _PAGE_FLAGS_HARD)
+
+#define PAGE_KERNEL_NOCACHE \
+			__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | \
+				 _PAGE_ACCESSED | _PAGE_HW_SHARED | \
+				 _PAGE_FLAGS_HARD)
+
+#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \
+				 _PAGE_DIRTY | _PAGE_ACCESSED | \
+				 _PAGE_HW_SHARED | _PAGE_FLAGS_HARD)
+
+#define PAGE_KERNEL_PCC(slot, type) \
+			__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | \
+				 _PAGE_ACCESSED | _PAGE_FLAGS_HARD | \
+				 (slot ? _PAGE_PCC_AREA5 : _PAGE_PCC_AREA6) | \
+				 (type))
 #else /* no mmu */
 #define PAGE_NONE		__pgprot(0)
 #define PAGE_SHARED		__pgprot(0)
 #define PAGE_COPY		__pgprot(0)
+#define PAGE_EXECREAD		__pgprot(0)
+#define PAGE_RWX		__pgprot(0)
 #define PAGE_READONLY		__pgprot(0)
+#define PAGE_WRITEONLY		__pgprot(0)
 #define PAGE_KERNEL		__pgprot(0)
 #define PAGE_KERNEL_NOCACHE	__pgprot(0)
 #define PAGE_KERNEL_RO		__pgprot(0)
@@ -154,27 +323,32 @@
 #endif /* __ASSEMBLY__ */
 
 /*
- * As i386 and MIPS, SuperH can't do page protection for execute, and
- * considers that the same as a read.  Also, write permissions imply
- * read permissions. This is the closest we can get..
+ * SH-X and lower (legacy) SuperH parts (SH-3, SH-4, some SH-4A) can't do page
+ * protection for execute, and consider it the same as a read. Also, write
+ * permission implies read permission. This is the closest we can get.
+ *
+ * SH-X2 (SH7785) and later parts take this to the opposite extreme, not only
+ * supporting separate execute, read, and write bits, but having completely
+ * separate permission bits for user and kernel space.
  */
+	 /*xwr*/
 #define __P000	PAGE_NONE
 #define __P001	PAGE_READONLY
 #define __P010	PAGE_COPY
 #define __P011	PAGE_COPY
-#define __P100	PAGE_READONLY
-#define __P101	PAGE_READONLY
+#define __P100	PAGE_EXECREAD
+#define __P101	PAGE_EXECREAD
 #define __P110	PAGE_COPY
 #define __P111	PAGE_COPY
 
 #define __S000	PAGE_NONE
 #define __S001	PAGE_READONLY
-#define __S010	PAGE_SHARED
+#define __S010	PAGE_WRITEONLY
 #define __S011	PAGE_SHARED
-#define __S100	PAGE_READONLY
-#define __S101	PAGE_READONLY
-#define __S110	PAGE_SHARED
-#define __S111	PAGE_SHARED
+#define __S100	PAGE_EXECREAD
+#define __S101	PAGE_EXECREAD
+#define __S110	PAGE_RWX
+#define __S111	PAGE_RWX
 
 #ifndef __ASSEMBLY__
 
@@ -183,7 +357,17 @@
  * within a page table are directly modified.  Thus, the following
  * hook is made available.
  */
+#ifdef CONFIG_X2TLB
+static inline void set_pte(pte_t *ptep, pte_t pte)
+{
+	ptep->pte_high = pte.pte_high;
+	smp_wmb();
+	ptep->pte_low = pte.pte_low;
+}
+#else
 #define set_pte(pteptr, pteval) (*(pteptr) = pteval)
+#endif
+
 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
 
 /*
@@ -192,18 +376,18 @@
  */
 #define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
 
-#define pte_pfn(x)		((unsigned long)(((x).pte >> PAGE_SHIFT)))
+#define pte_pfn(x)		((unsigned long)(((x).pte_low >> PAGE_SHIFT)))
 #define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
 #define pfn_pmd(pfn, prot)	__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
 
 #define pte_none(x)	(!pte_val(x))
 #define pte_present(x)	(pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
-#define pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
+#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
 
 #define pmd_none(x)	(!pmd_val(x))
-#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
+#define pmd_present(x)	(pmd_val(x))
 #define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
-#define	pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
+#define	pmd_bad(x)	(pmd_val(x) & ~PAGE_MASK)
 
 #define pages_to_mb(x)	((x) >> (20-PAGE_SHIFT))
 #define pte_page(x)	phys_to_page(pte_val(x)&PTE_PHYS_MASK)
@@ -212,28 +396,52 @@
  * The following only work if pte_present() is true.
  * Undefined behaviour if not..
  */
-static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
-static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
-static inline int pte_dirty(pte_t pte){ return pte_val(pte) & _PAGE_DIRTY; }
-static inline int pte_young(pte_t pte){ return pte_val(pte) & _PAGE_ACCESSED; }
-static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
-static inline int pte_write(pte_t pte){ return pte_val(pte) & _PAGE_RW; }
-static inline int pte_not_present(pte_t pte){ return !(pte_val(pte) & _PAGE_PRESENT); }
+#define pte_not_present(pte)	(!(pte_val(pte) & _PAGE_PRESENT))
+#define pte_dirty(pte)		(pte_val(pte) & _PAGE_DIRTY)
+#define pte_young(pte)		(pte_val(pte) & _PAGE_ACCESSED)
+#define pte_file(pte)		(pte_val(pte) & _PAGE_FILE)
 
-static inline pte_t pte_rdprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
-static inline pte_t pte_exprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
-static inline pte_t pte_mkclean(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
-static inline pte_t pte_mkold(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
-static inline pte_t pte_wrprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_RW)); return pte; }
-static inline pte_t pte_mkread(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; }
-static inline pte_t pte_mkexec(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; }
-static inline pte_t pte_mkdirty(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
-static inline pte_t pte_mkyoung(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
-static inline pte_t pte_mkwrite(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; }
-#ifdef CONFIG_HUGETLB_PAGE
-static inline pte_t pte_mkhuge(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_SZHUGE)); return pte; }
+#ifdef CONFIG_X2TLB
+#define pte_read(pte)		((pte).pte_high & _PAGE_EXT_USER_READ)
+#define pte_exec(pte)		((pte).pte_high & _PAGE_EXT_USER_EXEC)
+#define pte_write(pte)		((pte).pte_high & _PAGE_EXT_USER_WRITE)
+#else
+#define pte_read(pte)		(pte_val(pte) & _PAGE_USER)
+#define pte_exec(pte)		(pte_val(pte) & _PAGE_USER)
+#define pte_write(pte)		(pte_val(pte) & _PAGE_RW)
 #endif
 
+#define PTE_BIT_FUNC(h,fn,op) \
+static inline pte_t pte_##fn(pte_t pte) { pte.pte_##h op; return pte; }
+
+#ifdef CONFIG_X2TLB
+/*
+ * We cheat a bit in the SH-X2 TLB case. As the permission bits are
+ * individually toggled (and user permissions are entirely decoupled from
+ * kernel permissions), we attempt to couple them a bit more sanely here.
+ */
+PTE_BIT_FUNC(high, rdprotect, &= ~_PAGE_EXT_USER_READ);
+PTE_BIT_FUNC(high, mkread, |= _PAGE_EXT_USER_READ | _PAGE_EXT_KERN_READ);
+PTE_BIT_FUNC(high, wrprotect, &= ~_PAGE_EXT_USER_WRITE);
+PTE_BIT_FUNC(high, mkwrite, |= _PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE);
+PTE_BIT_FUNC(high, exprotect, &= ~_PAGE_EXT_USER_EXEC);
+PTE_BIT_FUNC(high, mkexec, |= _PAGE_EXT_USER_EXEC | _PAGE_EXT_KERN_EXEC);
+PTE_BIT_FUNC(high, mkhuge, |= _PAGE_SZHUGE);
+#else
+PTE_BIT_FUNC(low, rdprotect, &= ~_PAGE_USER);
+PTE_BIT_FUNC(low, mkread, |= _PAGE_USER);
+PTE_BIT_FUNC(low, wrprotect, &= ~_PAGE_RW);
+PTE_BIT_FUNC(low, mkwrite, |= _PAGE_RW);
+PTE_BIT_FUNC(low, exprotect, &= ~_PAGE_USER);
+PTE_BIT_FUNC(low, mkexec, |= _PAGE_USER);
+PTE_BIT_FUNC(low, mkhuge, |= _PAGE_SZHUGE);
+#endif
+
+PTE_BIT_FUNC(low, mkclean, &= ~_PAGE_DIRTY);
+PTE_BIT_FUNC(low, mkdirty, |= _PAGE_DIRTY);
+PTE_BIT_FUNC(low, mkold, &= ~_PAGE_ACCESSED);
+PTE_BIT_FUNC(low, mkyoung, |= _PAGE_ACCESSED);
+
 /*
  * Macro and implementation to make a page protection as uncachable.
  */
@@ -258,13 +466,14 @@
 #define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
 
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
-{ set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))); return pte; }
+{
+	set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) |
+			    pgprot_val(newprot)));
+	return pte;
+}
 
-#define pmd_page_vaddr(pmd) \
-((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
-
-#define pmd_page(pmd) \
-	(phys_to_page(pmd_val(pmd)))
+#define pmd_page_vaddr(pmd)	pmd_val(pmd)
+#define pmd_page(pmd)		(virt_to_page(pmd_val(pmd)))
 
 /* to find an entry in a page-table-directory. */
 #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
@@ -283,8 +492,15 @@
 #define pte_unmap(pte)		do { } while (0)
 #define pte_unmap_nested(pte)	do { } while (0)
 
+#ifdef CONFIG_X2TLB
+#define pte_ERROR(e) \
+	printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, \
+	       &(e), (e).pte_high, (e).pte_low)
+#else
 #define pte_ERROR(e) \
 	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+#endif
+
 #define pgd_ERROR(e) \
 	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
 
@@ -337,6 +553,9 @@
 extern pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
 #endif
 
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+extern void paging_init(void);
+
 #include <asm-generic/pgtable.h>
 
 #endif /* !__ASSEMBLY__ */
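
A rough standalone sketch (plain C, not part of the patch; the type and bit values are mocked up) of what the PTE_BIT_FUNC() helpers above expand to in the CONFIG_X2TLB case, and why the X2TLB set_pte() stores pte_high before pte_low: the low word carries the valid bit, so it should only become visible once the extended permission bits are already in place.

#include <stdio.h>

/* Mocked-up 64-bit pte_t as used with CONFIG_X2TLB (two 32-bit halves). */
typedef struct { unsigned long pte_low, pte_high; } pte_t;

#define _PAGE_PRESENT		0x100	/* lives in the low word */
#define _PAGE_EXT_USER_WRITE	0x0200	/* lives in the high word */
#define _PAGE_EXT_KERN_WRITE	0x1000	/* lives in the high word */

/* This is what PTE_BIT_FUNC(high, mkwrite, |= ...) expands to. */
static inline pte_t pte_mkwrite(pte_t pte)
{
	pte.pte_high |= _PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE;
	return pte;
}

int main(void)
{
	pte_t pte = { .pte_low = _PAGE_PRESENT, .pte_high = 0 };

	pte = pte_mkwrite(pte);
	printf("pte_low=%08lx pte_high=%08lx\n", pte.pte_low, pte.pte_high);
	return 0;
}
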
diff --git a/include/asm-sh/processor.h b/include/asm-sh/processor.h
index 45bb74e..6f1dd7c 100644
--- a/include/asm-sh/processor.h
+++ b/include/asm-sh/processor.h
@@ -36,7 +36,10 @@
  */
 enum cpu_type {
 	/* SH-2 types */
-	CPU_SH7604,
+	CPU_SH7604, CPU_SH7619,
+
+	/* SH-2A types */
+	CPU_SH7206,
 
 	/* SH-3 types */
 	CPU_SH7705, CPU_SH7706, CPU_SH7707,
@@ -47,7 +50,10 @@
 	/* SH-4 types */
 	CPU_SH7750, CPU_SH7750S, CPU_SH7750R, CPU_SH7751, CPU_SH7751R,
 	CPU_SH7760, CPU_ST40RA, CPU_ST40GX1, CPU_SH4_202, CPU_SH4_501,
+
+	/* SH-4A types */
 	CPU_SH73180, CPU_SH7343, CPU_SH7770, CPU_SH7780, CPU_SH7781,
+	CPU_SH7785,
 
 	/* Unknown subtype */
 	CPU_SH_NONE
@@ -130,12 +136,11 @@
 };
 
 struct thread_struct {
+	/* Saved registers when thread is descheduled */
 	unsigned long sp;
 	unsigned long pc;
 
-	unsigned long trap_no, error_code;
-	unsigned long address;
-	/* Hardware debugging registers may come here */
+	/* Hardware debugging registers */
 	unsigned long ubc_pc;
 
 	/* floating point info */
@@ -150,12 +155,7 @@
 extern int ubc_usercnt;
 
 #define INIT_THREAD  {						\
-	sizeof(init_stack) + (long) &init_stack, /* sp */	\
-	0,					 /* pc */	\
-	0, 0,							\
-	0,							\
-	0,							\
-	{{{0,}},}				/* fpu state */	\
+	.sp = sizeof(init_stack) + (long) &init_stack,		\
 }
 
 /*
@@ -259,8 +259,8 @@
 		struct pt_regs *regs);
 extern unsigned long get_wchan(struct task_struct *p);
 
-#define KSTK_EIP(tsk)  ((tsk)->thread.pc)
-#define KSTK_ESP(tsk)  ((tsk)->thread.sp)
+#define KSTK_EIP(tsk)  (task_pt_regs(tsk)->pc)
+#define KSTK_ESP(tsk)  (task_pt_regs(tsk)->regs[15])
 
 #define cpu_sleep()	__asm__ __volatile__ ("sleep" : : : "memory")
 #define cpu_relax()	barrier()
diff --git a/include/asm-sh/push-switch.h b/include/asm-sh/push-switch.h
new file mode 100644
index 0000000..dfc6bad
--- /dev/null
+++ b/include/asm-sh/push-switch.h
@@ -0,0 +1,28 @@
+#ifndef __ASM_SH_PUSH_SWITCH_H
+#define __ASM_SH_PUSH_SWITCH_H
+
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+
+struct push_switch {
+	/* switch state */
+	unsigned int		state:1;
+	/* debounce timer */
+	struct timer_list	debounce;
+	/* workqueue */
+	struct work_struct	work;
+};
+
+struct push_switch_platform_info {
+	/* IRQ handler */
+	irqreturn_t		(*irq_handler)(int irq, void *data);
+	/* Special IRQ flags */
+	unsigned int		irq_flags;
+	/* Bit location of switch */
+	unsigned int		bit;
+	/* Symbolic switch name */
+	const char		*name;
+};
+
+#endif /* __ASM_SH_PUSH_SWITCH_H */
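
The new push-switch header only declares the data structures; the board wiring is not part of this hunk. As a hedged sketch of how platform code might fill in push_switch_platform_info (the handler, bit position and name below are invented, and the struct is re-declared so the snippet builds on its own):

/* Minimal re-declaration so this sketch builds outside the kernel. */
typedef int irqreturn_t;
#define IRQ_HANDLED	1

struct push_switch_platform_info {
	irqreturn_t	(*irq_handler)(int irq, void *data);
	unsigned int	irq_flags;
	unsigned int	bit;
	const char	*name;
};

static irqreturn_t example_switch_irq(int irq, void *data)
{
	/* A real handler would kick the debounce timer in struct push_switch. */
	(void)irq; (void)data;
	return IRQ_HANDLED;
}

static struct push_switch_platform_info example_switch = {
	.irq_handler	= example_switch_irq,
	.irq_flags	= 0,
	.bit		= 4,		/* invented bit position */
	.name		= "example-sw",	/* invented name */
};

int main(void) { return example_switch.bit == 4 ? 0 : 1; }
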
diff --git a/include/asm-sh/rwsem.h b/include/asm-sh/rwsem.h
index 9d2aea5..4931ba8 100644
--- a/include/asm-sh/rwsem.h
+++ b/include/asm-sh/rwsem.h
@@ -25,11 +25,21 @@
 #define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 	spinlock_t		wait_lock;
 	struct list_head	wait_list;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map	dep_map;
+#endif
 };
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
+#else
+# define __RWSEM_DEP_MAP_INIT(lockname)
+#endif
+
 #define __RWSEM_INITIALIZER(name) \
 	{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
-	  LIST_HEAD_INIT((name).wait_list) }
+	  LIST_HEAD_INIT((name).wait_list) \
+	  __RWSEM_DEP_MAP_INIT(name) }
 
 #define DECLARE_RWSEM(name)		\
 	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
@@ -39,6 +49,16 @@
 extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
 extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
 
+extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
+			 struct lock_class_key *key);
+
+#define init_rwsem(sem)				\
+do {						\
+	static struct lock_class_key __key;	\
+						\
+	__init_rwsem((sem), #sem, &__key);	\
+} while (0)
+
 static inline void init_rwsem(struct rw_semaphore *sem)
 {
 	sem->count = RWSEM_UNLOCKED_VALUE;
@@ -141,6 +161,11 @@
 		rwsem_downgrade_wake(sem);
 }
 
+static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
+{
+	__down_write(sem);
+}
+
 /*
  * implement exchange and add functionality
  */
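
The reason the init_rwsem() macro above declares a static struct lock_class_key inside the do/while block: each textual call site gets its own key object, which is what lets lockdep give every initialisation point a distinct lock class, while #sem stringifies the variable name for lockdep reports. The same pattern in a standalone sketch, with lockdep itself stubbed out:

#include <stdio.h>

struct lock_class_key { char dummy; };

static void __init_rwsem_sketch(void *sem, const char *name,
				struct lock_class_key *key)
{
	/* The real __init_rwsem() registers the key with lockdep; here we
	 * only show that every expansion hands in a distinct key object. */
	printf("init %-3s sem=%p key=%p\n", name, sem, (void *)key);
}

#define init_rwsem_sketch(sem)				\
do {							\
	static struct lock_class_key __key;		\
	__init_rwsem_sketch((sem), #sem, &__key);	\
} while (0)

int main(void)
{
	int a, b;

	init_rwsem_sketch(&a);	/* first call site, first key */
	init_rwsem_sketch(&b);	/* second call site, different key */
	return 0;
}
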
diff --git a/include/asm-sh/se7206.h b/include/asm-sh/se7206.h
new file mode 100644
index 0000000..698eb80
--- /dev/null
+++ b/include/asm-sh/se7206.h
@@ -0,0 +1,13 @@
+#ifndef __ASM_SH_SE7206_H
+#define __ASM_SH_SE7206_H
+
+#define PA_SMSC		0x30000000
+#define PA_MRSHPC	0x34000000
+#define PA_LED		0x31400000
+
+void init_se7206_IRQ(void);
+
+#define __IO_PREFIX	se7206
+#include <asm/io_generic.h>
+
+#endif /* __ASM_SH_SE7206_H */
diff --git a/include/asm-sh/setup.h b/include/asm-sh/setup.h
index 34ca8a7..1583c6b 100644
--- a/include/asm-sh/setup.h
+++ b/include/asm-sh/setup.h
@@ -1,10 +1,12 @@
-#ifdef __KERNEL__
 #ifndef _SH_SETUP_H
 #define _SH_SETUP_H
 
 #define COMMAND_LINE_SIZE 256
 
+#ifdef __KERNEL__
+
 int setup_early_printk(char *);
 
-#endif /* _SH_SETUP_H */
 #endif /* __KERNEL__ */
+
+#endif /* _SH_SETUP_H */
diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h
index 3340126..b1e42e7 100644
--- a/include/asm-sh/system.h
+++ b/include/asm-sh/system.h
@@ -6,6 +6,7 @@
  * Copyright (C) 2002 Paul Mundt
  */
 
+#include <linux/irqflags.h>
 #include <asm/types.h>
 
 /*
@@ -131,103 +132,6 @@
 
 #define set_mb(var, value) do { xchg(&var, value); } while (0)
 
-/* Interrupt Control */
-#ifdef CONFIG_CPU_HAS_SR_RB
-static inline void local_irq_enable(void)
-{
-	unsigned long __dummy0, __dummy1;
-
-	__asm__ __volatile__("stc	sr, %0\n\t"
-			     "and	%1, %0\n\t"
-			     "stc	r6_bank, %1\n\t"
-			     "or	%1, %0\n\t"
-			     "ldc	%0, sr"
-			     : "=&r" (__dummy0), "=r" (__dummy1)
-			     : "1" (~0x000000f0)
-			     : "memory");
-}
-#else
-static inline void local_irq_enable(void)
-{
-	unsigned long __dummy0, __dummy1;
-
-	__asm__ __volatile__ (
-		"stc	sr, %0\n\t"
-		"and	%1, %0\n\t"
-		"ldc	%0, sr\n\t"
-		: "=&r" (__dummy0), "=r" (__dummy1)
-		: "1" (~0x000000f0)
-		: "memory");
-}
-#endif
-
-static inline void local_irq_disable(void)
-{
-	unsigned long __dummy;
-	__asm__ __volatile__("stc	sr, %0\n\t"
-			     "or	#0xf0, %0\n\t"
-			     "ldc	%0, sr"
-			     : "=&z" (__dummy)
-			     : /* no inputs */
-			     : "memory");
-}
-
-static inline void set_bl_bit(void)
-{
-	unsigned long __dummy0, __dummy1;
-
-	__asm__ __volatile__ ("stc	sr, %0\n\t"
-			     "or	%2, %0\n\t"
-			     "and	%3, %0\n\t"
-			     "ldc	%0, sr"
-			     : "=&r" (__dummy0), "=r" (__dummy1)
-			     : "r" (0x10000000), "r" (0xffffff0f)
-			     : "memory");
-}
-
-static inline void clear_bl_bit(void)
-{
-	unsigned long __dummy0, __dummy1;
-
-	__asm__ __volatile__ ("stc	sr, %0\n\t"
-			     "and	%2, %0\n\t"
-			     "ldc	%0, sr"
-			     : "=&r" (__dummy0), "=r" (__dummy1)
-			     : "1" (~0x10000000)
-			     : "memory");
-}
-
-#define local_save_flags(x) \
-	__asm__("stc sr, %0; and #0xf0, %0" : "=&z" (x) :/**/: "memory" )
-
-#define irqs_disabled()			\
-({					\
-	unsigned long flags;		\
-	local_save_flags(flags);	\
-	(flags != 0);			\
-})
-
-static inline unsigned long local_irq_save(void)
-{
-	unsigned long flags, __dummy;
-
-	__asm__ __volatile__("stc	sr, %1\n\t"
-			     "mov	%1, %0\n\t"
-			     "or	#0xf0, %0\n\t"
-			     "ldc	%0, sr\n\t"
-			     "mov	%1, %0\n\t"
-			     "and	#0xf0, %0"
-			     : "=&z" (flags), "=&r" (__dummy)
-			     :/**/
-			     : "memory" );
-	return flags;
-}
-
-#define local_irq_restore(x) do {			\
-	if ((x & 0x000000f0) != 0x000000f0)		\
-		local_irq_enable();			\
-} while (0)
-
 /*
  * Jump to P2 area.
  * When handling TLB or caches, we need to do it from P2 area.
@@ -264,9 +168,6 @@
 		: "=&r" (__dummy));			\
 } while (0)
 
-/* For spinlocks etc */
-#define local_irq_save(x)	x = local_irq_save()
-
 static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
 {
 	unsigned long flags, retval;
diff --git a/include/asm-sh/thread_info.h b/include/asm-sh/thread_info.h
index 3ebc3f9..0c01dc5 100644
--- a/include/asm-sh/thread_info.h
+++ b/include/asm-sh/thread_info.h
@@ -90,13 +90,7 @@
 #endif
 #define free_thread_info(ti)	kfree(ti)
 
-#else /* !__ASSEMBLY__ */
-
-/* how to get the thread information struct from ASM */
-#define GET_THREAD_INFO(reg) \
-	stc     r7_bank, reg
-
-#endif
+#endif /* __ASSEMBLY__ */
 
 /*
  * thread information flags
diff --git a/include/asm-sh/timer.h b/include/asm-sh/timer.h
index 5df842b..17b5e76 100644
--- a/include/asm-sh/timer.h
+++ b/include/asm-sh/timer.h
@@ -18,11 +18,32 @@
 
 	struct sys_device	dev;
 	struct sys_timer_ops	*ops;
+
+#ifdef CONFIG_NO_IDLE_HZ
+	struct dyn_tick_timer	*dyn_tick;
+#endif
 };
 
+#ifdef CONFIG_NO_IDLE_HZ
+#define DYN_TICK_ENABLED	(1 << 1)
+
+struct dyn_tick_timer {
+	spinlock_t	lock;
+	unsigned int	state;			/* Current state */
+	int		(*enable)(void);	/* Enables dynamic tick */
+	int		(*disable)(void);	/* Disables dynamic tick */
+	void		(*reprogram)(unsigned long); /* Reprograms the timer */
+	int		(*handler)(int, void *);
+};
+
+void timer_dyn_reprogram(void);
+#else
+#define timer_dyn_reprogram()	do { } while (0)
+#endif
+
 #define TICK_SIZE (tick_nsec / 1000)
 
-extern struct sys_timer tmu_timer;
+extern struct sys_timer tmu_timer, cmt_timer, mtu2_timer;
 extern struct sys_timer *sys_timer;
 
 #ifndef CONFIG_GENERIC_TIME
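
The dyn_tick_timer added above is effectively an ops table: a timer driver supplies enable/disable/reprogram hooks and sets DYN_TICK_ENABLED in state once one-shot mode is active. The registration path is outside this hunk, so the following is only a guess at the shape of such a provider, written as standalone C with the hooks stubbed out:

#include <stdio.h>

#define DYN_TICK_ENABLED	(1 << 1)

/* Trimmed-down copy of the ops table from the hunk above (no spinlock). */
struct dyn_tick_timer_sketch {
	unsigned int	state;
	int		(*enable)(void);
	int		(*disable)(void);
	void		(*reprogram)(unsigned long);
};

static int tmu_dyn_enable(void)  { puts("enable one-shot mode");  return 0; }
static int tmu_dyn_disable(void) { puts("back to periodic mode"); return 0; }
static void tmu_dyn_reprogram(unsigned long cycles)
{
	printf("next event in %lu cycles\n", cycles);
}

static struct dyn_tick_timer_sketch tmu_dyn_tick = {
	.enable		= tmu_dyn_enable,
	.disable	= tmu_dyn_disable,
	.reprogram	= tmu_dyn_reprogram,
};

int main(void)
{
	if (tmu_dyn_tick.enable() == 0)
		tmu_dyn_tick.state |= DYN_TICK_ENABLED;
	tmu_dyn_tick.reprogram(1000);
	tmu_dyn_tick.disable();
	return 0;
}
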
diff --git a/include/asm-sh/titan.h b/include/asm-sh/titan.h
index 270a4f4..03f3583 100644
--- a/include/asm-sh/titan.h
+++ b/include/asm-sh/titan.h
@@ -1,9 +1,8 @@
 /*
  * Platform definitions for Titan
  */
-
-#ifndef _ASM_SH_TITAN_TITAN_H
-#define _ASM_SH_TITAN_TITAN_H
+#ifndef _ASM_SH_TITAN_H
+#define _ASM_SH_TITAN_H
 
 #define __IO_PREFIX titan
 #include <asm/io_generic.h>
@@ -15,29 +14,4 @@
 #define TITAN_IRQ_MPCIB		11	/* mPCI B */
 #define TITAN_IRQ_USB		11	/* USB */
 
-/*
- * The external interrupt lines, these take up ints 0 - 15 inclusive
- * depending on the priority for the interrupt.  In fact the priority
- * is the interrupt :-)
- */
-#define IRL0_IRQ	0
-#define IRL0_IPR_ADDR	INTC_IPRD
-#define IRL0_IPR_POS	3
-#define IRL0_PRIORITY	8
-
-#define IRL1_IRQ	1
-#define IRL1_IPR_ADDR	INTC_IPRD
-#define IRL1_IPR_POS	2
-#define IRL1_PRIORITY	8
-
-#define IRL2_IRQ	2
-#define IRL2_IPR_ADDR	INTC_IPRD
-#define IRL2_IPR_POS	1
-#define IRL2_PRIORITY	8
-
-#define IRL3_IRQ	3
-#define IRL3_IPR_ADDR	INTC_IPRD
-#define IRL3_IPR_POS	0
-#define IRL3_PRIORITY	8
-
-#endif
+#endif /* __ASM_SH_TITAN_H */
diff --git a/include/asm-sh/unistd.h b/include/asm-sh/unistd.h
index 1c2abde..f982073 100644
--- a/include/asm-sh/unistd.h
+++ b/include/asm-sh/unistd.h
@@ -332,125 +332,6 @@
 
 #ifdef __KERNEL__
 
-#include <linux/err.h>
-
-/* user-visible error numbers are in the range -1 - -MAX_ERRNO:
- * see <asm-sh/errno.h> */
-
-#define __syscall_return(type, res) \
-do { \
-	if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \
-	/* Avoid using "res" which is declared to be in register r0; \
-	   errno might expand to a function call and clobber it.  */ \
-		int __err = -(res); \
-		errno = __err; \
-		res = -1; \
-	} \
-	return (type) (res); \
-} while (0)
-
-/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */
-#define _syscall0(type,name) \
-type name(void) \
-{ \
-register long __sc0 __asm__ ("r3") = __NR_##name; \
-__asm__ __volatile__ ("trapa	#0x10" \
-	: "=z" (__sc0) \
-	: "0" (__sc0) \
-	: "memory" ); \
-__syscall_return(type,__sc0); \
-}
-
-#define _syscall1(type,name,type1,arg1) \
-type name(type1 arg1) \
-{ \
-register long __sc0 __asm__ ("r3") = __NR_##name; \
-register long __sc4 __asm__ ("r4") = (long) arg1; \
-__asm__ __volatile__ ("trapa	#0x11" \
-	: "=z" (__sc0) \
-	: "0" (__sc0), "r" (__sc4) \
-	: "memory"); \
-__syscall_return(type,__sc0); \
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2) \
-type name(type1 arg1,type2 arg2) \
-{ \
-register long __sc0 __asm__ ("r3") = __NR_##name; \
-register long __sc4 __asm__ ("r4") = (long) arg1; \
-register long __sc5 __asm__ ("r5") = (long) arg2; \
-__asm__ __volatile__ ("trapa	#0x12" \
-	: "=z" (__sc0) \
-	: "0" (__sc0), "r" (__sc4), "r" (__sc5) \
-	: "memory"); \
-__syscall_return(type,__sc0); \
-}
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
-type name(type1 arg1,type2 arg2,type3 arg3) \
-{ \
-register long __sc0 __asm__ ("r3") = __NR_##name; \
-register long __sc4 __asm__ ("r4") = (long) arg1; \
-register long __sc5 __asm__ ("r5") = (long) arg2; \
-register long __sc6 __asm__ ("r6") = (long) arg3; \
-__asm__ __volatile__ ("trapa	#0x13" \
-	: "=z" (__sc0) \
-	: "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6) \
-	: "memory"); \
-__syscall_return(type,__sc0); \
-}
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
-{ \
-register long __sc0 __asm__ ("r3") = __NR_##name; \
-register long __sc4 __asm__ ("r4") = (long) arg1; \
-register long __sc5 __asm__ ("r5") = (long) arg2; \
-register long __sc6 __asm__ ("r6") = (long) arg3; \
-register long __sc7 __asm__ ("r7") = (long) arg4; \
-__asm__ __volatile__ ("trapa	#0x14" \
-	: "=z" (__sc0) \
-	: "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6),  \
-	  "r" (__sc7) \
-	: "memory" ); \
-__syscall_return(type,__sc0); \
-}
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
-{ \
-register long __sc3 __asm__ ("r3") = __NR_##name; \
-register long __sc4 __asm__ ("r4") = (long) arg1; \
-register long __sc5 __asm__ ("r5") = (long) arg2; \
-register long __sc6 __asm__ ("r6") = (long) arg3; \
-register long __sc7 __asm__ ("r7") = (long) arg4; \
-register long __sc0 __asm__ ("r0") = (long) arg5; \
-__asm__ __volatile__ ("trapa	#0x15" \
-	: "=z" (__sc0) \
-	: "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6), "r" (__sc7),  \
-	  "r" (__sc3) \
-	: "memory" ); \
-__syscall_return(type,__sc0); \
-}
-
-#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5,type6,arg6) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) \
-{ \
-register long __sc3 __asm__ ("r3") = __NR_##name; \
-register long __sc4 __asm__ ("r4") = (long) arg1; \
-register long __sc5 __asm__ ("r5") = (long) arg2; \
-register long __sc6 __asm__ ("r6") = (long) arg3; \
-register long __sc7 __asm__ ("r7") = (long) arg4; \
-register long __sc0 __asm__ ("r0") = (long) arg5; \
-register long __sc1 __asm__ ("r1") = (long) arg6; \
-__asm__ __volatile__ ("trapa	#0x16" \
-	: "=z" (__sc0) \
-	: "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6), "r" (__sc7),  \
-	  "r" (__sc3), "r" (__sc1) \
-	: "memory" ); \
-__syscall_return(type,__sc0); \
-}
-
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_OLD_STAT
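
The _syscallN() macros removed here (and from the sh64, sparc, sparc64 and v850 headers further down) were primarily of use to userspace built against kernel headers; the usual replacement is the libc syscall(2) wrapper. A minimal userspace example, assuming a Linux toolchain that provides SYS_gettid:

#define _GNU_SOURCE		/* for the syscall() declaration on glibc */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	/* Roughly what the old _syscall0(pid_t, gettid) wrapper provided. */
	long tid = syscall(SYS_gettid);

	printf("tid = %ld\n", tid);
	return 0;
}
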
diff --git a/include/asm-sh64/dma-mapping.h b/include/asm-sh64/dma-mapping.h
index 68e27a8..5efe906 100644
--- a/include/asm-sh64/dma-mapping.h
+++ b/include/asm-sh64/dma-mapping.h
@@ -35,7 +35,7 @@
 	consistent_free(NULL, size, vaddr, dma_handle);
 }
 
-static inline void dma_cache_sync(void *vaddr, size_t size,
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 				  enum dma_data_direction dir)
 {
 	dma_cache_wback_inv((unsigned long)vaddr, size);
diff --git a/include/asm-sh64/setup.h b/include/asm-sh64/setup.h
index ebd42eb..5b07b14 100644
--- a/include/asm-sh64/setup.h
+++ b/include/asm-sh64/setup.h
@@ -1,6 +1,10 @@
 #ifndef __ASM_SH64_SETUP_H
 #define __ASM_SH64_SETUP_H
 
+#define COMMAND_LINE_SIZE 256
+
+#ifdef __KERNEL__
+
 #define PARAM ((unsigned char *)empty_zero_page)
 #define MOUNT_ROOT_RDONLY (*(unsigned long *) (PARAM+0x000))
 #define RAMDISK_FLAGS (*(unsigned long *) (PARAM+0x004))
@@ -12,5 +16,7 @@
 #define COMMAND_LINE ((char *) (PARAM+256))
 #define COMMAND_LINE_SIZE 256
 
+#endif  /*  __KERNEL__  */
+
 #endif /* __ASM_SH64_SETUP_H */
 
diff --git a/include/asm-sh64/unistd.h b/include/asm-sh64/unistd.h
index ee7828b..1f38a7a 100644
--- a/include/asm-sh64/unistd.h
+++ b/include/asm-sh64/unistd.h
@@ -347,148 +347,6 @@
 #ifdef __KERNEL__ 
 
 #define NR_syscalls 321
-#include <linux/err.h>
-
-/* user-visible error numbers are in the range -1 - -MAX_ERRNO:
- * see <asm-sh64/errno.h> */
-
-#define __syscall_return(type, res) \
-do { \
-	/* Note: when returning from kernel the return value is in r9	    \
-	**       This prevents conflicts between return value and arg1      \
-	**       when dispatching signal handler, in other words makes	    \
-	**       life easier in the system call epilogue (see entry.S)      \
-	*/								    \
-        register unsigned long __sr2 __asm__ ("r2") = res;		    \
-	if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) {	    \
-		errno = -(res);						    \
-		__sr2 = -1; 						    \
-	} \
-	return (type) (__sr2); 						    \
-} while (0)
-
-/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */
-
-#define _syscall0(type,name) \
-type name(void) \
-{ \
-register unsigned long __sc0 __asm__ ("r9") = ((0x10 << 16) | __NR_##name); \
-__asm__ __volatile__ ("trapa	%1 !\t\t\t" #name "()"			    \
-	: "=r" (__sc0) 							    \
-	: "r" (__sc0) ); 						    \
-__syscall_return(type,__sc0); 						    \
-}
-
-	/*
-	 * The apparent spurious "dummy" assembler comment is *needed*,
-	 * as without it, the compiler treats the arg<n> variables
-	 * as no longer live just before the asm. The compiler can
-	 * then optimize the storage into any registers it wishes.
-	 * The additional dummy statement forces the compiler to put
-	 * the arguments into the correct registers before the TRAPA.
-	 */
-#define _syscall1(type,name,type1,arg1) \
-type name(type1 arg1) \
-{ \
-register unsigned long __sc0 __asm__ ("r9") = ((0x11 << 16) | __NR_##name); \
-register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1;	    \
-__asm__ __volatile__ ("trapa	%1 !\t\t\t" #name "(%2)"		    \
-	: "=r" (__sc0) 							    \
-	: "r" (__sc0), "r" (__sc2));					    \
-__asm__ __volatile__ ("!dummy	%0 %1"				   	    \
-	:								    \
-	: "r" (__sc0), "r" (__sc2));					    \
-__syscall_return(type,__sc0); 						    \
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2) \
-type name(type1 arg1,type2 arg2) \
-{ \
-register unsigned long __sc0 __asm__ ("r9") = ((0x12 << 16) | __NR_##name); \
-register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1;	    \
-register unsigned long __sc3 __asm__ ("r3") = (unsigned long) arg2;	    \
-__asm__ __volatile__ ("trapa	%1 !\t\t\t" #name "(%2,%3)"		    \
-	: "=r" (__sc0) 							    \
-	: "r" (__sc0), "r" (__sc2), "r" (__sc3) );			    \
-__asm__ __volatile__ ("!dummy	%0 %1 %2"			   	    \
-	:								    \
-	: "r" (__sc0), "r" (__sc2), "r" (__sc3) );			    \
-__syscall_return(type,__sc0); 						    \
-}
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
-type name(type1 arg1,type2 arg2,type3 arg3) \
-{ \
-register unsigned long __sc0 __asm__ ("r9") = ((0x13 << 16) | __NR_##name); \
-register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1;	    \
-register unsigned long __sc3 __asm__ ("r3") = (unsigned long) arg2;	    \
-register unsigned long __sc4 __asm__ ("r4") = (unsigned long) arg3;	    \
-__asm__ __volatile__ ("trapa	%1 !\t\t\t" #name "(%2,%3,%4)"		    \
-	: "=r" (__sc0) 							    \
-	: "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4) );		    \
-__asm__ __volatile__ ("!dummy	%0 %1 %2 %3"			   	    \
-	:								    \
-	: "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4) );	   	    \
-__syscall_return(type,__sc0); 						    \
-}
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
-{ \
-register unsigned long __sc0 __asm__ ("r9") = ((0x14 << 16) | __NR_##name); \
-register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1;	    \
-register unsigned long __sc3 __asm__ ("r3") = (unsigned long) arg2;	    \
-register unsigned long __sc4 __asm__ ("r4") = (unsigned long) arg3;	    \
-register unsigned long __sc5 __asm__ ("r5") = (unsigned long) arg4;	    \
-__asm__ __volatile__ ("trapa	%1 !\t\t\t" #name "(%2,%3,%4,%5)"	    \
-	: "=r" (__sc0) 							    \
-	: "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5) );\
-__asm__ __volatile__ ("!dummy	%0 %1 %2 %3 %4"			   	    \
-	:								    \
-	: "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5) );\
-__syscall_return(type,__sc0); 						    \
-}
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
-{ \
-register unsigned long __sc0 __asm__ ("r9") = ((0x15 << 16) | __NR_##name); \
-register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1;	    \
-register unsigned long __sc3 __asm__ ("r3") = (unsigned long) arg2;	    \
-register unsigned long __sc4 __asm__ ("r4") = (unsigned long) arg3;	    \
-register unsigned long __sc5 __asm__ ("r5") = (unsigned long) arg4;	    \
-register unsigned long __sc6 __asm__ ("r6") = (unsigned long) arg5;	    \
-__asm__ __volatile__ ("trapa	%1 !\t\t\t" #name "(%2,%3,%4,%5,%6)"	    \
-	: "=r" (__sc0) 							    \
-	: "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5),  \
-	  "r" (__sc6));							    \
-__asm__ __volatile__ ("!dummy	%0 %1 %2 %3 %4 %5"		   	    \
-	:								    \
-	: "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5),  \
-	  "r" (__sc6));							    \
-__syscall_return(type,__sc0); 						    \
-}
-
-#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5, type6, arg6) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) \
-{ \
-register unsigned long __sc0 __asm__ ("r9") = ((0x16 << 16) | __NR_##name); \
-register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1;	    \
-register unsigned long __sc3 __asm__ ("r3") = (unsigned long) arg2;	    \
-register unsigned long __sc4 __asm__ ("r4") = (unsigned long) arg3;	    \
-register unsigned long __sc5 __asm__ ("r5") = (unsigned long) arg4;	    \
-register unsigned long __sc6 __asm__ ("r6") = (unsigned long) arg5;	    \
-register unsigned long __sc7 __asm__ ("r7") = (unsigned long) arg6;	    \
-__asm__ __volatile__ ("trapa	%1 !\t\t\t" #name "(%2,%3,%4,%5,%6,%7)"	    \
-	: "=r" (__sc0) 							    \
-	: "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5),  \
-	  "r" (__sc6), "r" (__sc7));					    \
-__asm__ __volatile__ ("!dummy	%0 %1 %2 %3 %4 %5 %6"		   	    \
-	:								    \
-	: "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5),  \
-	  "r" (__sc6), "r" (__sc7));					    \
-__syscall_return(type,__sc0); 						    \
-}
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
diff --git a/include/asm-sparc/unistd.h b/include/asm-sparc/unistd.h
index f7827fa..d5b2f80 100644
--- a/include/asm-sparc/unistd.h
+++ b/include/asm-sparc/unistd.h
@@ -329,136 +329,6 @@
  *          find a free slot in the 0-302 range.
  */
 
-#define _syscall0(type,name) \
-type name(void) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-__asm__ __volatile__ ("t 0x10\n\t" \
-		      "bcc 1f\n\t" \
-		      "mov %%o0, %0\n\t" \
-		      "sub %%g0, %%o0, %0\n\t" \
-		      "1:\n\t" \
-		      : "=r" (__res)\
-		      : "r" (__g1) \
-		      : "o0", "cc"); \
-if (__res < -255 || __res >= 0) \
-    return (type) __res; \
-errno = -__res; \
-return -1; \
-}
-
-#define _syscall1(type,name,type1,arg1) \
-type name(type1 arg1) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-__asm__ __volatile__ ("t 0x10\n\t" \
-		      "bcc 1f\n\t" \
-		      "mov %%o0, %0\n\t" \
-		      "sub %%g0, %%o0, %0\n\t" \
-		      "1:\n\t" \
-		      : "=r" (__res), "=&r" (__o0) \
-		      : "1" (__o0), "r" (__g1) \
-		      : "cc"); \
-if (__res < -255 || __res >= 0) \
-	return (type) __res; \
-errno = -__res; \
-return -1; \
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2) \
-type name(type1 arg1,type2 arg2) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-register long __o1 __asm__ ("o1") = (long)(arg2); \
-__asm__ __volatile__ ("t 0x10\n\t" \
-		      "bcc 1f\n\t" \
-		      "mov %%o0, %0\n\t" \
-		      "sub %%g0, %%o0, %0\n\t" \
-		      "1:\n\t" \
-		      : "=r" (__res), "=&r" (__o0) \
-		      : "1" (__o0), "r" (__o1), "r" (__g1) \
-		      : "cc"); \
-if (__res < -255 || __res >= 0) \
-	return (type) __res; \
-errno = -__res; \
-return -1; \
-}
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
-type name(type1 arg1,type2 arg2,type3 arg3) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-register long __o1 __asm__ ("o1") = (long)(arg2); \
-register long __o2 __asm__ ("o2") = (long)(arg3); \
-__asm__ __volatile__ ("t 0x10\n\t" \
-		      "bcc 1f\n\t" \
-		      "mov %%o0, %0\n\t" \
-		      "sub %%g0, %%o0, %0\n\t" \
-		      "1:\n\t" \
-		      : "=r" (__res), "=&r" (__o0) \
-		      : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__g1) \
-		      : "cc"); \
-if (__res < -255 || __res>=0) \
-	return (type) __res; \
-errno = -__res; \
-return -1; \
-}
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-register long __o1 __asm__ ("o1") = (long)(arg2); \
-register long __o2 __asm__ ("o2") = (long)(arg3); \
-register long __o3 __asm__ ("o3") = (long)(arg4); \
-__asm__ __volatile__ ("t 0x10\n\t" \
-		      "bcc 1f\n\t" \
-		      "mov %%o0, %0\n\t" \
-		      "sub %%g0, %%o0, %0\n\t" \
-		      "1:\n\t" \
-		      : "=r" (__res), "=&r" (__o0) \
-		      : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__o3), "r" (__g1) \
-		      : "cc"); \
-if (__res < -255 || __res>=0) \
-	return (type) __res; \
-errno = -__res; \
-return -1; \
-} 
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
-	  type5,arg5) \
-type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-register long __o1 __asm__ ("o1") = (long)(arg2); \
-register long __o2 __asm__ ("o2") = (long)(arg3); \
-register long __o3 __asm__ ("o3") = (long)(arg4); \
-register long __o4 __asm__ ("o4") = (long)(arg5); \
-__asm__ __volatile__ ("t 0x10\n\t" \
-		      "bcc 1f\n\t" \
-		      "mov %%o0, %0\n\t" \
-		      "sub %%g0, %%o0, %0\n\t" \
-		      "1:\n\t" \
-		      : "=r" (__res), "=&r" (__o0) \
-		      : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__o3), "r" (__o4), "r" (__g1) \
-		      : "cc"); \
-if (__res < -255 || __res>=0) \
-	return (type) __res; \
-errno = -__res; \
-return -1; \
-}
-
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_STAT64
diff --git a/include/asm-sparc64/dma-mapping.h b/include/asm-sparc64/dma-mapping.h
index 27c46fb..2f858a2 100644
--- a/include/asm-sparc64/dma-mapping.h
+++ b/include/asm-sparc64/dma-mapping.h
@@ -181,7 +181,7 @@
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#define dma_is_consistent(d)	(1)
+#define dma_is_consistent(d, h)	(1)
 
 static inline int
 dma_get_cache_alignment(void)
@@ -210,7 +210,7 @@
 }
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
 {
 	/* could define this in terms of the dma_cache ... operations,
diff --git a/include/asm-sparc64/futex.h b/include/asm-sparc64/futex.h
index 7392fc4..876312f 100644
--- a/include/asm-sparc64/futex.h
+++ b/include/asm-sparc64/futex.h
@@ -45,7 +45,7 @@
 	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
 		oparg = 1 << oparg;
 
-	inc_preempt_count();
+	pagefault_disable();
 
 	switch (op) {
 	case FUTEX_OP_SET:
@@ -67,7 +67,7 @@
 		ret = -ENOSYS;
 	}
 
-	dec_preempt_count();
+	pagefault_enable();
 
 	if (!ret) {
 		switch (cmp) {
diff --git a/include/asm-sparc64/pgalloc.h b/include/asm-sparc64/pgalloc.h
index 010f9cd..5891ff7 100644
--- a/include/asm-sparc64/pgalloc.h
+++ b/include/asm-sparc64/pgalloc.h
@@ -13,7 +13,7 @@
 #include <asm/page.h>
 
 /* Page table allocation/freeing. */
-extern kmem_cache_t *pgtable_cache;
+extern struct kmem_cache *pgtable_cache;
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
diff --git a/include/asm-sparc64/unistd.h b/include/asm-sparc64/unistd.h
index 63669da..4704753 100644
--- a/include/asm-sparc64/unistd.h
+++ b/include/asm-sparc64/unistd.h
@@ -332,124 +332,6 @@
  *          find a free slot in the 0-302 range.
  */
 
-#define _syscall0(type,name) \
-type name(void) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-__asm__ __volatile__ ("t 0x6d\n\t" \
-		      "sub %%g0, %%o0, %0\n\t" \
-		      "movcc %%xcc, %%o0, %0\n\t" \
-		      : "=r" (__res)\
-		      : "r" (__g1) \
-		      : "o0", "cc"); \
-if (__res >= 0) \
-    return (type) __res; \
-errno = -__res; \
-return -1; \
-}
-
-#define _syscall1(type,name,type1,arg1) \
-type name(type1 arg1) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-__asm__ __volatile__ ("t 0x6d\n\t" \
-		      "sub %%g0, %%o0, %0\n\t" \
-		      "movcc %%xcc, %%o0, %0\n\t" \
-		      : "=r" (__res), "=&r" (__o0) \
-		      : "1" (__o0), "r" (__g1) \
-		      : "cc"); \
-if (__res >= 0) \
-	return (type) __res; \
-errno = -__res; \
-return -1; \
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2) \
-type name(type1 arg1,type2 arg2) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-register long __o1 __asm__ ("o1") = (long)(arg2); \
-__asm__ __volatile__ ("t 0x6d\n\t" \
-		      "sub %%g0, %%o0, %0\n\t" \
-		      "movcc %%xcc, %%o0, %0\n\t" \
-		      : "=r" (__res), "=&r" (__o0) \
-		      : "1" (__o0), "r" (__o1), "r" (__g1) \
-		      : "cc"); \
-if (__res >= 0) \
-	return (type) __res; \
-errno = -__res; \
-return -1; \
-}
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
-type name(type1 arg1,type2 arg2,type3 arg3) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-register long __o1 __asm__ ("o1") = (long)(arg2); \
-register long __o2 __asm__ ("o2") = (long)(arg3); \
-__asm__ __volatile__ ("t 0x6d\n\t" \
-		      "sub %%g0, %%o0, %0\n\t" \
-		      "movcc %%xcc, %%o0, %0\n\t" \
-		      : "=r" (__res), "=&r" (__o0) \
-		      : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__g1) \
-		      : "cc"); \
-if (__res>=0) \
-	return (type) __res; \
-errno = -__res; \
-return -1; \
-}
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-register long __o1 __asm__ ("o1") = (long)(arg2); \
-register long __o2 __asm__ ("o2") = (long)(arg3); \
-register long __o3 __asm__ ("o3") = (long)(arg4); \
-__asm__ __volatile__ ("t 0x6d\n\t" \
-		      "sub %%g0, %%o0, %0\n\t" \
-		      "movcc %%xcc, %%o0, %0\n\t" \
-		      : "=r" (__res), "=&r" (__o0) \
-		      : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__o3), "r" (__g1) \
-		      : "cc"); \
-if (__res>=0) \
-	return (type) __res; \
-errno = -__res; \
-return -1; \
-} 
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
-	  type5,arg5) \
-type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-register long __o1 __asm__ ("o1") = (long)(arg2); \
-register long __o2 __asm__ ("o2") = (long)(arg3); \
-register long __o3 __asm__ ("o3") = (long)(arg4); \
-register long __o4 __asm__ ("o4") = (long)(arg5); \
-__asm__ __volatile__ ("t 0x6d\n\t" \
-		      "sub %%g0, %%o0, %0\n\t" \
-		      "movcc %%xcc, %%o0, %0\n\t" \
-		      : "=r" (__res), "=&r" (__o0) \
-		      : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__o3), "r" (__o4), "r" (__g1) \
-		      : "cc"); \
-if (__res>=0) \
-	return (type) __res; \
-errno = -__res; \
-return -1; \
-}
-
 /* sysconf options, for SunOS compatibility */
 #define   _SC_ARG_MAX             1
 #define   _SC_CHILD_MAX           2
diff --git a/include/asm-um/dma-mapping.h b/include/asm-um/dma-mapping.h
index babd298..f0ee4fb 100644
--- a/include/asm-um/dma-mapping.h
+++ b/include/asm-um/dma-mapping.h
@@ -94,7 +94,7 @@
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#define dma_is_consistent(d) (1)
+#define dma_is_consistent(d, h) (1)
 
 static inline int
 dma_get_cache_alignment(void)
@@ -112,7 +112,7 @@
 }
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
 {
 	BUG();
diff --git a/include/asm-v850/irq.h b/include/asm-v850/irq.h
index 1bf096d..88687c1 100644
--- a/include/asm-v850/irq.h
+++ b/include/asm-v850/irq.h
@@ -46,8 +46,6 @@
 init_irq_handlers (int base_irq, int num, int interval,
 		   struct hw_interrupt_type *irq_type);
 
-typedef void (*irq_handler_t)(int irq, void *data, struct pt_regs *regs);
-
 /* Handle interrupt IRQ.  REGS are the registers at the time of the
    interrupt.  */
 extern unsigned int handle_irq (int irq, struct pt_regs *regs);
diff --git a/include/asm-v850/unistd.h b/include/asm-v850/unistd.h
index 737401e..2241ed4 100644
--- a/include/asm-v850/unistd.h
+++ b/include/asm-v850/unistd.h
@@ -204,168 +204,8 @@
 #define __NR_gettid		201
 #define __NR_tkill		202
 
-
-/* Syscall protocol:
-   Syscall number in r12, args in r6-r9, r13-r14
-   Return value in r10
-   Trap 0 for `short' syscalls, where all the args can fit in function
-   call argument registers, and trap 1 when there are additional args in
-   r13-r14.  */
-
-#define SYSCALL_NUM	"r12"
-#define SYSCALL_ARG0	"r6"
-#define SYSCALL_ARG1	"r7"
-#define SYSCALL_ARG2	"r8"
-#define SYSCALL_ARG3	"r9"
-#define SYSCALL_ARG4	"r13"
-#define SYSCALL_ARG5	"r14"
-#define SYSCALL_RET	"r10"
-
-#define SYSCALL_SHORT_TRAP	"0"
-#define SYSCALL_LONG_TRAP	"1"
-
-/* Registers clobbered by any syscall.  This _doesn't_ include the syscall
-   number (r12) or the `extended arg' registers (r13, r14), even though
-   they are actually clobbered too (this is because gcc's `asm' statement
-   doesn't allow a clobber to be used as an input or output).  */
-#define SYSCALL_CLOBBERS	"r1", "r5", "r11", "r15", "r16", \
-				"r17", "r18", "r19"
-
-/* Registers clobbered by a `short' syscall.  This includes all clobbers
-   except the syscall number (r12).  */
-#define SYSCALL_SHORT_CLOBBERS	SYSCALL_CLOBBERS, "r13", "r14"
-
 #ifdef __KERNEL__
 
-#include <asm/clinkage.h>
-#include <linux/err.h>
-
-#define __syscall_return(type, res)					      \
-  do {									      \
-	  /* user-visible error numbers are in the range -1 - -MAX_ERRNO:      \
-	     see <asm-v850/errno.h> */					      \
-	  if (__builtin_expect ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO), 0)) { \
-		  errno = -(res);					      \
-		  res = -1;						      \
-	  }								      \
-	  return (type) (res);						      \
-  } while (0)
-
-
-#define _syscall0(type, name)						      \
-type name (void)							      \
-{									      \
-  register unsigned long __syscall __asm__ (SYSCALL_NUM) = __NR_##name;	      \
-  register unsigned long __ret __asm__ (SYSCALL_RET);			      \
-  __asm__ __volatile__ ("trap " SYSCALL_SHORT_TRAP			      \
-			: "=r" (__ret), "=r" (__syscall)	 	      \
-			: "1" (__syscall)				      \
-			: SYSCALL_SHORT_CLOBBERS);			      \
-  __syscall_return (type, __ret);					      \
-}
-
-#define _syscall1(type, name, atype, a)					      \
-type name (atype a)							      \
-{									      \
-  register atype __a __asm__ (SYSCALL_ARG0) = a;			      \
-  register unsigned long __syscall __asm__ (SYSCALL_NUM) = __NR_##name;	      \
-  register unsigned long __ret __asm__ (SYSCALL_RET);			      \
-  __asm__ __volatile__ ("trap " SYSCALL_SHORT_TRAP			      \
-			: "=r" (__ret), "=r" (__syscall)		      \
-			: "1" (__syscall), "r" (__a)			      \
-			: SYSCALL_SHORT_CLOBBERS);			      \
-  __syscall_return (type, __ret);					      \
-}
-
-#define _syscall2(type, name, atype, a, btype, b)			      \
-type name (atype a, btype b)						      \
-{									      \
-  register atype __a __asm__ (SYSCALL_ARG0) = a;			      \
-  register btype __b __asm__ (SYSCALL_ARG1) = b;			      \
-  register unsigned long __syscall __asm__ (SYSCALL_NUM) = __NR_##name;	      \
-  register unsigned long __ret __asm__ (SYSCALL_RET);			      \
-  __asm__ __volatile__ ("trap " SYSCALL_SHORT_TRAP			      \
-			: "=r" (__ret), "=r" (__syscall)		      \
-			: "1" (__syscall), "r" (__a), "r" (__b)		      \
-			: SYSCALL_SHORT_CLOBBERS);			      \
-  __syscall_return (type, __ret);					      \
-}
-
-#define _syscall3(type, name, atype, a, btype, b, ctype, c)		      \
-type name (atype a, btype b, ctype c)					      \
-{									      \
-  register atype __a __asm__ (SYSCALL_ARG0) = a;			      \
-  register btype __b __asm__ (SYSCALL_ARG1) = b;			      \
-  register ctype __c __asm__ (SYSCALL_ARG2) = c;			      \
-  register unsigned long __syscall __asm__ (SYSCALL_NUM) = __NR_##name;	      \
-  register unsigned long __ret __asm__ (SYSCALL_RET);			      \
-  __asm__ __volatile__ ("trap " SYSCALL_SHORT_TRAP			      \
-			: "=r" (__ret), "=r" (__syscall)		      \
-			: "1" (__syscall), "r" (__a), "r" (__b), "r" (__c)    \
-			: SYSCALL_SHORT_CLOBBERS);			      \
-  __syscall_return (type, __ret);					      \
-}
-
-#define _syscall4(type, name, atype, a, btype, b, ctype, c, dtype, d)	      \
-type name (atype a, btype b, ctype c, dtype d)				      \
-{									      \
-  register atype __a __asm__ (SYSCALL_ARG0) = a;			      \
-  register btype __b __asm__ (SYSCALL_ARG1) = b;			      \
-  register ctype __c __asm__ (SYSCALL_ARG2) = c;			      \
-  register dtype __d __asm__ (SYSCALL_ARG3) = d;			      \
-  register unsigned long __syscall __asm__ (SYSCALL_NUM) = __NR_##name;	      \
-  register unsigned long __ret __asm__ (SYSCALL_RET);			      \
-  __asm__ __volatile__ ("trap " SYSCALL_SHORT_TRAP			      \
-			: "=r" (__ret), "=r" (__syscall)		      \
-			: "1" (__syscall),				      \
-			"r" (__a), "r" (__b), "r" (__c), "r" (__d)	      \
-			: SYSCALL_SHORT_CLOBBERS);			      \
-  __syscall_return (type, __ret);					      \
-}
-
-#define _syscall5(type, name, atype, a, btype, b, ctype, c, dtype, d, etype,e)\
-type name (atype a, btype b, ctype c, dtype d, etype e)			      \
-{									      \
-  register atype __a __asm__ (SYSCALL_ARG0) = a;			      \
-  register btype __b __asm__ (SYSCALL_ARG1) = b;			      \
-  register ctype __c __asm__ (SYSCALL_ARG2) = c;			      \
-  register dtype __d __asm__ (SYSCALL_ARG3) = d;			      \
-  register etype __e __asm__ (SYSCALL_ARG4) = e;			      \
-  register unsigned long __syscall __asm__ (SYSCALL_NUM) = __NR_##name;	      \
-  register unsigned long __ret __asm__ (SYSCALL_RET);			      \
-  __asm__ __volatile__ ("trap " SYSCALL_LONG_TRAP			      \
-			: "=r" (__ret), "=r" (__syscall), "=r" (__e)	      \
-			: "1" (__syscall),				      \
-			"r" (__a), "r" (__b), "r" (__c), "r" (__d), "2" (__e) \
-			: SYSCALL_CLOBBERS);				      \
-  __syscall_return (type, __ret);					      \
-}
-
-#define __SYSCALL6_TRAP(syscall, ret, a, b, c, d, e, f)			      \
-  __asm__ __volatile__ ("trap " SYSCALL_LONG_TRAP			      \
-			: "=r" (ret), "=r" (syscall),			      \
- 			"=r" (e), "=r" (f)				      \
-			: "1" (syscall),				      \
-			"r" (a), "r" (b), "r" (c), "r" (d),		      \
-			"2" (e), "3" (f)				      \
-			: SYSCALL_CLOBBERS);
-
-#define _syscall6(type, name, atype, a, btype, b, ctype, c, dtype, d, etype, e, ftype, f) \
-type name (atype a, btype b, ctype c, dtype d, etype e, ftype f)	      \
-{									      \
-  register atype __a __asm__ (SYSCALL_ARG0) = a;			      \
-  register btype __b __asm__ (SYSCALL_ARG1) = b;			      \
-  register ctype __c __asm__ (SYSCALL_ARG2) = c;			      \
-  register dtype __d __asm__ (SYSCALL_ARG3) = d;			      \
-  register etype __e __asm__ (SYSCALL_ARG4) = e;			      \
-  register etype __f __asm__ (SYSCALL_ARG5) = f;			      \
-  register unsigned long __syscall __asm__ (SYSCALL_NUM) = __NR_##name;	      \
-  register unsigned long __ret __asm__ (SYSCALL_RET);			      \
-  __SYSCALL6_TRAP(__syscall, __ret, __a, __b, __c, __d, __e, __f);	      \
-  __syscall_return (type, __ret);					      \
-}
-		
-
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_STAT64
diff --git a/include/asm-x86_64/Kbuild b/include/asm-x86_64/Kbuild
index 6c455b4..ebd7117 100644
--- a/include/asm-x86_64/Kbuild
+++ b/include/asm-x86_64/Kbuild
@@ -11,7 +11,6 @@
 header-y += msr.h
 header-y += prctl.h
 header-y += ptrace-abi.h
-header-y += setup.h
 header-y += sigcontext32.h
 header-y += ucontext.h
 header-y += vsyscall32.h
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
index 626d371..706ca4b 100644
--- a/include/asm-x86_64/atomic.h
+++ b/include/asm-x86_64/atomic.h
@@ -21,7 +21,7 @@
  * on us. We need to use _exactly_ the address the user gave us,
  * not some alias that contains the same information.
  */
-typedef struct { volatile int counter; } atomic_t;
+typedef struct { int counter; } atomic_t;
 
 #define ATOMIC_INIT(i)	{ (i) }
 
diff --git a/include/asm-x86_64/dma-mapping.h b/include/asm-x86_64/dma-mapping.h
index 10174b1..be9ec68 100644
--- a/include/asm-x86_64/dma-mapping.h
+++ b/include/asm-x86_64/dma-mapping.h
@@ -180,12 +180,13 @@
 	return boot_cpu_data.x86_clflush_size;
 }
 
-#define dma_is_consistent(h) 1
+#define dma_is_consistent(d, h) 1
 
 extern int dma_set_mask(struct device *dev, u64 mask);
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction dir)
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+	enum dma_data_direction dir)
 {
 	flush_write_buffers();
 }
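
dma_cache_sync() now carries the struct device pointer on x86-64 as well. A minimal caller sketch, not part of the patch (device and buffer names are made up):

#include <linux/dma-mapping.h>

/* Illustrative: sync a buffer obtained from dma_alloc_noncoherent()
 * before the CPU reads data the device has written into it. */
static void mydev_sync_rx(struct device *dev, void *buf, size_t len)
{
	dma_cache_sync(dev, buf, len, DMA_FROM_DEVICE);
}
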
diff --git a/include/asm-x86_64/futex.h b/include/asm-x86_64/futex.h
index 9804bf0..5cdfb08 100644
--- a/include/asm-x86_64/futex.h
+++ b/include/asm-x86_64/futex.h
@@ -55,7 +55,7 @@
 	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
 		return -EFAULT;
 
-	inc_preempt_count();
+	pagefault_disable();
 
 	switch (op) {
 	case FUTEX_OP_SET:
@@ -78,7 +78,7 @@
 		ret = -ENOSYS;
 	}
 
-	dec_preempt_count();
+	pagefault_enable();
 
 	if (!ret) {
 		switch (cmp) {
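
The futex op above now brackets its in-atomic user access with pagefault_disable()/pagefault_enable() instead of poking the preempt count directly. A hedged sketch of the same pattern outside the futex code (function and variable names are illustrative):

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* Read a user-space word without sleeping on a fault;
 * __copy_from_user_inatomic() fails instead of faulting the page in. */
static int peek_user_word(u32 __user *uaddr, u32 *val)
{
	unsigned long left;

	pagefault_disable();
	left = __copy_from_user_inatomic(val, uaddr, sizeof(*val));
	pagefault_enable();

	return left ? -EFAULT : 0;
}
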
diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
index 7ae7e7d..e17b9ec4 100644
--- a/include/asm-x86_64/smp.h
+++ b/include/asm-x86_64/smp.h
@@ -113,13 +113,6 @@
 #define cpu_physical_id(cpu)		x86_cpu_to_apicid[cpu]
 #else
 #define cpu_physical_id(cpu)		boot_cpu_id
-static inline int smp_call_function_single(int cpuid, void (*func) (void *info),
-				void *info, int retry, int wait)
-{
-	/* Disable interrupts here? */
-	func(info);
-	return 0;
-}
 #endif /* !CONFIG_SMP */
 #endif
 
diff --git a/include/asm-x86_64/spinlock_types.h b/include/asm-x86_64/spinlock_types.h
index 59efe84..4da9345 100644
--- a/include/asm-x86_64/spinlock_types.h
+++ b/include/asm-x86_64/spinlock_types.h
@@ -6,13 +6,13 @@
 #endif
 
 typedef struct {
-	volatile unsigned int slock;
+	unsigned int slock;
 } raw_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED	{ 1 }
 
 typedef struct {
-	volatile unsigned int lock;
+	unsigned int lock;
 } raw_rwlock_t;
 
 #define __RAW_RW_LOCK_UNLOCKED		{ RW_LOCK_BIAS }
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h
index 777288e..c5f596e 100644
--- a/include/asm-x86_64/unistd.h
+++ b/include/asm-x86_64/unistd.h
@@ -622,25 +622,7 @@
 
 #define __NR_syscall_max __NR_move_pages
 
-#ifdef __KERNEL__
-#include <linux/err.h>
-#endif
-
 #ifndef __NO_STUBS
-
-/* user-visible error numbers are in the range -1 - -MAX_ERRNO */
-
-#define __syscall_clobber "r11","rcx","memory" 
-
-#define __syscall_return(type, res) \
-do { \
-	if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \
-		errno = -(res); \
-		res = -1; \
-	} \
-	return (type) (res); \
-} while (0)
-
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_OLD_STAT
 #define __ARCH_WANT_SYS_ALARM
@@ -664,87 +646,6 @@
 #define __ARCH_WANT_SYS_TIME
 #define __ARCH_WANT_COMPAT_SYS_TIME
 
-#define __syscall "syscall"
-
-#define _syscall0(type,name) \
-type name(void) \
-{ \
-long __res; \
-__asm__ volatile (__syscall \
-	: "=a" (__res) \
-	: "0" (__NR_##name) : __syscall_clobber ); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall1(type,name,type1,arg1) \
-type name(type1 arg1) \
-{ \
-long __res; \
-__asm__ volatile (__syscall \
-	: "=a" (__res) \
-	: "0" (__NR_##name),"D" ((long)(arg1)) : __syscall_clobber ); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2) \
-type name(type1 arg1,type2 arg2) \
-{ \
-long __res; \
-__asm__ volatile (__syscall \
-	: "=a" (__res) \
-	: "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)) : __syscall_clobber ); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
-type name(type1 arg1,type2 arg2,type3 arg3) \
-{ \
-long __res; \
-__asm__ volatile (__syscall \
-	: "=a" (__res) \
-	: "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)), \
-		  "d" ((long)(arg3)) : __syscall_clobber); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
-{ \
-long __res; \
-__asm__ volatile ("movq %5,%%r10 ;" __syscall \
-	: "=a" (__res) \
-	: "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)), \
-	  "d" ((long)(arg3)),"g" ((long)(arg4)) : __syscall_clobber,"r10" ); \
-__syscall_return(type,__res); \
-} 
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
-	  type5,arg5) \
-type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
-{ \
-long __res; \
-__asm__ volatile ("movq %5,%%r10 ; movq %6,%%r8 ; " __syscall \
-	: "=a" (__res) \
-	: "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)), \
-	  "d" ((long)(arg3)),"g" ((long)(arg4)),"g" ((long)(arg5)) : \
-	__syscall_clobber,"r8","r10" ); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
-	  type5,arg5,type6,arg6) \
-type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,type6 arg6) \
-{ \
-long __res; \
-__asm__ volatile ("movq %5,%%r10 ; movq %6,%%r8 ; movq %7,%%r9 ; " __syscall \
-	: "=a" (__res) \
-	: "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)), \
-	  "d" ((long)(arg3)), "g" ((long)(arg4)), "g" ((long)(arg5)), \
-	  "g" ((long)(arg6)) : \
-	__syscall_clobber,"r8","r10","r9" ); \
-__syscall_return(type,__res); \
-}
-
 #ifdef __KERNEL__
 #ifndef __ASSEMBLY__
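
With the _syscallN() macros removed from the exported unistd.h headers, code that invoked syscalls by number is expected to go through the C library's syscall(2) wrapper instead. A userspace sketch (illustrative, assuming glibc):

#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

/* Replacement for what _syscall0(pid_t, gettid) used to generate. */
static pid_t my_gettid(void)
{
	return syscall(SYS_gettid);
}
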
 
diff --git a/include/asm-xtensa/dma-mapping.h b/include/asm-xtensa/dma-mapping.h
index c39c91d..82b03b3 100644
--- a/include/asm-xtensa/dma-mapping.h
+++ b/include/asm-xtensa/dma-mapping.h
@@ -170,10 +170,10 @@
 	return L1_CACHE_BYTES;
 }
 
-#define dma_is_consistent(d)	(1)
+#define dma_is_consistent(d, h)	(1)
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
 {
 	consistent_sync(vaddr, size, direction);
diff --git a/include/asm-xtensa/unistd.h b/include/asm-xtensa/unistd.h
index 411f810..2e1a1b9 100644
--- a/include/asm-xtensa/unistd.h
+++ b/include/asm-xtensa/unistd.h
@@ -218,190 +218,6 @@
 
 #define SYSXTENSA_COUNT		   5	/* count of syscall0 functions*/
 
-#ifdef __KERNEL__
-#include <linux/linkage.h>
-
-#define __syscall_return(type, res) return ((type)(res))
-
-/* Tensilica's xt-xcc compiler is much more agressive at code
- * optimization than gcc.  Multiple __asm__ statements are
- * insufficient for xt-xcc because subsequent optimization passes
- * (beyond the front-end that knows of __asm__ statements and other
- * such GNU Extensions to C) can modify the register selection for
- * containment of C variables.
- *
- * xt-xcc cannot modify the contents of a single __asm__ statement, so
- * we create single-asm versions of the syscall macros that are
- * suitable and optimal for both xt-xcc and gcc.
- *
- * Linux takes system-call arguments in registers.  The following
- * design is optimized for user-land apps (e.g., glibc) which
- * typically have a function wrapper around the "syscall" assembly
- * instruction.  It satisfies the Xtensa ABI while minizing argument
- * shifting.
- *
- * The Xtensa ABI and software conventions require the system-call
- * number in a2.  If an argument exists in a2, we move it to the next
- * available register.  Note that for improved efficiency, we do NOT
- * shift all parameters down one register to maintain the original
- * order.
- *
- * At best case (zero arguments), we just write the syscall number to
- * a2.  At worst case (1 to 6 arguments), we move the argument in a2
- * to the next available register, then write the syscall number to
- * a2.
- *
- * For clarity, the following truth table enumerates all possibilities.
- *
- * arguments	syscall number	arg0, arg1, arg2, arg3, arg4, arg5
- * ---------	--------------	----------------------------------
- *	0	      a2
- *	1	      a2	a3
- *	2	      a2	a4,   a3
- *	3	      a2	a5,   a3,   a4
- *	4	      a2	a6,   a3,   a4,   a5
- *	5	      a2	a7,   a3,   a4,   a5,   a6
- *	6	      a2	a8,   a3,   a4,   a5,   a6,   a7
- */
-
-#define _syscall0(type,name) \
-type name(void) \
-{ \
-long __res; \
-__asm__ __volatile__ ( \
-	"  movi  a2, %1 \n" \
-	"  syscall      \n" \
-	"  mov   %0, a2 \n" \
-	: "=a" (__res) \
-	: "i" (__NR_##name) \
-	: "a2" \
-	); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall1(type,name,type0,arg0) \
-type name(type0 arg0) \
-{ \
-long __res; \
-__asm__ __volatile__ ( \
-	"  mov   a3, %2 \n" \
-	"  movi  a2, %1 \n" \
-	"  syscall      \n" \
-	"  mov   %0, a2 \n" \
-	: "=a" (__res) \
-	: "i" (__NR_##name), "a" (arg0) \
-	: "a2", "a3" \
-	); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall2(type,name,type0,arg0,type1,arg1) \
-type name(type0 arg0,type1 arg1) \
-{ \
-long __res; \
-__asm__ __volatile__ ( \
-	"  mov   a4, %2 \n" \
-	"  mov   a3, %3 \n" \
-	"  movi  a2, %1 \n" \
-	"  syscall      \n" \
-	"  mov   %0, a2 \n" \
-	: "=a" (__res) \
-	: "i" (__NR_##name), "a" (arg0), "a" (arg1) \
-	: "a2", "a3", "a4" \
-	); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall3(type,name,type0,arg0,type1,arg1,type2,arg2) \
-type name(type0 arg0,type1 arg1,type2 arg2) \
-{ \
-long __res; \
-__asm__ __volatile__ ( \
-	"  mov   a5, %2 \n" \
-	"  mov   a4, %4 \n" \
-	"  mov   a3, %3 \n" \
-	"  movi  a2, %1 \n" \
-	"  syscall      \n" \
-	"  mov   %0, a2 \n" \
-	: "=a" (__res) \
-	: "i" (__NR_##name), "a" (arg0), "a" (arg1), "a" (arg2) \
-	: "a2", "a3", "a4", "a5" \
-	); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall4(type,name,type0,arg0,type1,arg1,type2,arg2,type3,arg3) \
-type name(type0 arg0,type1 arg1,type2 arg2,type3 arg3) \
-{ \
-long __res; \
-__asm__ __volatile__ ( \
-	"  mov   a6, %2 \n" \
-	"  mov   a5, %5 \n" \
-	"  mov   a4, %4 \n" \
-	"  mov   a3, %3 \n" \
-	"  movi  a2, %1 \n" \
-	"  syscall      \n" \
-	"  mov   %0, a2 \n" \
-	: "=a" (__res) \
-	: "i" (__NR_##name), "a" (arg0), "a" (arg1), "a" (arg2), "a" (arg3) \
-	: "a2", "a3", "a4", "a5", "a6" \
-	); \
-__syscall_return(type,__res); \
-}
-
-/* Note that we save and restore the a7 frame pointer.
- * Including a7 in the clobber list doesn't do what you'd expect.
- */
-#define _syscall5(type,name,type0,arg0,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
-type name(type0 arg0,type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
-{ \
-long __res; \
-__asm__ __volatile__ ( \
-	"  mov   a9, a7 \n" \
-	"  mov   a7, %2 \n" \
-	"  mov   a6, %6 \n" \
-	"  mov   a5, %5 \n" \
-	"  mov   a4, %4 \n" \
-	"  mov   a3, %3 \n" \
-	"  movi  a2, %1 \n" \
-	"  syscall      \n" \
-	"  mov   a7, a9 \n" \
-	"  mov   %0, a2 \n" \
-	: "=a" (__res) \
-	: "i" (__NR_##name), "a" (arg0), "a" (arg1), "a" (arg2), \
-                             "a" (arg3), "a" (arg4) \
-	: "a2", "a3", "a4", "a5", "a6", "a9" \
-	); \
-__syscall_return(type,__res); \
-}
-
-/* Note that we save and restore the a7 frame pointer.
- * Including a7 in the clobber list doesn't do what you'd expect.
- */
-#define _syscall6(type,name,type0,arg0,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
-type name(type0 arg0,type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
-{ \
-long __res; \
-__asm__ __volatile__ ( \
-	"  mov   a9, a7 \n" \
-	"  mov   a8, %2 \n" \
-	"  mov   a7, %7 \n" \
-	"  mov   a6, %6 \n" \
-	"  mov   a5, %5 \n" \
-	"  mov   a4, %4 \n" \
-	"  mov   a3, %3 \n" \
-	"  movi  a2, %1 \n" \
-	"  syscall      \n" \
-	"  mov   a7, a9 \n" \
-	"  mov   %0, a2 \n" \
-	: "=a" (__res) \
-	: "i" (__NR_##name), "a" (arg0), "a" (arg1), "a" (arg2), \
-                             "a" (arg3), "a" (arg4), "a" (arg5)  \
-	: "a2", "a3", "a4", "a5", "a6", "a8", "a9" \
-	); \
-__syscall_return(type,__res); \
-}
-
 /*
  * "Conditional" syscalls
  *
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index ff43312..e618b25 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -221,6 +221,7 @@
 unifdef-y += if_ec.h
 unifdef-y += if_eql.h
 unifdef-y += if_ether.h
+unifdef-y += if_fddi.h
 unifdef-y += if_frad.h
 unifdef-y += if_ltalk.h
 unifdef-y += if_pppox.h
@@ -282,6 +283,7 @@
 unifdef-y += parport.h
 unifdef-y += patchkey.h
 unifdef-y += pci.h
+unifdef-y += personality.h
 unifdef-y += pktcdvd.h
 unifdef-y += pmu.h
 unifdef-y += poll.h
@@ -337,6 +339,7 @@
 unifdef-y += wait.h
 unifdef-y += wanrouter.h
 unifdef-y += watchdog.h
+unifdef-y += wireless.h
 unifdef-y += xfrm.h
 
 objhdr-y += version.h
diff --git a/include/linux/aio.h b/include/linux/aio.h
index 0d71c00..3372ec6 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -111,7 +111,6 @@
 	size_t			ki_nbytes; 	/* copy of iocb->aio_nbytes */
 	char 			__user *ki_buf;	/* remaining iocb->aio_buf */
 	size_t			ki_left; 	/* remaining bytes */
-	long			ki_retried; 	/* just for testing */
 	struct iovec		ki_inline_vec;	/* inline vector */
  	struct iovec		*ki_iovec;
  	unsigned long		ki_nr_segs;
@@ -194,7 +193,7 @@
 
 	struct aio_ring_info	ring_info;
 
-	struct work_struct	wq;
+	struct delayed_work	wq;
 };
 
 /* prototypes */
@@ -238,7 +237,6 @@
 } while (0)
 
 #define io_wait_to_kiocb(wait) container_of(wait, struct kiocb, ki_wait)
-#define is_retried_kiocb(iocb) ((iocb)->ki_retried > 1)
 
 #include <linux/aio_abi.h>
 
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 31e9abb..2275f27 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -119,8 +119,7 @@
 				     unsigned int *_hash_mask,
 				     unsigned long limit);
 
-#define HASH_HIGHMEM	0x00000001	/* Consider highmem? */
-#define HASH_EARLY	0x00000002	/* Allocating during early boot? */
+#define HASH_EARLY	0x00000001	/* Allocating during early boot? */
 
 /* Only NUMA needs hash distribution.
  * IA64 is known to have sufficient vmalloc space.
diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
new file mode 100644
index 0000000..777dbf6
--- /dev/null
+++ b/include/linux/bottom_half.h
@@ -0,0 +1,10 @@
+#ifndef _LINUX_BH_H
+#define _LINUX_BH_H
+
+extern void local_bh_disable(void);
+extern void __local_bh_enable(void);
+extern void _local_bh_enable(void);
+extern void local_bh_enable(void);
+extern void local_bh_enable_ip(unsigned long ip);
+
+#endif /* _LINUX_BH_H */
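
The new <linux/bottom_half.h> only carries the softirq on/off declarations that used to live in interrupt.h (they are dropped from there further down in this diff). A minimal usage sketch, not taken from the patch:

#include <linux/bottom_half.h>

/* Keep local softirqs from running while per-CPU state that is also
 * touched from a softirq handler is being updated. */
static void touch_softirq_shared_state(void)
{
	local_bh_disable();
	/* ... update the shared per-CPU data ... */
	local_bh_enable();
}
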
diff --git a/include/linux/carta_random32.h b/include/linux/carta_random32.h
deleted file mode 100644
index f6f3bd9..0000000
--- a/include/linux/carta_random32.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Fast, simple, yet decent quality random number generator based on
- * a paper by David G. Carta ("Two Fast Implementations of the
- * `Minimal Standard' Random Number Generator," Communications of the
- * ACM, January, 1990).
- *
- * Copyright (c) 2002-2006 Hewlett-Packard Development Company, L.P.
- *	Contributed by Stephane Eranian <eranian@hpl.hp.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
- */
-#ifndef _LINUX_CARTA_RANDOM32_H_
-#define _LINUX_CARTA_RANDOM32_H_
-
-u64 carta_random32(u64 seed);
-
-#endif /* _LINUX_CARTA_RANDOM32_H_ */
diff --git a/include/linux/cciss_ioctl.h b/include/linux/cciss_ioctl.h
index 6e27f42..cb57c30 100644
--- a/include/linux/cciss_ioctl.h
+++ b/include/linux/cciss_ioctl.h
@@ -80,7 +80,7 @@
 #define HWORD __u16
 #define DWORD __u32
 
-#define CISS_MAX_LUN	16	
+#define CISS_MAX_LUN	1024
 
 #define LEVEL2LUN   1   // index into Target(x) structure, due to byte swapping
 #define LEVEL3LUN   0
diff --git a/include/linux/cdev.h b/include/linux/cdev.h
index ee5f53f..f309b00 100644
--- a/include/linux/cdev.h
+++ b/include/linux/cdev.h
@@ -2,6 +2,10 @@
 #define _LINUX_CDEV_H
 #ifdef __KERNEL__
 
+#include <linux/kobject.h>
+#include <linux/kdev_t.h>
+#include <linux/list.h>
+
 struct cdev {
 	struct kobject kobj;
 	struct module *owner;
diff --git a/include/linux/connector.h b/include/linux/connector.h
index 4c02119..3ea1cd5 100644
--- a/include/linux/connector.h
+++ b/include/linux/connector.h
@@ -133,7 +133,7 @@
 struct cn_callback_entry {
 	struct list_head callback_entry;
 	struct cn_callback *cb;
-	struct work_struct work;
+	struct delayed_work work;
 	struct cn_queue_dev *pdev;
 
 	struct cn_callback_id id;
@@ -170,7 +170,7 @@
 
 int cn_cb_equal(struct cb_id *, struct cb_id *);
 
-void cn_queue_wrapper(void *data);
+void cn_queue_wrapper(struct work_struct *work);
 
 extern int cn_already_initialized;
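
cn_queue_wrapper() now follows the reworked workqueue convention: the handler receives the work item itself and the embedded item becomes a struct delayed_work. A hedged sketch of that convention with made-up names; such an item would be set up with INIT_DELAYED_WORK() and queued with schedule_delayed_work():

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_ctx {
	struct delayed_work work;
	int pending;
};

/* New-style handler: recover the container with container_of()
 * instead of receiving a void *data cookie. */
static void my_work_fn(struct work_struct *work)
{
	struct my_ctx *ctx = container_of(work, struct my_ctx, work.work);

	ctx->pending = 0;
}
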
 
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index ad90340..bfb5202 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -24,6 +24,7 @@
 #include <linux/compiler.h>
 #include <linux/cpumask.h>
 #include <asm/semaphore.h>
+#include <linux/mutex.h>
 
 struct cpu {
 	int node_id;		/* The node which contains the CPU */
@@ -74,6 +75,17 @@
 
 #ifdef CONFIG_HOTPLUG_CPU
 /* Stop CPUs going up and down. */
+
+static inline void cpuhotplug_mutex_lock(struct mutex *cpu_hp_mutex)
+{
+	mutex_lock(cpu_hp_mutex);
+}
+
+static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex)
+{
+	mutex_unlock(cpu_hp_mutex);
+}
+
 extern void lock_cpu_hotplug(void);
 extern void unlock_cpu_hotplug(void);
 #define hotcpu_notifier(fn, pri) {				\
@@ -85,17 +97,24 @@
 #define unregister_hotcpu_notifier(nb)	unregister_cpu_notifier(nb)
 int cpu_down(unsigned int cpu);
 #define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
-#else
+
+#else		/* CONFIG_HOTPLUG_CPU */
+
+static inline void cpuhotplug_mutex_lock(struct mutex *cpu_hp_mutex)
+{ }
+static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex)
+{ }
+
 #define lock_cpu_hotplug()	do { } while (0)
 #define unlock_cpu_hotplug()	do { } while (0)
 #define lock_cpu_hotplug_interruptible() 0
-#define hotcpu_notifier(fn, pri)	do { } while (0)
-#define register_hotcpu_notifier(nb)	do { } while (0)
-#define unregister_hotcpu_notifier(nb)	do { } while (0)
+#define hotcpu_notifier(fn, pri)	do { (void)(fn); } while (0)
+#define register_hotcpu_notifier(nb)	do { (void)(nb); } while (0)
+#define unregister_hotcpu_notifier(nb)	do { (void)(nb); } while (0)
 
 /* CPUs don't go offline once they're online w/o CONFIG_HOTPLUG_CPU */
 static inline int cpu_is_offline(int cpu) { return 0; }
-#endif
+#endif		/* CONFIG_HOTPLUG_CPU */
 
 #ifdef CONFIG_SUSPEND_SMP
 extern int disable_nonboot_cpus(void);
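
The !CONFIG_HOTPLUG_CPU stubs now evaluate their arguments, so a notifier that is only referenced through register_hotcpu_notifier() no longer triggers "defined but not used" warnings. A hedged sketch of such a notifier (names and bookkeeping are illustrative):

#include <linux/cpu.h>
#include <linux/notifier.h>

static int my_online_cpus;

static int my_cpu_callback(struct notifier_block *nb,
			   unsigned long action, void *hcpu)
{
	if (action == CPU_ONLINE)
		my_online_cpus++;
	else if (action == CPU_DEAD)
		my_online_cpus--;
	return NOTIFY_OK;
}

static struct notifier_block my_cpu_nb = {
	.notifier_call = my_cpu_callback,
};
/* registered from an init path with register_hotcpu_notifier(&my_cpu_nb) */
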
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 4d8adf6..8821e1f 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -23,6 +23,7 @@
 extern void cpuset_exit(struct task_struct *p);
 extern cpumask_t cpuset_cpus_allowed(struct task_struct *p);
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
+#define cpuset_current_mems_allowed (current->mems_allowed)
 void cpuset_init_current_mems_allowed(void);
 void cpuset_update_task_memory_state(void);
 #define cpuset_nodes_subset_current_mems_allowed(nodes) \
@@ -45,7 +46,7 @@
 extern int cpuset_memory_pressure_enabled;
 extern void __cpuset_memory_pressure_bump(void);
 
-extern struct file_operations proc_cpuset_operations;
+extern const struct file_operations proc_cpuset_operations;
 extern char *cpuset_task_status_allowed(struct task_struct *task, char *buffer);
 
 extern void cpuset_lock(void);
@@ -83,6 +84,7 @@
 	return node_possible_map;
 }
 
+#define cpuset_current_mems_allowed (node_online_map)
 static inline void cpuset_init_current_mems_allowed(void) {}
 static inline void cpuset_update_task_memory_state(void) {}
 #define cpuset_nodes_subset_current_mems_allowed(nodes) (1)
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index 952bee7..a1c10b0 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -24,7 +24,7 @@
 	int __ret = 0;							\
 									\
 	if (unlikely(c)) {						\
-		if (debug_locks_off())					\
+		if (debug_locks_silent || debug_locks_off())		\
 			WARN_ON(1);					\
 		__ret = 1;						\
 	}								\
diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
index 561e2a7..55d1ca5 100644
--- a/include/linux/delayacct.h
+++ b/include/linux/delayacct.h
@@ -30,7 +30,7 @@
 #ifdef CONFIG_TASK_DELAY_ACCT
 
 extern int delayacct_on;	/* Delay accounting turned on/off */
-extern kmem_cache_t *delayacct_cache;
+extern struct kmem_cache *delayacct_cache;
 extern void delayacct_init(void);
 extern void __delayacct_tsk_init(struct task_struct *);
 extern void __delayacct_tsk_exit(struct task_struct *);
diff --git a/include/linux/device.h b/include/linux/device.h
index 583a341..49ab53c 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -371,6 +371,9 @@
 					   core doesn't touch it */
 	struct dev_pm_info	power;
 
+#ifdef CONFIG_NUMA
+	int		numa_node;	/* NUMA node this device is close to */
+#endif
 	u64		*dma_mask;	/* dma mask (if dma'able device) */
 	u64		coherent_dma_mask;/* Like dma_mask, but for
 					     alloc_coherent mappings as
@@ -394,6 +397,25 @@
 	void	(*release)(struct device * dev);
 };
 
+#ifdef CONFIG_NUMA
+static inline int dev_to_node(struct device *dev)
+{
+	return dev->numa_node;
+}
+static inline void set_dev_node(struct device *dev, int node)
+{
+	dev->numa_node = node;
+}
+#else
+static inline int dev_to_node(struct device *dev)
+{
+	return -1;
+}
+static inline void set_dev_node(struct device *dev, int node)
+{
+}
+#endif
+
 static inline void *
 dev_get_drvdata (struct device *dev)
 {
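
dev_to_node()/set_dev_node() expose which NUMA node a device sits on (-1 when NUMA is off or unknown). A hedged sketch of the intended use, placing per-device data near the device:

#include <linux/device.h>
#include <linux/slab.h>

/* Illustrative: allocate driver state on the device's home node;
 * node -1 is treated as "no preference" by the allocator. */
static void *alloc_near_device(struct device *dev, size_t size)
{
	return kmalloc_node(size, GFP_KERNEL, dev_to_node(dev));
}
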
diff --git a/include/linux/elf.h b/include/linux/elf.h
index 743d5c8..60713e6 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -6,6 +6,8 @@
 #include <linux/elf-em.h>
 #include <asm/elf.h>
 
+struct file;
+
 #ifndef elf_read_implies_exec
   /* Executables for which elf_read_implies_exec() returns TRUE will
      have the READ_IMPLIES_EXEC personality flag set automatically.
@@ -358,6 +360,7 @@
 #define elfhdr		elf32_hdr
 #define elf_phdr	elf32_phdr
 #define elf_note	elf32_note
+#define elf_addr_t	Elf32_Off
 
 #else
 
@@ -365,6 +368,7 @@
 #define elfhdr		elf64_hdr
 #define elf_phdr	elf64_phdr
 #define elf_note	elf64_note
+#define elf_addr_t	Elf64_Off
 
 #endif
 
diff --git a/include/linux/ext3_jbd.h b/include/linux/ext3_jbd.h
index ce0e610..8c43b13 100644
--- a/include/linux/ext3_jbd.h
+++ b/include/linux/ext3_jbd.h
@@ -109,74 +109,32 @@
  * been done yet.
  */
 
-void ext3_journal_abort_handle(const char *caller, const char *err_fn,
-		struct buffer_head *bh, handle_t *handle, int err);
-
-static inline int
-__ext3_journal_get_undo_access(const char *where, handle_t *handle,
-				struct buffer_head *bh)
-{
-	int err = journal_get_undo_access(handle, bh);
-	if (err)
-		ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
-	return err;
-}
-
-static inline int
-__ext3_journal_get_write_access(const char *where, handle_t *handle,
-				struct buffer_head *bh)
-{
-	int err = journal_get_write_access(handle, bh);
-	if (err)
-		ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
-	return err;
-}
-
-static inline void
-ext3_journal_release_buffer(handle_t *handle, struct buffer_head *bh)
+static inline void ext3_journal_release_buffer(handle_t *handle,
+						struct buffer_head *bh)
 {
 	journal_release_buffer(handle, bh);
 }
 
-static inline int
-__ext3_journal_forget(const char *where, handle_t *handle, struct buffer_head *bh)
-{
-	int err = journal_forget(handle, bh);
-	if (err)
-		ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
-	return err;
-}
+void ext3_journal_abort_handle(const char *caller, const char *err_fn,
+		struct buffer_head *bh, handle_t *handle, int err);
 
-static inline int
-__ext3_journal_revoke(const char *where, handle_t *handle,
-		      unsigned long blocknr, struct buffer_head *bh)
-{
-	int err = journal_revoke(handle, blocknr, bh);
-	if (err)
-		ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
-	return err;
-}
+int __ext3_journal_get_undo_access(const char *where, handle_t *handle,
+				struct buffer_head *bh);
 
-static inline int
-__ext3_journal_get_create_access(const char *where,
-				 handle_t *handle, struct buffer_head *bh)
-{
-	int err = journal_get_create_access(handle, bh);
-	if (err)
-		ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
-	return err;
-}
+int __ext3_journal_get_write_access(const char *where, handle_t *handle,
+				struct buffer_head *bh);
 
-static inline int
-__ext3_journal_dirty_metadata(const char *where,
-			      handle_t *handle, struct buffer_head *bh)
-{
-	int err = journal_dirty_metadata(handle, bh);
-	if (err)
-		ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
-	return err;
-}
+int __ext3_journal_forget(const char *where, handle_t *handle,
+				struct buffer_head *bh);
 
+int __ext3_journal_revoke(const char *where, handle_t *handle,
+				unsigned long blocknr, struct buffer_head *bh);
+
+int __ext3_journal_get_create_access(const char *where,
+				handle_t *handle, struct buffer_head *bh);
+
+int __ext3_journal_dirty_metadata(const char *where,
+				handle_t *handle, struct buffer_head *bh);
 
 #define ext3_journal_get_undo_access(handle, bh) \
 	__ext3_journal_get_undo_access(__FUNCTION__, (handle), (bh))
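
The ext3 journalling wrappers above become plain declarations, with the bodies expected to move out of line into fs/ext3. For reference, a sketch of what one looks like after the move, reconstructed from the inline version removed in this hunk:

int __ext3_journal_get_write_access(const char *where, handle_t *handle,
				    struct buffer_head *bh)
{
	int err = journal_get_write_access(handle, bh);
	if (err)
		ext3_journal_abort_handle(where, __FUNCTION__, bh, handle, err);
	return err;
}
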
diff --git a/include/linux/ext4_jbd2.h b/include/linux/ext4_jbd2.h
index 72dd631..d716e63 100644
--- a/include/linux/ext4_jbd2.h
+++ b/include/linux/ext4_jbd2.h
@@ -114,74 +114,32 @@
  * been done yet.
  */
 
-void ext4_journal_abort_handle(const char *caller, const char *err_fn,
-		struct buffer_head *bh, handle_t *handle, int err);
-
-static inline int
-__ext4_journal_get_undo_access(const char *where, handle_t *handle,
-				struct buffer_head *bh)
-{
-	int err = jbd2_journal_get_undo_access(handle, bh);
-	if (err)
-		ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
-	return err;
-}
-
-static inline int
-__ext4_journal_get_write_access(const char *where, handle_t *handle,
-				struct buffer_head *bh)
-{
-	int err = jbd2_journal_get_write_access(handle, bh);
-	if (err)
-		ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
-	return err;
-}
-
-static inline void
-ext4_journal_release_buffer(handle_t *handle, struct buffer_head *bh)
+static inline void ext4_journal_release_buffer(handle_t *handle,
+						struct buffer_head *bh)
 {
 	jbd2_journal_release_buffer(handle, bh);
 }
 
-static inline int
-__ext4_journal_forget(const char *where, handle_t *handle, struct buffer_head *bh)
-{
-	int err = jbd2_journal_forget(handle, bh);
-	if (err)
-		ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
-	return err;
-}
+void ext4_journal_abort_handle(const char *caller, const char *err_fn,
+		struct buffer_head *bh, handle_t *handle, int err);
 
-static inline int
-__ext4_journal_revoke(const char *where, handle_t *handle,
-		      ext4_fsblk_t blocknr, struct buffer_head *bh)
-{
-	int err = jbd2_journal_revoke(handle, blocknr, bh);
-	if (err)
-		ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
-	return err;
-}
+int __ext4_journal_get_undo_access(const char *where, handle_t *handle,
+				struct buffer_head *bh);
 
-static inline int
-__ext4_journal_get_create_access(const char *where,
-				 handle_t *handle, struct buffer_head *bh)
-{
-	int err = jbd2_journal_get_create_access(handle, bh);
-	if (err)
-		ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
-	return err;
-}
+int __ext4_journal_get_write_access(const char *where, handle_t *handle,
+				struct buffer_head *bh);
 
-static inline int
-__ext4_journal_dirty_metadata(const char *where,
-			      handle_t *handle, struct buffer_head *bh)
-{
-	int err = jbd2_journal_dirty_metadata(handle, bh);
-	if (err)
-		ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
-	return err;
-}
+int __ext4_journal_forget(const char *where, handle_t *handle,
+				struct buffer_head *bh);
 
+int __ext4_journal_revoke(const char *where, handle_t *handle,
+				ext4_fsblk_t blocknr, struct buffer_head *bh);
+
+int __ext4_journal_get_create_access(const char *where,
+				handle_t *handle, struct buffer_head *bh);
+
+int __ext4_journal_dirty_metadata(const char *where,
+				handle_t *handle, struct buffer_head *bh);
 
 #define ext4_journal_get_undo_access(handle, bh) \
 	__ext4_journal_get_undo_access(__FUNCTION__, (handle), (bh))
diff --git a/include/linux/file.h b/include/linux/file.h
index 74183e6..6e77b91 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -64,6 +64,8 @@
 
 #define files_fdtable(files) (rcu_dereference((files)->fdt))
 
+extern struct kmem_cache *filp_cachep;
+
 extern void FASTCALL(__fput(struct file *));
 extern void FASTCALL(fput(struct file *));
 
@@ -114,4 +116,6 @@
 void FASTCALL(put_files_struct(struct files_struct *fs));
 void reset_files_struct(struct task_struct *, struct files_struct *);
 
+extern struct kmem_cache *files_cachep;
+
 #endif /* __LINUX_FILE_H */
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
new file mode 100644
index 0000000..6e05e3e
--- /dev/null
+++ b/include/linux/freezer.h
@@ -0,0 +1,87 @@
+/* Freezer declarations */
+
+#ifdef CONFIG_PM
+/*
+ * Check if a process has been frozen
+ */
+static inline int frozen(struct task_struct *p)
+{
+	return p->flags & PF_FROZEN;
+}
+
+/*
+ * Check if there is a request to freeze a process
+ */
+static inline int freezing(struct task_struct *p)
+{
+	return p->flags & PF_FREEZE;
+}
+
+/*
+ * Request that a process be frozen
+ * FIXME: SMP problem. We may not modify other process' flags!
+ */
+static inline void freeze(struct task_struct *p)
+{
+	p->flags |= PF_FREEZE;
+}
+
+/*
+ * Sometimes we may need to cancel the previous 'freeze' request
+ */
+static inline void do_not_freeze(struct task_struct *p)
+{
+	p->flags &= ~PF_FREEZE;
+}
+
+/*
+ * Wake up a frozen process
+ */
+static inline int thaw_process(struct task_struct *p)
+{
+	if (frozen(p)) {
+		p->flags &= ~PF_FROZEN;
+		wake_up_process(p);
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * freezing is complete, mark process as frozen
+ */
+static inline void frozen_process(struct task_struct *p)
+{
+	p->flags = (p->flags & ~PF_FREEZE) | PF_FROZEN;
+}
+
+extern void refrigerator(void);
+extern int freeze_processes(void);
+extern void thaw_processes(void);
+
+static inline int try_to_freeze(void)
+{
+	if (freezing(current)) {
+		refrigerator();
+		return 1;
+	} else
+		return 0;
+}
+
+extern void thaw_some_processes(int all);
+
+#else
+static inline int frozen(struct task_struct *p) { return 0; }
+static inline int freezing(struct task_struct *p) { return 0; }
+static inline void freeze(struct task_struct *p) { BUG(); }
+static inline int thaw_process(struct task_struct *p) { return 1; }
+static inline void frozen_process(struct task_struct *p) { BUG(); }
+
+static inline void refrigerator(void) {}
+static inline int freeze_processes(void) { BUG(); return 0; }
+static inline void thaw_processes(void) {}
+
+static inline int try_to_freeze(void) { return 0; }
+
+
+#endif
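
The new <linux/freezer.h> gathers the process-freezer helpers; try_to_freeze() is what a freezable kernel thread calls to park itself during suspend. A hedged sketch of the usual loop (names are illustrative):

#include <linux/sched.h>
#include <linux/freezer.h>
#include <linux/kthread.h>

/* Typical freezable kernel-thread loop. */
static int my_kthread(void *unused)
{
	while (!kthread_should_stop()) {
		try_to_freeze();
		/* ... do one unit of work ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}
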
diff --git a/include/linux/fs.h b/include/linux/fs.h
index cac7b1e..70b99fb 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -543,19 +543,22 @@
 	struct list_head	i_dentry;
 	unsigned long		i_ino;
 	atomic_t		i_count;
-	umode_t			i_mode;
 	unsigned int		i_nlink;
 	uid_t			i_uid;
 	gid_t			i_gid;
 	dev_t			i_rdev;
+	unsigned long		i_version;
 	loff_t			i_size;
+#ifdef __NEED_I_SIZE_ORDERED
+	seqcount_t		i_size_seqcount;
+#endif
 	struct timespec		i_atime;
 	struct timespec		i_mtime;
 	struct timespec		i_ctime;
 	unsigned int		i_blkbits;
-	unsigned long		i_version;
 	blkcnt_t		i_blocks;
 	unsigned short          i_bytes;
+	umode_t			i_mode;
 	spinlock_t		i_lock;	/* i_blocks, i_bytes, maybe i_size */
 	struct mutex		i_mutex;
 	struct rw_semaphore	i_alloc_sem;
@@ -598,9 +601,6 @@
 	void			*i_security;
 #endif
 	void			*i_private; /* fs or device private pointer */
-#ifdef __NEED_I_SIZE_ORDERED
-	seqcount_t		i_size_seqcount;
-#endif
 };
 
 /*
@@ -636,7 +636,7 @@
  * cmpxchg8b without the need of the lock prefix). For SMP compiles
  * and 64bit archs it makes no difference if preempt is enabled or not.
  */
-static inline loff_t i_size_read(struct inode *inode)
+static inline loff_t i_size_read(const struct inode *inode)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 	loff_t i_size;
@@ -679,12 +679,12 @@
 #endif
 }
 
-static inline unsigned iminor(struct inode *inode)
+static inline unsigned iminor(const struct inode *inode)
 {
 	return MINOR(inode->i_rdev);
 }
 
-static inline unsigned imajor(struct inode *inode)
+static inline unsigned imajor(const struct inode *inode)
 {
 	return MAJOR(inode->i_rdev);
 }
@@ -1481,7 +1481,9 @@
 extern void __init vfs_caches_init_early(void);
 extern void __init vfs_caches_init(unsigned long);
 
-#define __getname()	kmem_cache_alloc(names_cachep, SLAB_KERNEL)
+extern struct kmem_cache *names_cachep;
+
+#define __getname()	kmem_cache_alloc(names_cachep, GFP_KERNEL)
 #define __putname(name) kmem_cache_free(names_cachep, (void *)(name))
 #ifndef CONFIG_AUDITSYSCALL
 #define putname(name)   __putname(name)
diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
index c623d12..11a36ce 100644
--- a/include/linux/fs_struct.h
+++ b/include/linux/fs_struct.h
@@ -18,6 +18,8 @@
 	.umask		= 0022, \
 }
 
+extern struct kmem_cache *fs_cachep;
+
 extern void exit_fs(struct task_struct *);
 extern void set_fs_altroot(void);
 extern void set_fs_root(struct fs_struct *, struct vfsmount *, struct dentry *);
diff --git a/include/linux/fuse.h b/include/linux/fuse.h
index 9fc48a6..534744e 100644
--- a/include/linux/fuse.h
+++ b/include/linux/fuse.h
@@ -15,7 +15,7 @@
 #define FUSE_KERNEL_VERSION 7
 
 /** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 7
+#define FUSE_KERNEL_MINOR_VERSION 8
 
 /** The node ID of the root inode */
 #define FUSE_ROOT_ID 1
@@ -92,6 +92,11 @@
 #define FUSE_ASYNC_READ		(1 << 0)
 #define FUSE_POSIX_LOCKS	(1 << 1)
 
+/**
+ * Release flags
+ */
+#define FUSE_RELEASE_FLUSH	(1 << 0)
+
 enum fuse_opcode {
 	FUSE_LOOKUP	   = 1,
 	FUSE_FORGET	   = 2,  /* no reply */
@@ -127,6 +132,8 @@
 	FUSE_ACCESS        = 34,
 	FUSE_CREATE        = 35,
 	FUSE_INTERRUPT     = 36,
+	FUSE_BMAP          = 37,
+	FUSE_DESTROY       = 38,
 };
 
 /* The read buffer is required to be at least 8k, but may be much larger */
@@ -205,12 +212,13 @@
 struct fuse_release_in {
 	__u64	fh;
 	__u32	flags;
-	__u32	padding;
+	__u32	release_flags;
+	__u64	lock_owner;
 };
 
 struct fuse_flush_in {
 	__u64	fh;
-	__u32	flush_flags;
+	__u32	unused;
 	__u32	padding;
 	__u64	lock_owner;
 };
@@ -296,6 +304,16 @@
 	__u64	unique;
 };
 
+struct fuse_bmap_in {
+	__u64	block;
+	__u32	blocksize;
+	__u32	padding;
+};
+
+struct fuse_bmap_out {
+	__u64	block;
+};
+
 struct fuse_in_header {
 	__u32	len;
 	__u32	opcode;
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index bf2b6bc..00c314a 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -116,6 +116,9 @@
 #ifndef HAVE_ARCH_FREE_PAGE
 static inline void arch_free_page(struct page *page, int order) { }
 #endif
+#ifndef HAVE_ARCH_ALLOC_PAGE
+static inline void arch_alloc_page(struct page *page, int order) { }
+#endif
 
 extern struct page *
 FASTCALL(__alloc_pages(gfp_t, unsigned int, struct zonelist *));
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index fd7d12d..3d8768b 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -3,6 +3,7 @@
 
 #include <linux/fs.h>
 #include <linux/mm.h>
+#include <linux/uaccess.h>
 
 #include <asm/cacheflush.h>
 
@@ -41,9 +42,10 @@
 
 #define kunmap(page) do { (void) (page); } while (0)
 
-#define kmap_atomic(page, idx)		page_address(page)
-#define kunmap_atomic(addr, idx)	do { } while (0)
-#define kmap_atomic_pfn(pfn, idx)	page_address(pfn_to_page(pfn))
+#define kmap_atomic(page, idx) \
+	({ pagefault_disable(); page_address(page); })
+#define kunmap_atomic(addr, idx)	do { pagefault_enable(); } while (0)
+#define kmap_atomic_pfn(pfn, idx)	kmap_atomic(pfn_to_page(pfn), (idx))
 #define kmap_atomic_to_page(ptr)	virt_to_page(ptr)
 #endif
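
Even on !CONFIG_HIGHMEM builds, kmap_atomic() now disables pagefaults, so the usage rules are uniform across configurations. A hedged sketch of the usual paired use (nothing that can sleep may sit between the two calls):

#include <linux/highmem.h>
#include <linux/string.h>
#include <asm/kmap_types.h>

/* Illustrative: zero a page through a short-lived atomic mapping. */
static void zero_one_page(struct page *page)
{
	void *addr = kmap_atomic(page, KM_USER0);

	memset(addr, 0, PAGE_SIZE);
	kunmap_atomic(addr, KM_USER0);
}
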
 
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index ace64e5..a60995a 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -35,6 +35,7 @@
 
 pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr);
 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
 struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
 			      int write);
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index c115e9e..52f53e2 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -461,7 +461,7 @@
 	int (*reply) (struct i2o_controller *, u32, struct i2o_message *);
 
 	/* Event handler */
-	void (*event) (struct i2o_event *);
+	work_func_t event;
 
 	struct workqueue_struct *event_queue;	/* Event queue */
 
@@ -490,7 +490,7 @@
  */
 struct i2o_pool {
 	char *name;
-	kmem_cache_t *slab;
+	struct kmem_cache *slab;
 	mempool_t *mempool;
 };
 
@@ -986,7 +986,8 @@
 
 /**
  *	i2o_driver_notify_controller_add - Send notification of added controller
- *					   to a single I2O driver
+ *	@drv: I2O driver
+ *	@c: I2O controller
  *
  *	Send notification of added controller to a single registered driver.
  */
@@ -998,8 +999,9 @@
 };
 
 /**
- *	i2o_driver_notify_controller_remove - Send notification of removed
- *					      controller to a single I2O driver
+ *	i2o_driver_notify_controller_remove - Send notification of removed controller
+ *	@drv: I2O driver
+ *	@c: I2O controller
  *
  *	Send notification of removed controller to a single registered driver.
  */
@@ -1011,8 +1013,9 @@
 };
 
 /**
- *	i2o_driver_notify_device_add - Send notification of added device to a
- *				       single I2O driver
+ *	i2o_driver_notify_device_add - Send notification of added device
+ *	@drv: I2O driver
+ *	@i2o_dev: the added i2o_device
  *
  *	Send notification of added device to a single registered driver.
  */
@@ -1025,7 +1028,8 @@
 
 /**
  *	i2o_driver_notify_device_remove - Send notification of removed device
- *					  to a single I2O driver
+ *	@drv: I2O driver
+ *	@i2o_dev: the removed i2o_device
  *
  *	Send notification of removed device to a single registered driver.
  */
@@ -1148,7 +1152,7 @@
 /**
  * 	i2o_msg_post_wait - Post and wait a message and wait until return
  *	@c: controller
- *	@m: message to post
+ *	@msg: message to post
  *	@timeout: time in seconds to wait
  *
  * 	This API allows an OSM to post a message and then be told whether or
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 33c5daa..733790d 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -73,7 +73,7 @@
 extern struct nsproxy init_nsproxy;
 #define INIT_NSPROXY(nsproxy) {						\
 	.count		= ATOMIC_INIT(1),				\
-	.nslock		= SPIN_LOCK_UNLOCKED,				\
+	.nslock		= __SPIN_LOCK_UNLOCKED(nsproxy.nslock),		\
 	.uts_ns		= &init_uts_ns,					\
 	.namespace	= NULL,						\
 	INIT_IPC_NS(ipc_ns)						\
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 5b83e7b..de7593f 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -11,6 +11,7 @@
 #include <linux/hardirq.h>
 #include <linux/sched.h>
 #include <linux/irqflags.h>
+#include <linux/bottom_half.h>
 #include <asm/atomic.h>
 #include <asm/ptrace.h>
 #include <asm/system.h>
@@ -217,12 +218,6 @@
 #define save_and_cli(x)	save_and_cli(&x)
 #endif /* CONFIG_SMP */
 
-extern void local_bh_disable(void);
-extern void __local_bh_enable(void);
-extern void _local_bh_enable(void);
-extern void local_bh_enable(void);
-extern void local_bh_enable_ip(unsigned long ip);
-
 /* PLEASE, avoid to allocate new softirqs, if you need not _really_ high
    frequency threaded job scheduling. For almost all the purposes
    tasklets are more than enough. F.e. all serial device BHs et
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h
index 796ca00..7a9db390 100644
--- a/include/linux/ipmi.h
+++ b/include/linux/ipmi.h
@@ -208,6 +208,15 @@
    code as the first byte of the incoming data, unlike a response. */
 
 
+/*
+ * Modes for ipmi_set_maint_mode() and the userland IOCTL.  The AUTO
+ * setting is the default and means it will be set on certain
+ * commands.  Hard setting it on and off will override automatic
+ * operation.
+ */
+#define IPMI_MAINTENANCE_MODE_AUTO	0
+#define IPMI_MAINTENANCE_MODE_OFF	1
+#define IPMI_MAINTENANCE_MODE_ON	2
 
 #ifdef __KERNEL__
 
@@ -374,6 +383,35 @@
 			    unsigned int  chans);
 
 /*
+ * Go into a mode where the driver will not autonomously attempt to do
+ * things with the interface.  It will still respond to attentions and
+ * interrupts, and it will expect that commands will complete.  It
+ * will not automatically check for flags, events, or things of that
+ * nature.
+ *
+ * This is primarily used for firmware upgrades.  The idea is that
+ * when you go into firmware upgrade mode, you do this operation
+ * and the driver will not attempt to do anything but what you tell
+ * it or what the BMC asks for.
+ *
+ * Note that if you send a command that resets the BMC, the driver
+ * will still expect a response from that command.  So the BMC should
+ * reset itself *after* the response is sent.  Resetting before the
+ * response is just silly.
+ *
+ * If in auto maintenance mode, the driver will automatically go into
+ * maintenance mode for 30 seconds if it sees a cold reset, a warm
+ * reset, or a firmware NetFN.  This means that code that uses only
+ * firmware NetFN commands to do upgrades will work automatically
+ * without change, assuming it sends a message every 30 seconds or
+ * less.
+ *
+ * See the IPMI_MAINTENANCE_MODE_xxx defines for what the mode means.
+ */
+int ipmi_get_maintenance_mode(ipmi_user_t user);
+int ipmi_set_maintenance_mode(ipmi_user_t user, int mode);
+
+/*
  * Allow run-to-completion mode to be set for the interface of
  * a specific user.
  */
@@ -656,4 +694,11 @@
 #define IPMICTL_GET_TIMING_PARMS_CMD	_IOR(IPMI_IOC_MAGIC, 23, \
 					     struct ipmi_timing_parms)
 
+/*
+ * Get/set the maintenance mode.  See ipmi_set_maintenance_mode() above
+ * for a description of what this does.
+ */
+#define IPMICTL_GET_MAINTENANCE_MODE_CMD	_IOR(IPMI_IOC_MAGIC, 30, int)
+#define IPMICTL_SET_MAINTENANCE_MODE_CMD	_IOW(IPMI_IOC_MAGIC, 31, int)
+
 #endif /* __LINUX_IPMI_H */
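
A hedged sketch of how a firmware-update path would use the maintenance-mode calls declared above (error handling elided, names illustrative):

#include <linux/ipmi.h>

/* Hold the BMC in maintenance mode for the duration of an upgrade,
 * then return to automatic handling. */
static void do_firmware_upgrade(ipmi_user_t user)
{
	ipmi_set_maintenance_mode(user, IPMI_MAINTENANCE_MODE_ON);
	/* ... send the firmware NetFN command sequence ... */
	ipmi_set_maintenance_mode(user, IPMI_MAINTENANCE_MODE_AUTO);
}
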
diff --git a/include/linux/ipmi_msgdefs.h b/include/linux/ipmi_msgdefs.h
index 4d04d8b..b56a158 100644
--- a/include/linux/ipmi_msgdefs.h
+++ b/include/linux/ipmi_msgdefs.h
@@ -46,6 +46,8 @@
 #define IPMI_NETFN_APP_REQUEST			0x06
 #define IPMI_NETFN_APP_RESPONSE			0x07
 #define IPMI_GET_DEVICE_ID_CMD		0x01
+#define IPMI_COLD_RESET_CMD		0x02
+#define IPMI_WARM_RESET_CMD		0x03
 #define IPMI_CLEAR_MSG_FLAGS_CMD	0x30
 #define IPMI_GET_DEVICE_GUID_CMD	0x08
 #define IPMI_GET_MSG_FLAGS_CMD		0x31
@@ -60,20 +62,27 @@
 #define IPMI_NETFN_STORAGE_RESPONSE		0x0b
 #define IPMI_ADD_SEL_ENTRY_CMD		0x44
 
+#define IPMI_NETFN_FIRMWARE_REQUEST		0x08
+#define IPMI_NETFN_FIRMWARE_RESPONSE		0x09
+
 /* The default slave address */
 #define IPMI_BMC_SLAVE_ADDR	0x20
 
 /* The BT interface on high-end HP systems supports up to 255 bytes in
  * one transfer.  Its "virtual" BMC supports some commands that are longer
  * than 128 bytes.  Use the full 256, plus NetFn/LUN, Cmd, cCode, plus
- * some overhead.  It would be nice to base this on the "BT Capabilities"
- * but that's too hard to propagate to the rest of the driver. */
+ * some overhead; it's not worth the effort to dynamically size this based
+ * on the results of the "Get BT Capabilities" command. */
 #define IPMI_MAX_MSG_LENGTH	272	/* multiple of 16 */
 
 #define IPMI_CC_NO_ERROR		0x00
 #define IPMI_NODE_BUSY_ERR		0xc0
 #define IPMI_INVALID_COMMAND_ERR	0xc1
+#define IPMI_TIMEOUT_ERR		0xc3
 #define IPMI_ERR_MSG_TRUNCATED		0xc6
+#define IPMI_REQ_LEN_INVALID_ERR	0xc7
+#define IPMI_REQ_LEN_EXCEEDED_ERR	0xc8
+#define IPMI_NOT_IN_MY_STATE_ERR	0xd5	/* IPMI 2.0 */
 #define IPMI_LOST_ARBITRATION_ERR	0x81
 #define IPMI_BUS_ERR			0x82
 #define IPMI_NAK_ON_WRITE_ERR		0x83
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index 6d9c7e4..c063310 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -115,6 +115,13 @@
 	   poll for operations during things like crash dumps. */
 	void (*poll)(void *send_info);
 
+	/* Enable/disable firmware maintenance mode.  Note that this
+	   is *not* one of the modes defined above, just an on/off
+	   setting.  The message handler does the mode handling.  Note
+	   that this is called from interrupt context, so it cannot
+	   block. */
+	void (*set_maintenance_mode)(void *send_info, int enable);
+
 	/* Tell the handler that we are using it/not using it.  The
 	   message handler get the modules that this handler belongs
 	   to; this function lets the SMI claim any modules that it
@@ -173,6 +180,7 @@
 		      void                     *send_info,
 		      struct ipmi_device_id    *device_id,
 		      struct device            *dev,
+		      const char               *sysfs_name,
 		      unsigned char            slave_addr);
 
 /*
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index fe89444..4527375 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -839,7 +839,6 @@
  */
 
 /* Filing buffers */
-extern void __journal_temp_unlink_buffer(struct journal_head *jh);
 extern void journal_unfile_buffer(journal_t *, struct journal_head *);
 extern void __journal_unfile_buffer(struct journal_head *);
 extern void __journal_refile_buffer(struct journal_head *);
@@ -949,7 +948,7 @@
 /*
  * handle management
  */
-extern kmem_cache_t *jbd_handle_cache;
+extern struct kmem_cache *jbd_handle_cache;
 
 static inline handle_t *jbd_alloc_handle(gfp_t gfp_flags)
 {
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index ddb1287..0e0fedd 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -848,7 +848,6 @@
  */
 
 /* Filing buffers */
-extern void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh);
 extern void jbd2_journal_unfile_buffer(journal_t *, struct journal_head *);
 extern void __jbd2_journal_unfile_buffer(struct journal_head *);
 extern void __jbd2_journal_refile_buffer(struct journal_head *);
@@ -958,7 +957,7 @@
 /*
  * handle management
  */
-extern kmem_cache_t *jbd2_handle_cache;
+extern struct kmem_cache *jbd2_handle_cache;
 
 static inline handle_t *jbd_alloc_handle(gfp_t gfp_flags)
 {
diff --git a/include/linux/kbd_kern.h b/include/linux/kbd_kern.h
index efe0ee4..06c58c4 100644
--- a/include/linux/kbd_kern.h
+++ b/include/linux/kbd_kern.h
@@ -158,7 +158,7 @@
 	if (t->buf.tail != NULL)
 		t->buf.tail->commit = t->buf.tail->used;
 	spin_unlock_irqrestore(&t->buf.lock, flags);
-	schedule_work(&t->buf.work);
+	schedule_delayed_work(&t->buf.work, 0);
 }
 
 #endif
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index a4ede62..e3abcec 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -105,6 +105,7 @@
 						unsigned int order);
 extern void crash_kexec(struct pt_regs *);
 int kexec_should_crash(struct task_struct *);
+void crash_save_cpu(struct pt_regs *regs, int cpu);
 extern struct kimage *kexec_image;
 extern struct kimage *kexec_crash_image;
 
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index ac4c055..769be39 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -165,7 +165,7 @@
 extern int arch_init_kprobes(void);
 extern void show_registers(struct pt_regs *regs);
 extern kprobe_opcode_t *get_insn_slot(void);
-extern void free_insn_slot(kprobe_opcode_t *slot);
+extern void free_insn_slot(kprobe_opcode_t *slot, int dirty);
 extern void kprobes_inc_nmissed_count(struct kprobe *p);
 
 /* Get the kprobe at this addr (if any) - called with preemption disabled */
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 84eeecd..611f17f 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -248,9 +248,9 @@
  *
  * Returns the scalar nanoseconds representation of kt
  */
-static inline u64 ktime_to_ns(const ktime_t kt)
+static inline s64 ktime_to_ns(const ktime_t kt)
 {
-	return (u64) kt.tv.sec * NSEC_PER_SEC + kt.tv.nsec;
+	return (s64) kt.tv.sec * NSEC_PER_SEC + kt.tv.nsec;
 }
 
 #endif
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 202283b..ab27548 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -575,8 +575,9 @@
 	struct ata_host		*host;
 	struct device 		*dev;
 
-	struct work_struct	port_task;
-	struct work_struct	hotplug_task;
+	void			*port_task_data;
+	struct delayed_work	port_task;
+	struct delayed_work	hotplug_task;
 	struct work_struct	scsi_rescan_task;
 
 	unsigned int		hsm_task_state;
@@ -755,7 +756,7 @@
 extern int ata_ratelimit(void);
 extern int ata_busy_sleep(struct ata_port *ap,
 			  unsigned long timeout_pat, unsigned long timeout);
-extern void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *),
+extern void ata_port_queue_task(struct ata_port *ap, work_func_t fn,
 				void *data, unsigned long delay);
 extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
 			     unsigned long interval_msec,
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index 862d973..8c39654 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -164,14 +164,12 @@
  */
 struct nlm_host * nlmclnt_lookup_host(const struct sockaddr_in *, int, int, const char *, int);
 struct nlm_host * nlmsvc_lookup_host(struct svc_rqst *, const char *, int);
-struct nlm_host * nlm_lookup_host(int server, const struct sockaddr_in *, int, int, const char *, int);
 struct rpc_clnt * nlm_bind_host(struct nlm_host *);
 void		  nlm_rebind_host(struct nlm_host *);
 struct nlm_host * nlm_get_host(struct nlm_host *);
 void		  nlm_release_host(struct nlm_host *);
 void		  nlm_shutdown_hosts(void);
 extern void	  nlm_host_rebooted(const struct sockaddr_in *, const char *, int, u32);
-struct nsm_handle *nsm_find(const struct sockaddr_in *, const char *, int);
 void		  nsm_release(struct nsm_handle *);
 
 
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 819f08f..498bfbd 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -193,7 +193,6 @@
 
 extern void lockdep_off(void);
 extern void lockdep_on(void);
-extern int lockdep_internal(void);
 
 /*
  * These methods are used by specific locking variants (spinlocks,
@@ -243,6 +242,8 @@
 
 # define INIT_LOCKDEP				.lockdep_recursion = 0,
 
+#define lockdep_depth(tsk)	((tsk)->lockdep_depth)
+
 #else /* !LOCKDEP */
 
 static inline void lockdep_off(void)
@@ -253,11 +254,6 @@
 {
 }
 
-static inline int lockdep_internal(void)
-{
-	return 0;
-}
-
 # define lock_acquire(l, s, t, r, c, i)		do { } while (0)
 # define lock_release(l, n, i)			do { } while (0)
 # define lockdep_init()				do { } while (0)
@@ -277,6 +273,9 @@
  * The class key takes no space if lockdep is disabled:
  */
 struct lock_class_key { };
+
+#define lockdep_depth(tsk)	(0)
+
 #endif /* !LOCKDEP */
 
 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d538de9..a17b147 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -114,6 +114,8 @@
 #endif
 };
 
+extern struct kmem_cache *vm_area_cachep;
+
 /*
  * This struct defines the per-mm list of VMAs for uClinux. If CONFIG_MMU is
  * disabled, then there's a single shared list of VMAs maintained by the
@@ -294,6 +296,24 @@
 void split_page(struct page *page, unsigned int order);
 
 /*
+ * Compound pages have a destructor function.  Provide a
+ * prototype for that function and accessor functions.
+ * These are _only_ valid on the head of a PG_compound page.
+ */
+typedef void compound_page_dtor(struct page *);
+
+static inline void set_compound_page_dtor(struct page *page,
+						compound_page_dtor *dtor)
+{
+	page[1].lru.next = (void *)dtor;
+}
+
+static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
+{
+	return (compound_page_dtor *)page[1].lru.next;
+}
+
+/*
  * Multiple processes may "see" the same page. E.g. for untouched
  * mappings of /dev/null, all processes see the same page full of
  * zeroes, and text pages of executables and shared libraries have
@@ -396,7 +416,9 @@
  * We are going to use the flags for the page to node mapping if its in
  * there.  This includes the case where there is no node, so it is implicit.
  */
-#define FLAGS_HAS_NODE		(NODES_WIDTH > 0 || NODES_SHIFT == 0)
+#if !(NODES_WIDTH > 0 || NODES_SHIFT == 0)
+#define NODE_NOT_IN_PAGE_FLAGS
+#endif
 
 #ifndef PFN_SECTION_SHIFT
 #define PFN_SECTION_SHIFT 0
@@ -411,13 +433,18 @@
 #define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
 #define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))
 
-/* NODE:ZONE or SECTION:ZONE is used to lookup the zone from a page. */
-#if FLAGS_HAS_NODE
-#define ZONETABLE_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
+/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
+#ifdef NODE_NOT_IN_PAGE_FLAGS
+#define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
 #else
-#define ZONETABLE_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
+#define ZONEID_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
 #endif
-#define ZONETABLE_PGSHIFT	ZONES_PGSHIFT
+
+#if ZONES_WIDTH > 0
+#define ZONEID_PGSHIFT		ZONES_PGSHIFT
+#else
+#define ZONEID_PGSHIFT		NODES_PGOFF
+#endif
 
 #if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED
 #error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED
@@ -426,26 +453,28 @@
 #define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
 #define NODES_MASK		((1UL << NODES_WIDTH) - 1)
 #define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
-#define ZONETABLE_MASK		((1UL << ZONETABLE_SHIFT) - 1)
+#define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)
 
 static inline enum zone_type page_zonenum(struct page *page)
 {
 	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 }
 
-struct zone;
-extern struct zone *zone_table[];
-
+/*
+ * The identification function is only used by the buddy allocator for
+ * determining if two pages could be buddies. We are not really
+ * identifying a zone since we could be using the section number
+ * id if no node id is available in the page flags.
+ * We guarantee only that it will return the same value for two
+ * combinable pages in a zone.
+ */
 static inline int page_zone_id(struct page *page)
 {
-	return (page->flags >> ZONETABLE_PGSHIFT) & ZONETABLE_MASK;
-}
-static inline struct zone *page_zone(struct page *page)
-{
-	return zone_table[page_zone_id(page)];
+	BUILD_BUG_ON(ZONEID_PGSHIFT == 0 && ZONEID_MASK);
+	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
 }
 
-static inline unsigned long zone_to_nid(struct zone *zone)
+static inline int zone_to_nid(struct zone *zone)
 {
 #ifdef CONFIG_NUMA
 	return zone->node;
@@ -454,13 +483,20 @@
 #endif
 }
 
-static inline unsigned long page_to_nid(struct page *page)
+#ifdef NODE_NOT_IN_PAGE_FLAGS
+extern int page_to_nid(struct page *page);
+#else
+static inline int page_to_nid(struct page *page)
 {
-	if (FLAGS_HAS_NODE)
-		return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
-	else
-		return zone_to_nid(page_zone(page));
+	return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
 }
+#endif
+
+static inline struct zone *page_zone(struct page *page)
+{
+	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
+}
+
 static inline unsigned long page_to_section(struct page *page)
 {
 	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
@@ -477,6 +513,7 @@
 	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
 	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
 }
+
 static inline void set_page_section(struct page *page, unsigned long section)
 {
 	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
@@ -947,8 +984,6 @@
 extern void show_mem(void);
 extern void si_meminfo(struct sysinfo * val);
 extern void si_meminfo_node(struct sysinfo *val, int nid);
-extern void zonetable_add(struct zone *zone, int nid, enum zone_type zid,
-					unsigned long pfn, unsigned long size);
 
 #ifdef CONFIG_NUMA
 extern void setup_per_cpu_pageset(void);
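
As a quick illustration of the reworked helpers above: page_zone_id() is only guaranteed to compare equal for two combinable pages in the same zone, and page_zone() is now derived from the node id and zone number rather than the removed zone_table[]. The sketch below assumes that reading; pages_may_be_buddies() and zone_of() are illustrative names, not part of the patch.

static inline int pages_may_be_buddies(struct page *a, struct page *b)
{
	/* equal NODE:ZONE (or SECTION:ZONE) ids => candidates for coalescing */
	return page_zone_id(a) == page_zone_id(b);
}

static inline struct zone *zone_of(struct page *page)
{
	/* what the new page_zone() does: index node_zones[] by zone number */
	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}
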
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 528e7d3..c15ae19 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -110,7 +110,7 @@
 	struct mmc_card		*card_busy;	/* the MMC card claiming host */
 	struct mmc_card		*card_selected;	/* the selected MMC card */
 
-	struct work_struct	detect;
+	struct delayed_work	detect;
 
 	unsigned long		private[0] ____cacheline_aligned;
 };
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index e06683e..e339a73 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -278,7 +278,7 @@
 	/*
 	 * rarely used fields:
 	 */
-	char			*name;
+	const char		*name;
 } ____cacheline_internodealigned_in_smp;
 
 /*
@@ -288,19 +288,94 @@
  */
 #define DEF_PRIORITY 12
 
+/* Maximum number of zones on a zonelist */
+#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)
+
+#ifdef CONFIG_NUMA
+/*
+ * We cache key information from each zonelist for smaller cache
+ * footprint when scanning for free pages in get_page_from_freelist().
+ *
+ * 1) The BITMAP fullzones tracks which zones in a zonelist have come
+ *    up short of free memory since the last time (last_full_zap)
+ *    we zero'd fullzones.
+ * 2) The array z_to_n[] maps each zone in the zonelist to its node
+ *    id, so that we can efficiently evaluate whether that node is
+ *    set in the current task's mems_allowed.
+ *
+ * Both fullzones and z_to_n[] are one-to-one with the zonelist,
+ * indexed by a zone's offset in the zonelist zones[] array.
+ *
+ * The get_page_from_freelist() routine does two scans.  During the
+ * first scan, we skip zones whose corresponding bit in 'fullzones'
+ * is set or whose corresponding node in current->mems_allowed (which
+ * comes from cpusets) is not set.  During the second scan, we bypass
+ * this zonelist_cache, to ensure we look methodically at each zone.
+ *
+ * Once per second, we zero out (zap) fullzones, forcing us to
+ * reconsider nodes that might have regained more free memory.
+ * The field last_full_zap is the time we last zapped fullzones.
+ *
+ * This mechanism reduces the amount of time we waste repeatedly
+ * re-examining zones for free memory when they came up low on
+ * memory only a moment ago.
+ *
+ * The zonelist_cache struct members logically belong in struct
+ * zonelist.  However, the mempolicy zonelists constructed for
+ * MPOL_BIND are intentionally variable length (and usually much
+ * shorter).  A general purpose mechanism for handling structs with
+ * multiple variable length members is more mechanism than we want
+ * here.  We resort to some special case hackery instead.
+ *
+ * The MPOL_BIND zonelists don't need this zonelist_cache (in good
+ * part because they are shorter), so we put the fixed length stuff
+ * at the front of the zonelist struct, ending in a variable length
+ * zones[], as is needed by MPOL_BIND.
+ *
+ * Then we put the optional zonelist cache on the end of the zonelist
+ * struct.  This optional stuff is found by a 'zlcache_ptr' pointer in
+ * the fixed length portion at the front of the struct.  This pointer
+ * both enables us to find the zonelist cache, and in the case of
+ * MPOL_BIND zonelists (which just set the zlcache_ptr to NULL)
+ * to know that the zonelist cache is not there.
+ *
+ * The end result is that struct zonelists come in two flavors:
+ *  1) The full, fixed length version, shown below, and
+ *  2) The custom zonelists for MPOL_BIND.
+ * The custom MPOL_BIND zonelists have a NULL zlcache_ptr and no zlcache.
+ *
+ * Even though there may be multiple CPU cores on a node modifying
+ * fullzones or last_full_zap in the same zonelist_cache at the same
+ * time, we don't lock it.  This is just hint data - if it is wrong now
+ * and then, the allocator will still function, perhaps a bit slower.
+ */
+
+
+struct zonelist_cache {
+	unsigned short z_to_n[MAX_ZONES_PER_ZONELIST];		/* zone->nid */
+	DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST);	/* zone full? */
+	unsigned long last_full_zap;		/* when last zap'd (jiffies) */
+};
+#else
+struct zonelist_cache;
+#endif
+
 /*
  * One allocation request operates on a zonelist. A zonelist
  * is a list of zones, the first one is the 'goal' of the
  * allocation, the other zones are fallback zones, in decreasing
  * priority.
  *
- * Right now a zonelist takes up less than a cacheline. We never
- * modify it apart from boot-up, and only a few indices are used,
- * so despite the zonelist table being relatively big, the cache
- * footprint of this construct is very small.
+ * If zlcache_ptr is not NULL, then it is just the address of zlcache,
+ * as explained above.  If zlcache_ptr is NULL, there is no zlcache.
  */
+
 struct zonelist {
-	struct zone *zones[MAX_NUMNODES * MAX_NR_ZONES + 1]; // NULL delimited
+	struct zonelist_cache *zlcache_ptr;		     // NULL or &zlcache
+	struct zone *zones[MAX_ZONES_PER_ZONELIST + 1];      // NULL delimited
+#ifdef CONFIG_NUMA
+	struct zonelist_cache zlcache;			     // optional ...
+#endif
 };
 
 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
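
A hedged sketch of how an allocator scan might consult this optional cache, assuming the per-zonelist index i and the caller's allowed node mask; zlc_zone_worth_trying() is used here as an illustrative name and is not a reference to the actual mm/page_alloc.c code.

static int zlc_zone_worth_trying(struct zonelist *zl, int i,
				 const nodemask_t *allowed)
{
	struct zonelist_cache *zlc = zl->zlcache_ptr;

	if (!zlc)			/* MPOL_BIND zonelists carry no cache */
		return 1;
	if (time_after(jiffies, zlc->last_full_zap + HZ)) {
		/* once a second, reconsider zones previously seen as full */
		bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
		zlc->last_full_zap = jiffies;
	}
	if (test_bit(i, zlc->fullzones))
		return 0;		/* recently found short of memory */
	return node_isset(zlc->z_to_n[i], *allowed);
}
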
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index 7c0c2c1..4a189da 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -63,6 +63,9 @@
    not there, read bits mean it's readable, write bits mean it's
    writable. */
 #define __module_param_call(prefix, name, set, get, arg, perm)		\
+	/* Default value instead of permissions? */			\
+	static int __param_perm_check_##name __attribute__((unused)) =	\
+	BUILD_BUG_ON_ZERO((perm) < 0 || (perm) > 0777 || ((perm) & 2));	\
 	static char __param_str_##name[] = prefix #name;		\
 	static struct kernel_param const __param_##name			\
 	__attribute_used__						\
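
The added BUILD_BUG_ON_ZERO() line rejects, at compile time, a permission argument that is negative, larger than 0777 (typically a decimal value or a default value passed by mistake), or world-writable. A hypothetical example of what it now catches:

static int debug_level;
module_param(debug_level, int, 0644);	/* fine: octal mode, not world-writable */

/* These would now fail to build:
 *   module_param(debug_level, int, 644);    decimal 644 is greater than 0777
 *   module_param(debug_level, int, 0666);   world-writable ((perm) & 2)
 */
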
diff --git a/include/linux/msg.h b/include/linux/msg.h
index acc7c17..f1b6074 100644
--- a/include/linux/msg.h
+++ b/include/linux/msg.h
@@ -92,6 +92,12 @@
 	struct list_head q_senders;
 };
 
+/* Helper routines for sys_msgsnd and sys_msgrcv */
+extern long do_msgsnd(int msqid, long mtype, void __user *mtext,
+			size_t msgsz, int msgflg);
+extern long do_msgrcv(int msqid, long *pmtype, void __user *mtext,
+			size_t msgsz, long msgtyp, int msgflg);
+
 #endif /* __KERNEL__ */
 
 #endif /* _LINUX_MSG_H */
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 27c48da..b2b91c4 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -94,7 +94,7 @@
 
 #define __MUTEX_INITIALIZER(lockname) \
 		{ .count = ATOMIC_INIT(1) \
-		, .wait_lock = SPIN_LOCK_UNLOCKED \
+		, .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
 		, .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
 		__DEBUG_MUTEX_INITIALIZER(lockname) \
 		__DEP_MAP_MUTEX_INITIALIZER(lockname) }
diff --git a/include/linux/nbd.h b/include/linux/nbd.h
index d6b6dc0..0f3e693 100644
--- a/include/linux/nbd.h
+++ b/include/linux/nbd.h
@@ -64,6 +64,7 @@
 	struct gendisk *disk;
 	int blksize;
 	u64 bytesize;
+	pid_t pid; /* pid of nbd-client, if attached */
 };
 
 #endif
diff --git a/include/linux/ncp_fs_sb.h b/include/linux/ncp_fs_sb.h
index b089d95..a503052 100644
--- a/include/linux/ncp_fs_sb.h
+++ b/include/linux/ncp_fs_sb.h
@@ -127,10 +127,10 @@
 	} unexpected_packet;
 };
 
-extern void ncp_tcp_rcv_proc(void *server);
-extern void ncp_tcp_tx_proc(void *server);
-extern void ncpdgram_rcv_proc(void *server);
-extern void ncpdgram_timeout_proc(void *server);
+extern void ncp_tcp_rcv_proc(struct work_struct *work);
+extern void ncp_tcp_tx_proc(struct work_struct *work);
+extern void ncpdgram_rcv_proc(struct work_struct *work);
+extern void ncpdgram_timeout_proc(struct work_struct *work);
 extern void ncpdgram_timeout_call(unsigned long server);
 extern void ncp_tcp_data_ready(struct sock* sk, int len);
 extern void ncp_tcp_write_space(struct sock* sk);
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index 2cc9867..29930b71 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -32,7 +32,7 @@
 	struct netpoll *rx_np; /* netpoll that registered an rx_hook */
 	struct sk_buff_head arp_tx; /* list of arp requests to reply to */
 	struct sk_buff_head txq;
-	struct work_struct tx_work;
+	struct delayed_work tx_work;
 };
 
 void netpoll_poll(struct netpoll *np);
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 7ccfc7e..95796e6 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -51,7 +51,7 @@
 
 	unsigned long		cl_lease_time;
 	unsigned long		cl_last_renewal;
-	struct work_struct	cl_renewd;
+	struct delayed_work	cl_renewd;
 
 	struct rpc_wait_queue	cl_rpcwaitq;
 
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index c09da1e..ff2dcb4 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -390,7 +390,7 @@
 #define PCI_DEVICE_ID_NS_CS5535_IDE	0x002d
 #define PCI_DEVICE_ID_NS_CS5535_AUDIO	0x002e
 #define PCI_DEVICE_ID_NS_CS5535_USB	0x002f
-#define PCI_DEVICE_ID_NS_CS5535_VIDEO	0x0030
+#define PCI_DEVICE_ID_NS_GX_VIDEO	0x0030
 #define PCI_DEVICE_ID_NS_SATURN		0x0035
 #define PCI_DEVICE_ID_NS_SCx200_BRIDGE	0x0500
 #define PCI_DEVICE_ID_NS_SCx200_SMI	0x0501
@@ -403,8 +403,7 @@
 #define PCI_DEVICE_ID_NS_SC1100_XBUS	0x0515
 #define PCI_DEVICE_ID_NS_87410		0xd001
 
-#define PCI_DEVICE_ID_NS_CS5535_HOST_BRIDGE  0x0028
-#define PCI_DEVICE_ID_NS_CS5535_ISA_BRIDGE   0x002b
+#define PCI_DEVICE_ID_NS_GX_HOST_BRIDGE  0x0028
 
 #define PCI_VENDOR_ID_TSENG		0x100c
 #define PCI_DEVICE_ID_TSENG_W32P_2	0x3202
@@ -1864,6 +1863,7 @@
 #define PCI_DEVICE_ID_OXSEMI_16PCI95N	0x9511
 #define PCI_DEVICE_ID_OXSEMI_16PCI954PP	0x9513
 #define PCI_DEVICE_ID_OXSEMI_16PCI952	0x9521
+#define PCI_DEVICE_ID_OXSEMI_16PCI952PP	0x9523
 
 #define PCI_VENDOR_ID_SAMSUNG		0x144d
 
diff --git a/include/linux/profile.h b/include/linux/profile.h
index acce53f..5670b34 100644
--- a/include/linux/profile.h
+++ b/include/linux/profile.h
@@ -6,10 +6,15 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/cpumask.h>
+#include <linux/cache.h>
+
 #include <asm/errno.h>
 
+extern int prof_on __read_mostly;
+
 #define CPU_PROFILING	1
 #define SCHED_PROFILING	2
+#define SLEEP_PROFILING	3
 
 struct proc_dir_entry;
 struct pt_regs;
@@ -18,7 +23,24 @@
 /* init basic kernel profiler */
 void __init profile_init(void);
 void profile_tick(int);
-void profile_hit(int, void *);
+
+/*
+ * Add multiple profiler hits to a given address:
+ */
+void profile_hits(int, void *ip, unsigned int nr_hits);
+
+/*
+ * Single profiler hit:
+ */
+static inline void profile_hit(int type, void *ip)
+{
+	/*
+	 * Speedup for the common (no profiling enabled) case:
+	 */
+	if (unlikely(prof_on == type))
+		profile_hits(type, ip, 1);
+}
+
 #ifdef CONFIG_PROC_FS
 void create_prof_cpu_mask(struct proc_dir_entry *);
 #else
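
profile_hit() stays cheap by testing prof_on inline before calling into the profiler, while profile_hits() lets a caller account several events against one address in a single call. A small illustrative use, assuming a hypothetical sampling helper:

static void account_samples(void *pc, unsigned int ticks)
{
	if (ticks == 1)
		profile_hit(SCHED_PROFILING, pc);	/* inline prof_on check */
	else
		profile_hits(SCHED_PROFILING, pc, ticks);
}
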
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 5110201..90c23f6 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -37,6 +37,9 @@
 extern int dquot_commit_info(struct super_block *sb, int type);
 extern int dquot_mark_dquot_dirty(struct dquot *dquot);
 
+int remove_inode_dquot_ref(struct inode *inode, int type,
+			   struct list_head *tofree_head);
+
 extern int vfs_quota_on(struct super_block *sb, int type, int format_id, char *path);
 extern int vfs_quota_on_mount(struct super_block *sb, char *qf_name,
 		int format_id, int type);
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index cbfa115..0deb842 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -1,6 +1,7 @@
 /*
  * Copyright (C) 2001 Momchil Velikov
  * Portions Copyright (C) 2001 Christoph Hellwig
+ * Copyright (C) 2006 Nick Piggin
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as
@@ -21,6 +22,35 @@
 
 #include <linux/preempt.h>
 #include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/rcupdate.h>
+
+/*
+ * A direct pointer (root->rnode pointing directly to a data item,
+ * rather than another radix_tree_node) is signalled by the low bit
+ * set in the root->rnode pointer.
+ *
+ * In this case root->height is also 0, but the direct pointer tests are
+ * needed for RCU lookups when root->height is unreliable.
+ */
+#define RADIX_TREE_DIRECT_PTR	1
+
+static inline void *radix_tree_ptr_to_direct(void *ptr)
+{
+	return (void *)((unsigned long)ptr | RADIX_TREE_DIRECT_PTR);
+}
+
+static inline void *radix_tree_direct_to_ptr(void *ptr)
+{
+	return (void *)((unsigned long)ptr & ~RADIX_TREE_DIRECT_PTR);
+}
+
+static inline int radix_tree_is_direct_ptr(void *ptr)
+{
+	return (int)((unsigned long)ptr & RADIX_TREE_DIRECT_PTR);
+}
+
+/*** radix-tree API starts here ***/
 
 #define RADIX_TREE_MAX_TAGS 2
 
@@ -47,6 +77,77 @@
 	(root)->rnode = NULL;						\
 } while (0)
 
+/**
+ * Radix-tree synchronization
+ *
+ * The radix-tree API requires that users provide all synchronisation (with
+ * specific exceptions, noted below).
+ *
+ * Synchronization of access to the data items being stored in the tree, and
+ * management of their lifetimes must be completely managed by API users.
+ *
+ * For API usage, in general,
+ * - any function _modifying_ the tree or tags (inserting or deleting
+ *   items, setting or clearing tags) must exclude other modifications, and
+ *   exclude any functions reading the tree.
+ * - any function _reading_ the tree or tags (looking up items or tags,
+ *   gang lookups) must exclude modifications to the tree, but may occur
+ *   concurrently with other readers.
+ *
+ * The notable exceptions to this rule are the following functions:
+ * radix_tree_lookup
+ * radix_tree_tag_get
+ * radix_tree_gang_lookup
+ * radix_tree_gang_lookup_tag
+ * radix_tree_tagged
+ *
+ * The first 4 functions can be called locklessly, using RCU. The
+ * caller must ensure calls to these functions are made within rcu_read_lock()
+ * regions. Other readers (lock-free or otherwise) and modifications may be
+ * running concurrently.
+ *
+ * It is still required that the caller manage the synchronization and lifetimes
+ * of the items. So if RCU lock-free lookups are used, typically this would mean
+ * that the items have their own locks, or are amenable to lock-free access; and
+ * that the items are freed by RCU (or only freed after having been deleted from
+ * the radix tree *and* a synchronize_rcu() grace period).
+ *
+ * (Note, rcu_assign_pointer and rcu_dereference are not needed to control
+ * access to data items when inserting into or looking up from the radix tree)
+ *
+ * radix_tree_tagged can be called without locking or RCU.
+ */
+
+/**
+ * radix_tree_deref_slot	- dereference a slot
+ * @pslot:	pointer to slot, returned by radix_tree_lookup_slot
+ * Returns:	item that was stored in that slot with any direct pointer flag
+ *		removed.
+ *
+ * For use with radix_tree_lookup_slot().  Caller must hold tree at least read
+ * locked across slot lookup and dereference.  More likely, will be used with
+ * radix_tree_replace_slot(), as well, so caller will hold tree write locked.
+ */
+static inline void *radix_tree_deref_slot(void **pslot)
+{
+	return radix_tree_direct_to_ptr(*pslot);
+}
+/**
+ * radix_tree_replace_slot	- replace item in a slot
+ * @pslot:	pointer to slot, returned by radix_tree_lookup_slot
+ * @item:	new item to store in the slot.
+ *
+ * For use with radix_tree_lookup_slot().  Caller must hold tree write locked
+ * across slot lookup and replacement.
+ */
+static inline void radix_tree_replace_slot(void **pslot, void *item)
+{
+	BUG_ON(radix_tree_is_direct_ptr(item));
+	rcu_assign_pointer(*pslot,
+		(void *)((unsigned long)item |
+			((unsigned long)*pslot & RADIX_TREE_DIRECT_PTR)));
+}
+
 int radix_tree_insert(struct radix_tree_root *, unsigned long, void *);
 void *radix_tree_lookup(struct radix_tree_root *, unsigned long);
 void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long);
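
A minimal usage sketch of the locking rules described above, assuming the caller serializes writers with its own spinlock and stores reference-counted, RCU-freed items; my_tree, tree_lock, struct my_item and item_tryget() are illustrative names.

static RADIX_TREE(my_tree, GFP_ATOMIC);		/* gfp mask used for node allocation */
static DEFINE_SPINLOCK(tree_lock);

static struct my_item *lookup_item(unsigned long index)
{
	struct my_item *item;

	rcu_read_lock();				/* lockless reader */
	item = radix_tree_lookup(&my_tree, index);
	if (item && !item_tryget(item))			/* illustrative per-item refcount */
		item = NULL;
	rcu_read_unlock();
	return item;
}

static int insert_item(unsigned long index, struct my_item *item)
{
	int err;

	spin_lock(&tree_lock);				/* writers exclude each other */
	err = radix_tree_insert(&my_tree, index, item);
	spin_unlock(&tree_lock);
	return err;
}
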
diff --git a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h
index f13299a..03636d7 100644
--- a/include/linux/raid/raid5.h
+++ b/include/linux/raid/raid5.h
@@ -235,7 +235,7 @@
 	 */
 	int			active_name;
 	char			cache_name[2][20];
-	kmem_cache_t		*slab_cache; /* for allocating stripes */
+	struct kmem_cache		*slab_cache; /* for allocating stripes */
 
 	int			seq_flush, seq_write;
 	int			quiesce;
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
index 7bc6bfb..d0e4dce 100644
--- a/include/linux/reiserfs_fs.h
+++ b/include/linux/reiserfs_fs.h
@@ -739,7 +739,7 @@
 #define PUT_B_FREE_SPACE(p_s_bh,val)  do { set_blkh_free_space(B_BLK_HEAD(p_s_bh),val); } while (0)
 
 /* Get right delimiting key. -- little endian */
-#define B_PRIGHT_DELIM_KEY(p_s_bh)   (&(blk_right_delim_key(B_BLK_HEAD(p_s_bh))
+#define B_PRIGHT_DELIM_KEY(p_s_bh)   (&(blk_right_delim_key(B_BLK_HEAD(p_s_bh))))
 
 /* Does the buffer contain a disk leaf. */
 #define B_IS_ITEMS_LEVEL(p_s_bh)     (B_LEVEL(p_s_bh) == DISK_LEAF_NODE_LEVEL)
diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
index 62a7169..3a28742 100644
--- a/include/linux/reiserfs_fs_sb.h
+++ b/include/linux/reiserfs_fs_sb.h
@@ -249,7 +249,8 @@
 	int j_errno;
 
 	/* when flushing ordered buffers, throttle new ordered writers */
-	struct work_struct j_work;
+	struct delayed_work j_work;
+	struct super_block *j_work_sb;
 	atomic_t j_async_throttle;
 };
 
diff --git a/include/linux/relay.h b/include/linux/relay.h
index 24accb4..c6a48bf 100644
--- a/include/linux/relay.h
+++ b/include/linux/relay.h
@@ -38,7 +38,7 @@
 	size_t subbufs_consumed;	/* count of sub-buffers consumed */
 	struct rchan *chan;		/* associated channel */
 	wait_queue_head_t read_wait;	/* reader wait queue */
-	struct work_struct wake_readers; /* reader wake-up work struct */
+	struct delayed_work wake_readers; /* reader wake-up work struct */
 	struct dentry *dentry;		/* channel file dentry */
 	struct kref kref;		/* channel buffer refcount */
 	struct page **page_array;	/* array of current buffer pages */
@@ -274,7 +274,7 @@
 /*
  * exported relay file operations, kernel/relay.c
  */
-extern struct file_operations relay_file_operations;
+extern const struct file_operations relay_file_operations;
 
 #endif /* _LINUX_RELAY_H */
 
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index db2c1df..36f8503 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -30,11 +30,11 @@
 
 #ifdef CONFIG_MMU
 
-extern kmem_cache_t *anon_vma_cachep;
+extern struct kmem_cache *anon_vma_cachep;
 
 static inline struct anon_vma *anon_vma_alloc(void)
 {
-	return kmem_cache_alloc(anon_vma_cachep, SLAB_KERNEL);
+	return kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
 }
 
 static inline void anon_vma_free(struct anon_vma *anon_vma)
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index 5d41dee..b0090e9 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -63,7 +63,7 @@
 #endif
 
 #define __RT_MUTEX_INITIALIZER(mutexname) \
-	{ .wait_lock = SPIN_LOCK_UNLOCKED \
+	{ .wait_lock = __SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
 	, .wait_list = PLIST_HEAD_INIT(mutexname.wait_list, mutexname.wait_lock) \
 	, .owner = NULL \
 	__DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
index ae1fcad..813cee1 100644
--- a/include/linux/rwsem-spinlock.h
+++ b/include/linux/rwsem-spinlock.h
@@ -44,7 +44,8 @@
 #endif
 
 #define __RWSEM_INITIALIZER(name) \
-{ 0, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
+{ 0, __SPIN_LOCK_UNLOCKED(name.wait_lock), LIST_HEAD_INIT((name).wait_list) \
+  __RWSEM_DEP_MAP_INIT(name) }
 
 #define DECLARE_RWSEM(name) \
 	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index eafe4a7..dede82c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -194,7 +194,16 @@
 
 extern cpumask_t nohz_cpu_mask;
 
-extern void show_state(void);
+/*
+ * Only dump TASK_* tasks. (-1 for all tasks)
+ */
+extern void show_state_filter(unsigned long state_filter);
+
+static inline void show_state(void)
+{
+	show_state_filter(-1);
+}
+
 extern void show_regs(struct pt_regs *);
 
 /*
@@ -338,15 +347,23 @@
 
 	unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
 
-	unsigned dumpable:2;
 	cpumask_t cpu_vm_mask;
 
 	/* Architecture-specific MM context */
 	mm_context_t context;
 
-	/* Token based thrashing protection. */
-	unsigned long swap_token_time;
-	char recent_pagein;
+	/* Swap token stuff */
+	/*
+	 * Last value of global fault stamp as seen by this process.
+	 * In other words, this value gives an indication of how long
+	 * it has been since this task got the token.
+	 * Look at mm/thrash.c
+	 */
+	unsigned int faultstamp;
+	unsigned int token_priority;
+	unsigned int last_interval;
+
+	unsigned char dumpable:2;
 
 	/* coredumping support */
 	int core_waiters;
@@ -556,7 +573,7 @@
 #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
 
 #ifdef CONFIG_SCHEDSTATS
-extern struct file_operations proc_schedstat_operations;
+extern const struct file_operations proc_schedstat_operations;
 #endif /* CONFIG_SCHEDSTATS */
 
 #ifdef CONFIG_TASK_DELAY_ACCT
@@ -1288,7 +1305,6 @@
 extern int kill_pid(struct pid *pid, int sig, int priv);
 extern int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp);
 extern int kill_pg_info(int, struct siginfo *, pid_t);
-extern int kill_proc_info(int, struct siginfo *, pid_t);
 extern void do_notify_parent(struct task_struct *, int);
 extern void force_sig(int, struct task_struct *);
 extern void force_sig_specific(int, struct task_struct *);
@@ -1610,87 +1626,6 @@
 
 extern void normalize_rt_tasks(void);
 
-#ifdef CONFIG_PM
-/*
- * Check if a process has been frozen
- */
-static inline int frozen(struct task_struct *p)
-{
-	return p->flags & PF_FROZEN;
-}
-
-/*
- * Check if there is a request to freeze a process
- */
-static inline int freezing(struct task_struct *p)
-{
-	return p->flags & PF_FREEZE;
-}
-
-/*
- * Request that a process be frozen
- * FIXME: SMP problem. We may not modify other process' flags!
- */
-static inline void freeze(struct task_struct *p)
-{
-	p->flags |= PF_FREEZE;
-}
-
-/*
- * Sometimes we may need to cancel the previous 'freeze' request
- */
-static inline void do_not_freeze(struct task_struct *p)
-{
-	p->flags &= ~PF_FREEZE;
-}
-
-/*
- * Wake up a frozen process
- */
-static inline int thaw_process(struct task_struct *p)
-{
-	if (frozen(p)) {
-		p->flags &= ~PF_FROZEN;
-		wake_up_process(p);
-		return 1;
-	}
-	return 0;
-}
-
-/*
- * freezing is complete, mark process as frozen
- */
-static inline void frozen_process(struct task_struct *p)
-{
-	p->flags = (p->flags & ~PF_FREEZE) | PF_FROZEN;
-}
-
-extern void refrigerator(void);
-extern int freeze_processes(void);
-extern void thaw_processes(void);
-
-static inline int try_to_freeze(void)
-{
-	if (freezing(current)) {
-		refrigerator();
-		return 1;
-	} else
-		return 0;
-}
-#else
-static inline int frozen(struct task_struct *p) { return 0; }
-static inline int freezing(struct task_struct *p) { return 0; }
-static inline void freeze(struct task_struct *p) { BUG(); }
-static inline int thaw_process(struct task_struct *p) { return 1; }
-static inline void frozen_process(struct task_struct *p) { BUG(); }
-
-static inline void refrigerator(void) {}
-static inline int freeze_processes(void) { BUG(); return 0; }
-static inline void thaw_processes(void) {}
-
-static inline int try_to_freeze(void) { return 0; }
-
-#endif /* CONFIG_PM */
 #endif /* __KERNEL__ */
 
 #endif
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index b95f6eb..3e3cccb 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -20,7 +20,7 @@
 	loff_t index;
 	loff_t version;
 	struct mutex lock;
-	struct seq_operations *op;
+	const struct seq_operations *op;
 	void *private;
 };
 
@@ -31,7 +31,7 @@
 	int (*show) (struct seq_file *m, void *v);
 };
 
-int seq_open(struct file *, struct seq_operations *);
+int seq_open(struct file *, const struct seq_operations *);
 ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
 loff_t seq_lseek(struct file *, loff_t, int);
 int seq_release(struct inode *, struct file *);
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index 8e96814..71310d8 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -41,6 +41,7 @@
 	PLAT8250_DEV_FOURPORT,
 	PLAT8250_DEV_ACCENT,
 	PLAT8250_DEV_BOCA,
+	PLAT8250_DEV_EXAR_ST16C554,
 	PLAT8250_DEV_HUB6,
 	PLAT8250_DEV_MCA,
 	PLAT8250_DEV_AU1X00,
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 463ab95..8276721 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -132,6 +132,8 @@
 
 #define PORT_S3C2412	73
 
+/* Xilinx uartlite */
+#define PORT_UARTLITE	74
 
 #ifdef __KERNEL__
 
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 117135e..1474905 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -241,6 +241,8 @@
 struct pt_regs;
 extern int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, struct pt_regs *regs, void *cookie);
 
+extern struct kmem_cache *sighand_cachep;
+
 #endif /* __KERNEL__ */
 
 #endif /* _LINUX_SIGNAL_H */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index a05a5f7..4ff3940 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -332,20 +332,20 @@
 extern void kfree_skb(struct sk_buff *skb);
 extern void	       __kfree_skb(struct sk_buff *skb);
 extern struct sk_buff *__alloc_skb(unsigned int size,
-				   gfp_t priority, int fclone);
+				   gfp_t priority, int fclone, int node);
 static inline struct sk_buff *alloc_skb(unsigned int size,
 					gfp_t priority)
 {
-	return __alloc_skb(size, priority, 0);
+	return __alloc_skb(size, priority, 0, -1);
 }
 
 static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
 					       gfp_t priority)
 {
-	return __alloc_skb(size, priority, 1);
+	return __alloc_skb(size, priority, 1, -1);
 }
 
-extern struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
+extern struct sk_buff *alloc_skb_from_cache(struct kmem_cache *cp,
 					    unsigned int size,
 					    gfp_t priority);
 extern void	       kfree_skbmem(struct sk_buff *skb);
diff --git a/include/linux/slab.h b/include/linux/slab.h
index c4947b8..2271886 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -7,27 +7,17 @@
 #ifndef _LINUX_SLAB_H
 #define	_LINUX_SLAB_H
 
-#if	defined(__KERNEL__)
+#ifdef __KERNEL__
 
-typedef struct kmem_cache kmem_cache_t;
+#include <linux/gfp.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
+#include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
+#include <linux/compiler.h>
 
-#include	<linux/gfp.h>
-#include	<linux/init.h>
-#include	<linux/types.h>
-#include	<asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
-#include	<asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
-
-/* flags for kmem_cache_alloc() */
-#define	SLAB_NOFS		GFP_NOFS
-#define	SLAB_NOIO		GFP_NOIO
-#define	SLAB_ATOMIC		GFP_ATOMIC
-#define	SLAB_USER		GFP_USER
-#define	SLAB_KERNEL		GFP_KERNEL
-#define	SLAB_DMA		GFP_DMA
-
-#define SLAB_LEVEL_MASK		GFP_LEVEL_MASK
-
-#define	SLAB_NO_GROW		__GFP_NO_GROW	/* don't grow a cache */
+/* kmem_cache_t exists for legacy reasons and is not used by code in mm */
+typedef struct kmem_cache kmem_cache_t __deprecated;
 
 /* flags to pass to kmem_cache_create().
  * The first 3 are only valid when the allocator as been build
@@ -57,22 +47,23 @@
 /* prototypes */
 extern void __init kmem_cache_init(void);
 
-extern kmem_cache_t *kmem_cache_create(const char *, size_t, size_t, unsigned long,
-				       void (*)(void *, kmem_cache_t *, unsigned long),
-				       void (*)(void *, kmem_cache_t *, unsigned long));
-extern void kmem_cache_destroy(kmem_cache_t *);
-extern int kmem_cache_shrink(kmem_cache_t *);
-extern void *kmem_cache_alloc(kmem_cache_t *, gfp_t);
+extern struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
+			unsigned long,
+			void (*)(void *, struct kmem_cache *, unsigned long),
+			void (*)(void *, struct kmem_cache *, unsigned long));
+extern void kmem_cache_destroy(struct kmem_cache *);
+extern int kmem_cache_shrink(struct kmem_cache *);
+extern void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 extern void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
-extern void kmem_cache_free(kmem_cache_t *, void *);
-extern unsigned int kmem_cache_size(kmem_cache_t *);
-extern const char *kmem_cache_name(kmem_cache_t *);
+extern void kmem_cache_free(struct kmem_cache *, void *);
+extern unsigned int kmem_cache_size(struct kmem_cache *);
+extern const char *kmem_cache_name(struct kmem_cache *);
 
 /* Size description struct for general caches. */
 struct cache_sizes {
-	size_t		 cs_size;
-	kmem_cache_t	*cs_cachep;
-	kmem_cache_t	*cs_dmacachep;
+	size_t			cs_size;
+	struct kmem_cache	*cs_cachep;
+	struct kmem_cache	*cs_dmacachep;
 };
 extern struct cache_sizes malloc_sizes[];
 
@@ -211,7 +202,7 @@
 extern int slab_is_available(void);
 
 #ifdef CONFIG_NUMA
-extern void *kmem_cache_alloc_node(kmem_cache_t *, gfp_t flags, int node);
+extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
 
 static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
@@ -236,8 +227,27 @@
 	}
 	return __kmalloc_node(size, flags, node);
 }
+
+/*
+ * kmalloc_node_track_caller is a special version of kmalloc_node that
+ * records, for slab leak tracking, the caller of the routine that calls
+ * it rather than that routine itself.
+ * It's useful when the call to kmalloc_node comes from a widely-used
+ * standard allocator where we care about the real place the memory
+ * allocation request comes from.
+ */
+#ifndef CONFIG_DEBUG_SLAB
+#define kmalloc_node_track_caller(size, flags, node) \
+	__kmalloc_node(size, flags, node)
 #else
-static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int node)
+extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);
+#define kmalloc_node_track_caller(size, flags, node) \
+	__kmalloc_node_track_caller(size, flags, node, \
+			__builtin_return_address(0))
+#endif
+#else /* CONFIG_NUMA */
+static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
+					gfp_t flags, int node)
 {
 	return kmem_cache_alloc(cachep, flags);
 }
@@ -245,10 +255,13 @@
 {
 	return kmalloc(size, flags);
 }
+
+#define kmalloc_node_track_caller(size, flags, node) \
+	kmalloc_track_caller(size, flags)
 #endif
 
 extern int FASTCALL(kmem_cache_reap(int));
-extern int FASTCALL(kmem_ptr_validate(kmem_cache_t *cachep, void *ptr));
+extern int FASTCALL(kmem_ptr_validate(struct kmem_cache *cachep, void *ptr));
 
 #else /* CONFIG_SLOB */
 
@@ -283,16 +296,9 @@
 #define kzalloc(s, f) __kzalloc(s, f)
 #define kmalloc_track_caller kmalloc
 
-#endif /* CONFIG_SLOB */
+#define kmalloc_node_track_caller kmalloc_node
 
-/* System wide caches */
-extern kmem_cache_t	*vm_area_cachep;
-extern kmem_cache_t	*names_cachep;
-extern kmem_cache_t	*files_cachep;
-extern kmem_cache_t	*filp_cachep;
-extern kmem_cache_t	*fs_cachep;
-extern kmem_cache_t	*sighand_cachep;
-extern kmem_cache_t	*bio_cachep;
+#endif /* CONFIG_SLOB */
 
 #endif	/* __KERNEL__ */
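
A short example of the wrapper scenario the comment above describes; my_alloc() is hypothetical. Because the macro expands inside the wrapper, __builtin_return_address(0) records the wrapper's caller, so slab leak reports point at the interesting call site rather than at the wrapper.

/* hypothetical subsystem-wide allocation helper */
static void *my_alloc(size_t size, int node)
{
	/* leak reports name my_alloc()'s caller, not my_alloc() itself */
	return kmalloc_node_track_caller(size, GFP_KERNEL, node);
}
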
 
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 5164998..7ba23ec 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -99,6 +99,13 @@
 static inline void smp_send_reschedule(int cpu) { }
 #define num_booting_cpus()			1
 #define smp_prepare_boot_cpu()			do {} while (0)
+static inline int smp_call_function_single(int cpuid, void (*func) (void *info),
+				void *info, int retry, int wait)
+{
+	/* Disable interrupts here? */
+	func(info);
+	return 0;
+}
 
 #endif /* !SMP */
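
With this stub in place, callers no longer need an #ifdef: on SMP the function runs on the chosen CPU, on UP it is simply invoked directly. An illustrative caller (do_flush() and its payload are hypothetical):

static void do_flush(void *info)
{
	/* runs on the target CPU, or inline on uniprocessor builds */
}

static void flush_on_cpu(int cpu, void *data)
{
	smp_call_function_single(cpu, do_flush, data, 0, 1);	/* retry=0, wait=1 */
}
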
 
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 8451052..94b767d 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -52,6 +52,7 @@
 #include <linux/thread_info.h>
 #include <linux/kernel.h>
 #include <linux/stringify.h>
+#include <linux/bottom_half.h>
 
 #include <asm/system.h>
 
diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h
index a2eb9b4..4a68125 100644
--- a/include/linux/sunrpc/rpc_pipe_fs.h
+++ b/include/linux/sunrpc/rpc_pipe_fs.h
@@ -30,7 +30,7 @@
 #define RPC_PIPE_WAIT_FOR_OPEN	1
 	int flags;
 	struct rpc_pipe_ops *ops;
-	struct work_struct queue_timeout;
+	struct delayed_work queue_timeout;
 };
 
 static inline struct rpc_inode *
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index f399c13..0746c3b 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -222,7 +222,7 @@
 
 #ifndef RPC_DEBUG
 # define RPC_WAITQ_INIT(var,qname) { \
-		.lock = SPIN_LOCK_UNLOCKED, \
+		.lock = __SPIN_LOCK_UNLOCKED(var.lock), \
 		.tasks = { \
 			[0] = LIST_HEAD_INIT(var.tasks[0]), \
 			[1] = LIST_HEAD_INIT(var.tasks[1]), \
@@ -231,7 +231,7 @@
 	}
 #else
 # define RPC_WAITQ_INIT(var,qname) { \
-		.lock = SPIN_LOCK_UNLOCKED, \
+		.lock = __SPIN_LOCK_UNLOCKED(var.lock), \
 		.tasks = { \
 			[0] = LIST_HEAD_INIT(var.tasks[0]), \
 			[1] = LIST_HEAD_INIT(var.tasks[1]), \
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 60394fb..3e04c15 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -177,7 +177,7 @@
 	unsigned long		connect_timeout,
 				bind_timeout,
 				reestablish_timeout;
-	struct work_struct	connect_worker;
+	struct delayed_work	connect_worker;
 	unsigned short		port;
 
 	/*
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index b1237f1..bf99bd4 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -9,10 +9,13 @@
 #include <linux/init.h>
 #include <linux/pm.h>
 
-/* page backup entry */
+/* struct pbe is used for creating lists of pages that should be restored
+ * atomically during the resume from disk, because the page frames they have
+ * occupied before the suspend are in use.
+ */
 struct pbe {
-	unsigned long address;		/* address of the copy */
-	unsigned long orig_address;	/* original address of page */
+	void *address;		/* address of the copy */
+	void *orig_address;	/* original address of a page */
 	struct pbe *next;
 };
 
diff --git a/include/linux/swap.h b/include/linux/swap.h
index e7c36ba..add51ce 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -218,8 +218,6 @@
 /* linux/mm/page_io.c */
 extern int swap_readpage(struct file *, struct page *);
 extern int swap_writepage(struct page *page, struct writeback_control *wbc);
-extern int rw_swap_page_sync(int rw, swp_entry_t entry, struct page *page,
-				struct bio **bio_chain);
 extern int end_swap_bio_read(struct bio *bio, unsigned int bytes_done, int err);
 
 /* linux/mm/swap_state.c */
@@ -247,9 +245,10 @@
 extern int valid_swaphandles(swp_entry_t, unsigned long *);
 extern void swap_free(swp_entry_t);
 extern void free_swap_and_cache(swp_entry_t);
-extern int swap_type_of(dev_t);
+extern int swap_type_of(dev_t, sector_t);
 extern unsigned int count_swap_pages(int, int);
 extern sector_t map_swap_page(struct swap_info_struct *, pgoff_t);
+extern sector_t swapdev_block(int, pgoff_t);
 extern struct swap_info_struct *get_swap_info_struct(unsigned);
 extern int can_share_swap_page(struct page *);
 extern int remove_exclusive_swap_page(struct page *);
@@ -259,7 +258,6 @@
 
 /* linux/mm/thrash.c */
 extern struct mm_struct * swap_token_mm;
-extern unsigned long swap_token_default_timeout;
 extern void grab_swap_token(void);
 extern void __put_swap_token(struct mm_struct *);
 
diff --git a/include/linux/taskstats_kern.h b/include/linux/taskstats_kern.h
index 6562a20..7e9680f 100644
--- a/include/linux/taskstats_kern.h
+++ b/include/linux/taskstats_kern.h
@@ -12,64 +12,27 @@
 #include <net/genetlink.h>
 
 #ifdef CONFIG_TASKSTATS
-extern kmem_cache_t *taskstats_cache;
+extern struct kmem_cache *taskstats_cache;
 extern struct mutex taskstats_exit_mutex;
 
-static inline void taskstats_exit_free(struct taskstats *tidstats)
-{
-	if (tidstats)
-		kmem_cache_free(taskstats_cache, tidstats);
-}
-
 static inline void taskstats_tgid_init(struct signal_struct *sig)
 {
 	sig->stats = NULL;
 }
 
-static inline void taskstats_tgid_alloc(struct task_struct *tsk)
-{
-	struct signal_struct *sig = tsk->signal;
-	struct taskstats *stats;
-
-	if (sig->stats != NULL)
-		return;
-
-	/* No problem if kmem_cache_zalloc() fails */
-	stats = kmem_cache_zalloc(taskstats_cache, SLAB_KERNEL);
-
-	spin_lock_irq(&tsk->sighand->siglock);
-	if (!sig->stats) {
-		sig->stats = stats;
-		stats = NULL;
-	}
-	spin_unlock_irq(&tsk->sighand->siglock);
-
-	if (stats)
-		kmem_cache_free(taskstats_cache, stats);
-}
-
 static inline void taskstats_tgid_free(struct signal_struct *sig)
 {
 	if (sig->stats)
 		kmem_cache_free(taskstats_cache, sig->stats);
 }
 
-extern void taskstats_exit_alloc(struct taskstats **, unsigned int *);
-extern void taskstats_exit_send(struct task_struct *, struct taskstats *, int, unsigned int);
+extern void taskstats_exit(struct task_struct *, int group_dead);
 extern void taskstats_init_early(void);
 #else
-static inline void taskstats_exit_alloc(struct taskstats **ptidstats, unsigned int *mycpu)
-{}
-static inline void taskstats_exit_free(struct taskstats *ptidstats)
-{}
-static inline void taskstats_exit_send(struct task_struct *tsk,
-				       struct taskstats *tidstats,
-				       int group_dead, unsigned int cpu)
+static inline void taskstats_exit(struct task_struct *tsk, int group_dead)
 {}
 static inline void taskstats_tgid_init(struct signal_struct *sig)
 {}
-static inline void taskstats_tgid_alloc(struct task_struct *tsk)
-{}
 static inline void taskstats_tgid_free(struct signal_struct *sig)
 {}
 static inline void taskstats_init_early(void)
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 65321f91..f717f08 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -53,7 +53,7 @@
 };
 
 struct tty_bufhead {
-	struct work_struct		work;
+	struct delayed_work work;
 	struct semaphore pty_sem;
 	spinlock_t lock;
 	struct tty_buffer *head;	/* Queue head */
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 65a68da..975c963 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -1,8 +1,43 @@
 #ifndef __LINUX_UACCESS_H__
 #define __LINUX_UACCESS_H__
 
+#include <linux/preempt.h>
 #include <asm/uaccess.h>
 
+/*
+ * These routines enable/disable the pagefault handler in that
+ * it will not take any locks and go straight to the fixup table.
+ *
+ * They have great resemblance to the preempt_disable/enable calls
+ * and in fact they are identical; this is because currently there is
+ * no other way to make the pagefault handlers do this. So we do
+ * disable preemption but we don't necessarily care about that.
+ */
+static inline void pagefault_disable(void)
+{
+	inc_preempt_count();
+	/*
+	 * make sure to have issued the store before a pagefault
+	 * can hit.
+	 */
+	barrier();
+}
+
+static inline void pagefault_enable(void)
+{
+	/*
+	 * make sure to issue those last loads/stores before enabling
+	 * the pagefault handler again.
+	 */
+	barrier();
+	dec_preempt_count();
+	/*
+	 * make sure the decrement is visible before we check for rescheduling
+	 */
+	barrier();
+	preempt_check_resched();
+}
+
 #ifndef ARCH_HAS_NOCACHE_UACCESS
 
 static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
@@ -30,14 +65,22 @@
  * do_page_fault() doesn't attempt to take mmap_sem.  This makes
  * probe_kernel_address() suitable for use within regions where the caller
  * already holds mmap_sem, or other locks which nest inside mmap_sem.
+ * This must be a macro because __get_user() needs to know the types of the
+ * args.
+ *
+ * The macro switches to KERNEL_DS around the access and restores the
+ * caller's previous address limit (get_fs()) before returning.
  */
 #define probe_kernel_address(addr, retval)		\
 	({						\
 		long ret;				\
+		mm_segment_t old_fs = get_fs();		\
 							\
-		inc_preempt_count();			\
-		ret = __get_user(retval, (__force typeof(*addr) __user *)addr);\
-		dec_preempt_count();			\
+		set_fs(KERNEL_DS);			\
+		pagefault_disable();			\
+		ret = __get_user(retval, (__force typeof(retval) __user *)(addr));		\
+		pagefault_enable();			\
+		set_fs(old_fs);				\
 		ret;					\
 	})
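
A brief sketch of the intended use of probe_kernel_address(): reading a word from an address that may be bogus (for instance while dumping a suspect pointer on an error path) without risking a fault. The surrounding function and variable names are illustrative.

static void dump_suspect_word(unsigned long addr)
{
	unsigned long word;

	if (probe_kernel_address((unsigned long *)addr, word))
		printk(KERN_INFO "address %#lx is not readable\n", addr);
	else
		printk(KERN_INFO "value at %#lx: %#lx\n", addr, word);
}
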
 
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 0cd73ed..aab5b1b 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -388,7 +388,7 @@
 
 	int pm_usage_cnt;		/* usage counter for autosuspend */
 #ifdef CONFIG_PM
-	struct work_struct autosuspend;	/* for delayed autosuspends */
+	struct delayed_work autosuspend; /* for delayed autosuspends */
 	struct mutex pm_mutex;		/* protects PM operations */
 
 	unsigned auto_pm:1;		/* autosuspend/resume in progress */
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 9bca353..f0cb1df 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -11,12 +11,23 @@
 
 struct workqueue_struct;
 
+struct work_struct;
+typedef void (*work_func_t)(struct work_struct *work);
+
 struct work_struct {
-	unsigned long pending;
+	/* the first word is the work queue pointer and the flags rolled into
+	 * one */
+	unsigned long management;
+#define WORK_STRUCT_PENDING 0		/* T if work item pending execution */
+#define WORK_STRUCT_NOAUTOREL 1		/* F if work item automatically released on exec */
+#define WORK_STRUCT_FLAG_MASK (3UL)
+#define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK)
 	struct list_head entry;
-	void (*func)(void *);
-	void *data;
-	void *wq_data;
+	work_func_t func;
+};
+
+struct delayed_work {
+	struct work_struct work;
 	struct timer_list timer;
 };
 
@@ -24,77 +35,159 @@
 	struct work_struct work;
 };
 
-#define __WORK_INITIALIZER(n, f, d) {				\
+#define __WORK_INITIALIZER(n, f) {				\
+	.management = 0,					\
         .entry	= { &(n).entry, &(n).entry },			\
 	.func = (f),						\
-	.data = (d),						\
+	}
+
+#define __WORK_INITIALIZER_NAR(n, f) {				\
+	.management = (1 << WORK_STRUCT_NOAUTOREL),		\
+        .entry	= { &(n).entry, &(n).entry },			\
+	.func = (f),						\
+	}
+
+#define __DELAYED_WORK_INITIALIZER(n, f) {			\
+	.work = __WORK_INITIALIZER((n).work, (f)),		\
 	.timer = TIMER_INITIALIZER(NULL, 0, 0),			\
 	}
 
-#define DECLARE_WORK(n, f, d)					\
-	struct work_struct n = __WORK_INITIALIZER(n, f, d)
+#define __DELAYED_WORK_INITIALIZER_NAR(n, f) {			\
+	.work = __WORK_INITIALIZER_NAR((n).work, (f)),		\
+	.timer = TIMER_INITIALIZER(NULL, 0, 0),			\
+	}
+
+#define DECLARE_WORK(n, f)					\
+	struct work_struct n = __WORK_INITIALIZER(n, f)
+
+#define DECLARE_WORK_NAR(n, f)					\
+	struct work_struct n = __WORK_INITIALIZER_NAR(n, f)
+
+#define DECLARE_DELAYED_WORK(n, f)				\
+	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)
+
+#define DECLARE_DELAYED_WORK_NAR(n, f)			\
+	struct delayed_work n = __DELAYED_WORK_INITIALIZER_NAR(n, f)
 
 /*
- * initialize a work-struct's func and data pointers:
+ * initialize a work item's function pointer
  */
-#define PREPARE_WORK(_work, _func, _data)			\
+#define PREPARE_WORK(_work, _func)				\
 	do {							\
-		(_work)->func = _func;				\
-		(_work)->data = _data;				\
+		(_work)->func = (_func);			\
 	} while (0)
 
+#define PREPARE_DELAYED_WORK(_work, _func)			\
+	PREPARE_WORK(&(_work)->work, (_func))
+
 /*
- * initialize all of a work-struct:
+ * initialize all of a work item in one go
  */
-#define INIT_WORK(_work, _func, _data)				\
+#define INIT_WORK(_work, _func)					\
 	do {							\
+		(_work)->management = 0;			\
 		INIT_LIST_HEAD(&(_work)->entry);		\
-		(_work)->pending = 0;				\
-		PREPARE_WORK((_work), (_func), (_data));	\
+		PREPARE_WORK((_work), (_func));			\
+	} while (0)
+
+#define INIT_WORK_NAR(_work, _func)					\
+	do {								\
+		(_work)->management = (1 << WORK_STRUCT_NOAUTOREL);	\
+		INIT_LIST_HEAD(&(_work)->entry);			\
+		PREPARE_WORK((_work), (_func));				\
+	} while (0)
+
+#define INIT_DELAYED_WORK(_work, _func)				\
+	do {							\
+		INIT_WORK(&(_work)->work, (_func));		\
 		init_timer(&(_work)->timer);			\
 	} while (0)
 
+#define INIT_DELAYED_WORK_NAR(_work, _func)			\
+	do {							\
+		INIT_WORK_NAR(&(_work)->work, (_func));		\
+		init_timer(&(_work)->timer);			\
+	} while (0)
+
+/**
+ * work_pending - Find out whether a work item is currently pending
+ * @work: The work item in question
+ */
+#define work_pending(work) \
+	test_bit(WORK_STRUCT_PENDING, &(work)->management)
+
+/**
+ * delayed_work_pending - Find out whether a delayable work item is currently
+ * pending
+ * @work: The work item in question
+ */
+#define delayed_work_pending(work) \
+	test_bit(WORK_STRUCT_PENDING, &(work)->work.management)
+
+/**
+ * work_release - Release a work item under execution
+ * @work: The work item to release
+ *
+ * This is used to release a work item that has been initialised with automatic
+ * release mode disabled (WORK_STRUCT_NOAUTOREL is set).  This gives the work
+ * function the opportunity to grab auxiliary data from the container of the
+ * work_struct before clearing the pending bit as the work_struct may be
+ * subject to deallocation the moment the pending bit is cleared.
+ *
+ * In such a case, this should be called in the work function after it has
+ * fetched any data it may require from the container of the work_struct.
+ * After this function has been called, the work_struct may be scheduled for
+ * further execution or it may be deallocated unless other precautions are
+ * taken.
+ *
+ * This should also be used to release a delayed work item.
+ */
+#define work_release(work) \
+	clear_bit(WORK_STRUCT_PENDING, &(work)->management)
+
+
 extern struct workqueue_struct *__create_workqueue(const char *name,
-						    int singlethread);
-#define create_workqueue(name) __create_workqueue((name), 0)
-#define create_singlethread_workqueue(name) __create_workqueue((name), 1)
+						    int singlethread,
+						    int freezeable);
+#define create_workqueue(name) __create_workqueue((name), 0, 0)
+#define create_freezeable_workqueue(name) __create_workqueue((name), 0, 1)
+#define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0)
 
 extern void destroy_workqueue(struct workqueue_struct *wq);
 
 extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work));
-extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct work_struct *work, unsigned long delay));
+extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work, unsigned long delay));
 extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
-	struct work_struct *work, unsigned long delay);
+	struct delayed_work *work, unsigned long delay);
 extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq));
 
 extern int FASTCALL(schedule_work(struct work_struct *work));
-extern int FASTCALL(schedule_delayed_work(struct work_struct *work, unsigned long delay));
+extern int FASTCALL(schedule_delayed_work(struct delayed_work *work, unsigned long delay));
 
-extern int schedule_delayed_work_on(int cpu, struct work_struct *work, unsigned long delay);
-extern int schedule_on_each_cpu(void (*func)(void *info), void *info);
+extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay);
+extern int schedule_on_each_cpu(work_func_t func);
 extern void flush_scheduled_work(void);
 extern int current_is_keventd(void);
 extern int keventd_up(void);
 
 extern void init_workqueues(void);
-void cancel_rearming_delayed_work(struct work_struct *work);
+void cancel_rearming_delayed_work(struct delayed_work *work);
 void cancel_rearming_delayed_workqueue(struct workqueue_struct *,
-				       struct work_struct *);
-int execute_in_process_context(void (*fn)(void *), void *,
-			       struct execute_work *);
+				       struct delayed_work *);
+int execute_in_process_context(work_func_t fn, struct execute_work *);
 
 /*
  * Kill off a pending schedule_delayed_work().  Note that the work callback
  * function may still be running on return from cancel_delayed_work().  Run
  * flush_scheduled_work() to wait on it.
  */
-static inline int cancel_delayed_work(struct work_struct *work)
+static inline int cancel_delayed_work(struct delayed_work *work)
 {
 	int ret;
 
 	ret = del_timer_sync(&work->timer);
 	if (ret)
-		clear_bit(0, &work->pending);
+		clear_bit(WORK_STRUCT_PENDING, &work->work.management);
 	return ret;
 }
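
Since the data pointer is gone, work functions now recover their context from the work_struct itself via container_of(). A hedged sketch of the conversion pattern for a delayable item; struct foo and foo_handler() are illustrative names.

struct foo {
	struct delayed_work dwork;
	int value;
};

static void foo_handler(struct work_struct *work)
{
	/* new style: no void *data; walk back from the embedded work_struct */
	struct foo *f = container_of(work, struct foo, dwork.work);

	printk(KERN_DEBUG "foo value %d\n", f->value);
}

static void foo_start(struct foo *f)
{
	INIT_DELAYED_WORK(&f->dwork, foo_handler);	/* was INIT_WORK(w, fn, data) */
	schedule_delayed_work(&f->dwork, HZ);
}
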
 
diff --git a/include/net/dst.h b/include/net/dst.h
index e156e38..62b7e75 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -98,7 +98,7 @@
 	int			entry_size;
 
 	atomic_t		entries;
-	kmem_cache_t 		*kmem_cachep;
+	struct kmem_cache 		*kmem_cachep;
 };
 
 #ifdef __KERNEL__
diff --git a/include/net/ieee80211softmac.h b/include/net/ieee80211softmac.h
index 617b672..8911927 100644
--- a/include/net/ieee80211softmac.h
+++ b/include/net/ieee80211softmac.h
@@ -108,8 +108,8 @@
 	/* Scan retries remaining */
 	int scan_retry;
 
-	struct work_struct work;
-	struct work_struct timeout;
+	struct delayed_work work;
+	struct delayed_work timeout;
 };
 
 struct ieee80211softmac_bss_info {
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index a9eb2ea..34cc76e 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -125,7 +125,7 @@
 	rwlock_t			lhash_lock ____cacheline_aligned;
 	atomic_t			lhash_users;
 	wait_queue_head_t		lhash_wait;
-	kmem_cache_t			*bind_bucket_cachep;
+	struct kmem_cache			*bind_bucket_cachep;
 };
 
 static inline struct inet_ehash_bucket *inet_ehash_bucket(
@@ -136,10 +136,10 @@
 }
 
 extern struct inet_bind_bucket *
-		    inet_bind_bucket_create(kmem_cache_t *cachep,
+		    inet_bind_bucket_create(struct kmem_cache *cachep,
 					    struct inet_bind_hashbucket *head,
 					    const unsigned short snum);
-extern void inet_bind_bucket_destroy(kmem_cache_t *cachep,
+extern void inet_bind_bucket_destroy(struct kmem_cache *cachep,
 				     struct inet_bind_bucket *tb);
 
 static inline int inet_bhashfn(const __u16 lport, const int bhash_size)
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 5f48748..f7be1ac 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -84,7 +84,7 @@
 };
 
 extern void inet_twdr_hangman(unsigned long data);
-extern void inet_twdr_twkill_work(void *data);
+extern void inet_twdr_twkill_work(struct work_struct *work);
 extern void inet_twdr_twcal_tick(unsigned long data);
 
 #if (BITS_PER_LONG == 64)
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index c8aacbd..2396703 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -160,7 +160,7 @@
 	atomic_t		entries;
 	rwlock_t		lock;
 	unsigned long		last_rand;
-	kmem_cache_t		*kmem_cachep;
+	struct kmem_cache		*kmem_cachep;
 	struct neigh_statistics	*stats;
 	struct neighbour	**hash_buckets;
 	unsigned int		hash_mask;
diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h
index cef3136..41bcc9e 100644
--- a/include/net/netfilter/nf_conntrack_expect.h
+++ b/include/net/netfilter/nf_conntrack_expect.h
@@ -7,7 +7,7 @@
 #include <net/netfilter/nf_conntrack.h>
 
 extern struct list_head nf_conntrack_expect_list;
-extern kmem_cache_t *nf_conntrack_expect_cachep;
+extern struct kmem_cache *nf_conntrack_expect_cachep;
 extern struct file_operations exp_file_ops;
 
 struct nf_conntrack_expect
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index e37baaf..7aed02c 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -29,7 +29,7 @@
 struct request_sock_ops {
 	int		family;
 	int		obj_size;
-	kmem_cache_t	*slab;
+	struct kmem_cache	*slab;
 	int		(*rtx_syn_ack)(struct sock *sk,
 				       struct request_sock *req,
 				       struct dst_entry *dst);
@@ -60,7 +60,7 @@
 
 static inline struct request_sock *reqsk_alloc(const struct request_sock_ops *ops)
 {
-	struct request_sock *req = kmem_cache_alloc(ops->slab, SLAB_ATOMIC);
+	struct request_sock *req = kmem_cache_alloc(ops->slab, GFP_ATOMIC);
 
 	if (req != NULL)
 		req->rsk_ops = ops;
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index f8cbe40..c089f93 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -1030,7 +1030,7 @@
 void sctp_inq_free(struct sctp_inq *);
 void sctp_inq_push(struct sctp_inq *, struct sctp_chunk *packet);
 struct sctp_chunk *sctp_inq_pop(struct sctp_inq *);
-void sctp_inq_set_th_handler(struct sctp_inq *, void (*)(void *), void *);
+void sctp_inq_set_th_handler(struct sctp_inq *, work_func_t);
 
 /* This is the structure we use to hold outbound chunks.  You push
  * chunks in and they automatically pop out the other end as bundled
diff --git a/include/net/sock.h b/include/net/sock.h
index fe3a33f..03684e7 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -571,7 +571,7 @@
 	int			*sysctl_rmem;
 	int			max_header;
 
-	kmem_cache_t		*slab;
+	struct kmem_cache		*slab;
 	unsigned int		obj_size;
 
 	atomic_t		*orphan_count;
@@ -746,6 +746,25 @@
  */
 #define sock_owned_by_user(sk)	((sk)->sk_lock.owner)
 
+/*
+ * Macro so as to not evaluate some arguments when
+ * lockdep is not enabled.
+ *
+ * Mark both the sk_lock and the sk_lock.slock as a
+ * per-address-family lock class.
+ */
+#define sock_lock_init_class_and_name(sk, sname, skey, name, key) 	\
+do {									\
+	sk->sk_lock.owner = NULL;					\
+	init_waitqueue_head(&sk->sk_lock.wq);				\
+	spin_lock_init(&(sk)->sk_lock.slock);				\
+	debug_check_no_locks_freed((void *)&(sk)->sk_lock,		\
+			sizeof((sk)->sk_lock));				\
+	lockdep_set_class_and_name(&(sk)->sk_lock.slock,		\
+		       	(skey), (sname));				\
+	lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);	\
+} while (0)
+
 extern void FASTCALL(lock_sock_nested(struct sock *sk, int subclass));
 
 static inline void lock_sock(struct sock *sk)
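Editorial note: the sock_lock_init_class_and_name() macro added above lets a protocol hand lockdep a per-address-family class and report name for both sk_lock and its underlying slock. A minimal sketch of how a caller might use it; the AF_MYPROTO family, the key variables and the helper function are invented for illustration and are not part of this patch:

#include <net/sock.h>

/* One static key per lock so lockdep can tell the classes apart. */
static struct lock_class_key myproto_slock_key;
static struct lock_class_key myproto_sk_lock_key;

static void myproto_init_sock_lock(struct sock *sk)
{
	sock_lock_init_class_and_name(sk,
			"slock-AF_MYPROTO", &myproto_slock_key,
			"sk_lock-AF_MYPROTO", &myproto_sk_lock_key);
}

The string arguments only affect what lockdep prints in its reports; the keys are what actually separate the lock classes.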
diff --git a/include/net/timewait_sock.h b/include/net/timewait_sock.h
index d7a306e..1e1ee32 100644
--- a/include/net/timewait_sock.h
+++ b/include/net/timewait_sock.h
@@ -15,7 +15,7 @@
 #include <net/sock.h>
 
 struct timewait_sock_ops {
-	kmem_cache_t	*twsk_slab;
+	struct kmem_cache	*twsk_slab;
 	unsigned int	twsk_obj_size;
 	int		(*twsk_unique)(struct sock *sk,
 				       struct sock *sktw, void *twp);
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 44b2f82..0c775fc 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -201,9 +201,14 @@
         void *lldd_dev;
 };
 
+struct sas_discovery_event {
+	struct work_struct work;
+	struct asd_sas_port *port;
+};
+
 struct sas_discovery {
 	spinlock_t disc_event_lock;
-	struct work_struct disc_work[DISC_NUM_EVENTS];
+	struct sas_discovery_event disc_work[DISC_NUM_EVENTS];
 	unsigned long    pending;
 	u8     fanout_sas_addr[8];
 	u8     eeds_a[8];
@@ -249,14 +254,19 @@
 	void *lldd_port;	  /* not touched by the sas class code */
 };
 
+struct asd_sas_event {
+	struct work_struct work;
+	struct asd_sas_phy *phy;
+};
+
 /* The phy pretty much is controlled by the LLDD.
  * The class only reads those fields.
  */
 struct asd_sas_phy {
 /* private: */
 	/* protected by ha->event_lock */
-	struct work_struct   port_events[PORT_NUM_EVENTS];
-	struct work_struct   phy_events[PHY_NUM_EVENTS];
+	struct asd_sas_event   port_events[PORT_NUM_EVENTS];
+	struct asd_sas_event   phy_events[PHY_NUM_EVENTS];
 
 	unsigned long port_events_pending;
 	unsigned long phy_events_pending;
@@ -308,10 +318,15 @@
 	int               queue_thread_kill;
 };
 
+struct sas_ha_event {
+	struct work_struct work;
+	struct sas_ha_struct *ha;
+};
+
 struct sas_ha_struct {
 /* private: */
 	spinlock_t       event_lock;
-	struct work_struct ha_events[HA_NUM_EVENTS];
+	struct sas_ha_event ha_events[HA_NUM_EVENTS];
 	unsigned long	 pending;
 
 	struct scsi_core core;
@@ -542,7 +557,7 @@
 
 static inline struct sas_task *sas_alloc_task(gfp_t flags)
 {
-	extern kmem_cache_t *sas_task_cache;
+	extern struct kmem_cache *sas_task_cache;
 	struct sas_task *task = kmem_cache_alloc(sas_task_cache, flags);
 
 	if (task) {
@@ -560,7 +575,7 @@
 static inline void sas_free_task(struct sas_task *task)
 {
 	if (task) {
-		extern kmem_cache_t *sas_task_cache;
+		extern struct kmem_cache *sas_task_cache;
 		BUG_ON(!list_empty(&task->list));
 		kmem_cache_free(sas_task_cache, task);
 	}
@@ -631,6 +646,6 @@
 
 void sas_init_dev(struct domain_device *);
 
-void sas_task_abort(struct sas_task *task);
+void sas_task_abort(struct work_struct *);
 
 #endif /* _SASLIB_H_ */
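Editorial note: the libsas hunks above wrap each work_struct in a small event structure (sas_discovery_event, asd_sas_event, sas_ha_event) because work handlers now receive only the work item and must recover their context with container_of(). A hedged sketch of that pattern, with invented names rather than the libsas ones:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_event {			/* stand-in for e.g. asd_sas_event */
	struct work_struct work;
	int event_id;
};

static void my_event_worker(struct work_struct *work)
{
	/* The handler gets the embedded work item and digs out the wrapper. */
	struct my_event *ev = container_of(work, struct my_event, work);

	pr_debug("handling event %d\n", ev->event_id);
	kfree(ev);
}

static int my_queue_event(int id)
{
	struct my_event *ev = kzalloc(sizeof(*ev), GFP_ATOMIC);

	if (!ev)
		return -ENOMEM;
	ev->event_id = id;
	INIT_WORK(&ev->work, my_event_worker);
	schedule_work(&ev->work);
	return 0;
}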
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index fd35232..798f7c7 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -206,9 +206,9 @@
 	u8 flags;
 	struct list_head peers;
 	struct device dev;
- 	struct work_struct dev_loss_work;
+ 	struct delayed_work dev_loss_work;
  	struct work_struct scan_work;
- 	struct work_struct fail_io_work;
+ 	struct delayed_work fail_io_work;
  	struct work_struct stgt_delete_work;
 	struct work_struct rport_delete_work;
 } __attribute__((aligned(sizeof(unsigned long))));
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 4b95c89..d5c218d 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -176,7 +176,7 @@
 
 	/* recovery fields */
 	int recovery_tmo;
-	struct work_struct recovery_work;
+	struct delayed_work recovery_work;
 
 	int target_id;
 
diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
index 4c43521..3372039 100644
--- a/include/sound/ac97_codec.h
+++ b/include/sound/ac97_codec.h
@@ -511,7 +511,7 @@
 #ifdef CONFIG_SND_AC97_POWER_SAVE
 	unsigned int power_up;	/* power states */
 	struct workqueue_struct *power_workq;
-	struct work_struct power_work;
+	struct delayed_work power_work;
 #endif
 	struct device dev;
 };
diff --git a/include/sound/ak4114.h b/include/sound/ak4114.h
index 11702aa..2ee0616 100644
--- a/include/sound/ak4114.h
+++ b/include/sound/ak4114.h
@@ -182,7 +182,7 @@
 	unsigned char rcs0;
 	unsigned char rcs1;
 	struct workqueue_struct *workqueue;
-	struct work_struct work;
+	struct delayed_work work;
 	void *change_callback_private;
 	void (*change_callback)(struct ak4114 *ak4114, unsigned char c0, unsigned char c1);
 };
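Editorial note: several fields above (the FC rport timers, the iSCSI recovery work, the AC'97 and AK4114 deferred work) switch from work_struct to delayed_work because they are armed with a timeout rather than queued immediately. A minimal sketch of driving such a member, using invented names; the real drivers differ in detail:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct my_codec {			/* hypothetical device state */
	struct delayed_work power_work;
	int powered;
};

static void my_power_down(struct work_struct *work)
{
	/* For a delayed_work member, container_of() goes through the inner .work. */
	struct my_codec *c = container_of(work, struct my_codec, power_work.work);

	c->powered = 0;
}

static void my_codec_init(struct my_codec *c)
{
	c->powered = 1;
	INIT_DELAYED_WORK(&c->power_work, my_power_down);
}

static void my_codec_idle(struct my_codec *c)
{
	/* Power down half a second after going idle. */
	schedule_delayed_work(&c->power_work, msecs_to_jiffies(500));
}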
diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
index 919a80c..2cfd7cb 100644
--- a/init/do_mounts_initrd.c
+++ b/init/do_mounts_initrd.c
@@ -6,6 +6,7 @@
 #include <linux/romfs_fs.h>
 #include <linux/initrd.h>
 #include <linux/sched.h>
+#include <linux/freezer.h>
 
 #include "do_mounts.h"
 
diff --git a/init/initramfs.c b/init/initramfs.c
index d28c109..85f0403 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -182,6 +182,10 @@
 
 static int __init do_header(void)
 {
+	if (memcmp(collected, "070707", 6)==0) {
+		error("incorrect cpio method used: use -H newc option");
+		return 1;
+	}
 	if (memcmp(collected, "070701", 6)) {
 		error("no cpio magic");
 		return 1;
diff --git a/init/main.c b/init/main.c
index 985c9ed..1174ae3 100644
--- a/init/main.c
+++ b/init/main.c
@@ -74,6 +74,10 @@
 #error Sorry, your GCC is too old. It builds incorrect kernels.
 #endif
 
+#if __GNUC__ == 4 && __GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ == 0
+#warning gcc-4.1.0 is known to miscompile the kernel.  A different compiler version is recommended.
+#endif
+
 static int init(void *);
 
 extern void init_IRQ(void);
diff --git a/ipc/compat.c b/ipc/compat.c
index 4d20cfd..fa18141 100644
--- a/ipc/compat.c
+++ b/ipc/compat.c
@@ -115,7 +115,6 @@
 
 extern int sem_ctls[];
 #define sc_semopm	(sem_ctls[2])
-#define MAXBUF (64*1024)
 
 static inline int compat_ipc_parse_version(int *cmd)
 {
@@ -307,35 +306,30 @@
 
 long compat_sys_msgsnd(int first, int second, int third, void __user *uptr)
 {
-	struct msgbuf __user *p;
 	struct compat_msgbuf __user *up = uptr;
 	long type;
 
 	if (first < 0)
 		return -EINVAL;
-	if (second < 0 || (second >= MAXBUF - sizeof(struct msgbuf)))
+	if (second < 0)
 		return -EINVAL;
 
-	p = compat_alloc_user_space(second + sizeof(struct msgbuf));
-	if (get_user(type, &up->mtype) ||
-	    put_user(type, &p->mtype) ||
-	    copy_in_user(p->mtext, up->mtext, second))
+	if (get_user(type, &up->mtype))
 		return -EFAULT;
 
-	return sys_msgsnd(first, p, second, third);
+	return do_msgsnd(first, type, up->mtext, second, third);
 }
 
 long compat_sys_msgrcv(int first, int second, int msgtyp, int third,
 			   int version, void __user *uptr)
 {
-	struct msgbuf __user *p;
 	struct compat_msgbuf __user *up;
 	long type;
 	int err;
 
 	if (first < 0)
 		return -EINVAL;
-	if (second < 0 || (second >= MAXBUF - sizeof(struct msgbuf)))
+	if (second < 0)
 		return -EINVAL;
 
 	if (!version) {
@@ -349,14 +343,11 @@
 		uptr = compat_ptr(ipck.msgp);
 		msgtyp = ipck.msgtyp;
 	}
-	p = compat_alloc_user_space(second + sizeof(struct msgbuf));
-	err = sys_msgrcv(first, p, second, msgtyp, third);
+	up = uptr;
+	err = do_msgrcv(first, &type, up->mtext, second, msgtyp, third);
 	if (err < 0)
 		goto out;
-	up = uptr;
-	if (get_user(type, &p->mtype) ||
-	    put_user(type, &up->mtype) ||
-	    copy_in_user(up->mtext, p->mtext, err))
+	if (put_user(type, &up->mtype))
 		err = -EFAULT;
 out:
 	return err;
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 7c27400..3acc166 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -90,7 +90,7 @@
 static void remove_notification(struct mqueue_inode_info *info);
 
 static spinlock_t mq_lock;
-static kmem_cache_t *mqueue_inode_cachep;
+static struct kmem_cache *mqueue_inode_cachep;
 static struct vfsmount *mqueue_mnt;
 
 static unsigned int queues_count;
@@ -211,7 +211,7 @@
 	return get_sb_single(fs_type, flags, data, mqueue_fill_super, mnt);
 }
 
-static void init_once(void *foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;
 
@@ -224,7 +224,7 @@
 {
 	struct mqueue_inode_info *ei;
 
-	ei = kmem_cache_alloc(mqueue_inode_cachep, SLAB_KERNEL);
+	ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	return &ei->vfs_inode;
diff --git a/ipc/msg.c b/ipc/msg.c
index 1266b1d..a388824 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -626,12 +626,11 @@
 	return 0;
 }
 
-asmlinkage long
-sys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz, int msgflg)
+long do_msgsnd(int msqid, long mtype, void __user *mtext,
+		size_t msgsz, int msgflg)
 {
 	struct msg_queue *msq;
 	struct msg_msg *msg;
-	long mtype;
 	int err;
 	struct ipc_namespace *ns;
 
@@ -639,12 +638,10 @@
 
 	if (msgsz > ns->msg_ctlmax || (long) msgsz < 0 || msqid < 0)
 		return -EINVAL;
-	if (get_user(mtype, &msgp->mtype))
-		return -EFAULT;
 	if (mtype < 1)
 		return -EINVAL;
 
-	msg = load_msg(msgp->mtext, msgsz);
+	msg = load_msg(mtext, msgsz);
 	if (IS_ERR(msg))
 		return PTR_ERR(msg);
 
@@ -723,6 +720,16 @@
 	return err;
 }
 
+asmlinkage long
+sys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz, int msgflg)
+{
+	long mtype;
+
+	if (get_user(mtype, &msgp->mtype))
+		return -EFAULT;
+	return do_msgsnd(msqid, mtype, msgp->mtext, msgsz, msgflg);
+}
+
 static inline int convert_mode(long *msgtyp, int msgflg)
 {
 	/*
@@ -742,8 +749,8 @@
 	return SEARCH_EQUAL;
 }
 
-asmlinkage long sys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz,
-			   long msgtyp, int msgflg)
+long do_msgrcv(int msqid, long *pmtype, void __user *mtext,
+		size_t msgsz, long msgtyp, int msgflg)
 {
 	struct msg_queue *msq;
 	struct msg_msg *msg;
@@ -889,15 +896,30 @@
 		return PTR_ERR(msg);
 
 	msgsz = (msgsz > msg->m_ts) ? msg->m_ts : msgsz;
-	if (put_user (msg->m_type, &msgp->mtype) ||
-	    store_msg(msgp->mtext, msg, msgsz)) {
+	*pmtype = msg->m_type;
+	if (store_msg(mtext, msg, msgsz))
 		msgsz = -EFAULT;
-	}
+
 	free_msg(msg);
 
 	return msgsz;
 }
 
+asmlinkage long sys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz,
+			   long msgtyp, int msgflg)
+{
+	long err, mtype;
+
+	err =  do_msgrcv(msqid, &mtype, msgp->mtext, msgsz, msgtyp, msgflg);
+	if (err < 0)
+		goto out;
+
+	if (put_user(mtype, &msgp->mtype))
+		err = -EFAULT;
+out:
+	return err;
+}
+
 #ifdef CONFIG_PROC_FS
 static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
 {
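Editorial note: the do_msgsnd()/do_msgrcv() split above works because a SysV message is simply a long mtype followed by the payload, so the syscall wrappers can copy the type and pass the user-space mtext pointer straight through. For reference, a hedged user-space sketch of that layout; the structure and queue id are invented:

#include <string.h>
#include <sys/msg.h>

struct my_msg {				/* userspace view: long mtype, then payload */
	long mtype;
	char mtext[64];
};

static int send_hello(int msqid)
{
	struct my_msg m = { .mtype = 1 };

	strcpy(m.mtext, "hello");
	/* msgsz counts only the payload, not the mtype field. */
	return msgsnd(msqid, &m, strlen(m.mtext) + 1, 0);
}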
diff --git a/ipc/sem.c b/ipc/sem.c
index 21b3289..d3e12ef 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -1070,14 +1070,13 @@
 	ipc_rcu_getref(sma);
 	sem_unlock(sma);
 
-	new = (struct sem_undo *) kmalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
+	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
 	if (!new) {
 		ipc_lock_by_ptr(&sma->sem_perm);
 		ipc_rcu_putref(sma);
 		sem_unlock(sma);
 		return ERR_PTR(-ENOMEM);
 	}
-	memset(new, 0, sizeof(struct sem_undo) + sizeof(short)*nsems);
 	new->semadj = (short *) &new[1];
 	new->semid = semid;
 
diff --git a/ipc/util.c b/ipc/util.c
index cd8bb14..a9b7a22 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -514,6 +514,11 @@
 	container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
 }
 
+static void ipc_do_vfree(struct work_struct *work)
+{
+	vfree(container_of(work, struct ipc_rcu_sched, work));
+}
+
 /**
  * ipc_schedule_free - free ipc + rcu space
  * @head: RCU callback structure for queued work
@@ -528,7 +533,7 @@
 	struct ipc_rcu_sched *sched =
 			container_of(&(grace->data[0]), struct ipc_rcu_sched, data[0]);
 
-	INIT_WORK(&sched->work, vfree, sched);
+	INIT_WORK(&sched->work, ipc_do_vfree);
 	schedule_work(&sched->work);
 }
 
diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz
index 248e1c3..4af15802 100644
--- a/kernel/Kconfig.hz
+++ b/kernel/Kconfig.hz
@@ -7,7 +7,7 @@
 	default HZ_250
 	help
 	 Allows the configuration of the timer frequency. It is customary
-	 to have the timer interrupt run at 1000 HZ but 100 HZ may be more
+	 to have the timer interrupt run at 1000 Hz but 100 Hz may be more
 	 beneficial for servers and NUMA systems that do not need to have
 	 a fast response for user interaction and that may experience bus
 	 contention and cacheline bounces as a result of timer interrupts.
@@ -19,21 +19,30 @@
 	config HZ_100
 		bool "100 HZ"
 	help
-	  100 HZ is a typical choice for servers, SMP and NUMA systems
+	  100 Hz is a typical choice for servers, SMP and NUMA systems
 	  with lots of processors that may show reduced performance if
 	  too many timer interrupts are occurring.
 
 	config HZ_250
 		bool "250 HZ"
 	help
-	 250 HZ is a good compromise choice allowing server performance
+	 250 Hz is a good compromise choice allowing server performance
 	 while also showing good interactive responsiveness even
-	 on SMP and NUMA systems.
+	 on SMP and NUMA systems. If you are going to be using NTSC video
+	 or multimedia, select 300 Hz instead.
+
+	config HZ_300
+		bool "300 HZ"
+	help
+	 300 Hz is a good compromise choice allowing server performance
+	 while also showing good interactive responsiveness even
+	 on SMP and NUMA systems and exactly dividing by both PAL and
+	 NTSC frame rates for video and multimedia work.
 
 	config HZ_1000
 		bool "1000 HZ"
 	help
-	 1000 HZ is the preferred choice for desktop systems and other
+	 1000 Hz is the preferred choice for desktop systems and other
 	 systems requiring fast interactive responses to events.
 
 endchoice
@@ -42,5 +51,6 @@
 	int
 	default 100 if HZ_100
 	default 250 if HZ_250
+	default 300 if HZ_300
 	default 1000 if HZ_1000
 
diff --git a/kernel/acct.c b/kernel/acct.c
index 0aad5ca..dc12db8 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -89,7 +89,8 @@
 	struct timer_list	timer;
 };
 
-static struct acct_glbs acct_globals __cacheline_aligned = {SPIN_LOCK_UNLOCKED};
+static struct acct_glbs acct_globals __cacheline_aligned =
+	{__SPIN_LOCK_UNLOCKED(acct_globals.lock)};
 
 /*
  * Called whenever the timer says to check the free space.
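Editorial note: this hunk, like the kernel/irq/handle.c hunk below, replaces the old SPIN_LOCK_UNLOCKED initializer with __SPIN_LOCK_UNLOCKED(name), which takes a name so that lockdep can give each statically initialized lock its own class. A small sketch of the initializer with an invented structure:

#include <linux/spinlock.h>

struct my_globals {			/* hypothetical container, like acct_glbs */
	spinlock_t lock;
	unsigned long count;
};

static struct my_globals my_globals = {
	.lock = __SPIN_LOCK_UNLOCKED(my_globals.lock),
};

static void my_bump(void)
{
	spin_lock(&my_globals.lock);
	my_globals.count++;
	spin_unlock(&my_globals.lock);
}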
diff --git a/kernel/audit.c b/kernel/audit.c
index 98106f6..d9b690a 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -57,6 +57,7 @@
 #include <linux/netlink.h>
 #include <linux/selinux.h>
 #include <linux/inotify.h>
+#include <linux/freezer.h>
 
 #include "audit.h"
 
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 4f40d92..2e896f8 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -636,10 +636,9 @@
 	struct audit_rule *rule;
 	int i;
 
-	rule = kmalloc(sizeof(*rule), GFP_KERNEL);
+	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
 	if (unlikely(!rule))
 		return NULL;
-	memset(rule, 0, sizeof(*rule));
 
 	rule->flags = krule->flags | krule->listnr;
 	rule->action = krule->action;
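Editorial note: this hunk, like the later ones in ipc/sem.c, kernel/futex.c and kernel/kexec.c, folds a kmalloc() followed by memset(..., 0, ...) into a single kzalloc(). A trivial sketch of the pattern with a made-up structure:

#include <linux/slab.h>
#include <linux/types.h>

struct my_rule {			/* hypothetical */
	u32 flags;
	u32 action;
};

static struct my_rule *my_rule_alloc(void)
{
	/* kzalloc() returns zeroed memory, so no separate memset() is needed. */
	return kzalloc(sizeof(struct my_rule), GFP_KERNEL);
}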
diff --git a/kernel/configs.c b/kernel/configs.c
index f9e3197..8fa1fb2 100644
--- a/kernel/configs.c
+++ b/kernel/configs.c
@@ -75,7 +75,7 @@
 	return count;
 }
 
-static struct file_operations ikconfig_file_ops = {
+static const struct file_operations ikconfig_file_ops = {
 	.owner = THIS_MODULE,
 	.read = ikconfig_read_current,
 };
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 272254f..9124669 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -270,11 +270,7 @@
 			goto out;
 		}
 	}
-	error = set_cpus_allowed(current, cpumask_of_cpu(first_cpu));
-	if (error) {
-		printk(KERN_ERR "Could not run on CPU%d\n", first_cpu);
-		goto out;
-	}
+
 	/* We take down all of the non-boot CPUs in one shot to avoid races
 	 * with the userspace trying to use the CPU hotplug at the same time
 	 */
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 6313c38..0a6b4d8 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -729,9 +729,11 @@
 	}
 
 	/* Remaining checks don't apply to root cpuset */
-	if ((par = cur->parent) == NULL)
+	if (cur == &top_cpuset)
 		return 0;
 
+	par = cur->parent;
+
 	/* We must be a subset of our parent cpuset */
 	if (!is_cpuset_subset(trial, par))
 		return -EACCES;
@@ -1060,10 +1062,7 @@
 	cpu_exclusive_changed =
 		(is_cpu_exclusive(cs) != is_cpu_exclusive(&trialcs));
 	mutex_lock(&callback_mutex);
-	if (turning_on)
-		set_bit(bit, &cs->flags);
-	else
-		clear_bit(bit, &cs->flags);
+	cs->flags = trialcs.flags;
 	mutex_unlock(&callback_mutex);
 
 	if (cpu_exclusive_changed)
@@ -1281,7 +1280,8 @@
 	FILE_TASKLIST,
 } cpuset_filetype_t;
 
-static ssize_t cpuset_common_file_write(struct file *file, const char __user *userbuf,
+static ssize_t cpuset_common_file_write(struct file *file,
+					const char __user *userbuf,
 					size_t nbytes, loff_t *unused_ppos)
 {
 	struct cpuset *cs = __d_cs(file->f_dentry->d_parent);
@@ -1292,7 +1292,7 @@
 	int retval = 0;
 
 	/* Crude upper limit on largest legitimate cpulist user might write. */
-	if (nbytes > 100 + 6 * NR_CPUS)
+	if (nbytes > 100 + 6 * max(NR_CPUS, MAX_NUMNODES))
 		return -E2BIG;
 
 	/* +1 for nul-terminator */
@@ -1532,7 +1532,7 @@
 	return simple_rename(old_dir, old_dentry, new_dir, new_dentry);
 }
 
-static struct file_operations cpuset_file_operations = {
+static const struct file_operations cpuset_file_operations = {
 	.read = cpuset_file_read,
 	.write = cpuset_file_write,
 	.llseek = generic_file_llseek,
@@ -2045,7 +2045,6 @@
 	return err;
 }
 
-#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_MEMORY_HOTPLUG)
 /*
  * If common_cpu_mem_hotplug_unplug(), below, unplugs any CPUs
  * or memory nodes, we need to walk over the cpuset hierarchy,
@@ -2109,9 +2108,7 @@
 	mutex_unlock(&callback_mutex);
 	mutex_unlock(&manage_mutex);
 }
-#endif
 
-#ifdef CONFIG_HOTPLUG_CPU
 /*
  * The top_cpuset tracks what CPUs and Memory Nodes are online,
  * period.  This is necessary in order to make cpusets transparent
@@ -2128,7 +2125,6 @@
 	common_cpu_mem_hotplug_unplug();
 	return 0;
 }
-#endif
 
 #ifdef CONFIG_MEMORY_HOTPLUG
 /*
@@ -2610,7 +2606,7 @@
 	return single_open(file, proc_cpuset_show, pid);
 }
 
-struct file_operations proc_cpuset_operations = {
+const struct file_operations proc_cpuset_operations = {
 	.open		= cpuset_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
diff --git a/kernel/delayacct.c b/kernel/delayacct.c
index 66a0ea4..766d591 100644
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
@@ -20,7 +20,7 @@
 #include <linux/delayacct.h>
 
 int delayacct_on __read_mostly = 1;	/* Delay accounting turned on/off */
-kmem_cache_t *delayacct_cache;
+struct kmem_cache *delayacct_cache;
 
 static int __init delayacct_setup_disable(char *str)
 {
@@ -41,7 +41,7 @@
 
 void __delayacct_tsk_init(struct task_struct *tsk)
 {
-	tsk->delays = kmem_cache_zalloc(delayacct_cache, SLAB_KERNEL);
+	tsk->delays = kmem_cache_zalloc(delayacct_cache, GFP_KERNEL);
 	if (tsk->delays)
 		spin_lock_init(&tsk->delays->lock);
 }
diff --git a/kernel/dma.c b/kernel/dma.c
index 2020644..937b13c 100644
--- a/kernel/dma.c
+++ b/kernel/dma.c
@@ -140,7 +140,7 @@
 	return single_open(file, proc_dma_show, NULL);
 }
 
-static struct file_operations proc_dma_operations = {
+static const struct file_operations proc_dma_operations = {
 	.open		= proc_dma_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
diff --git a/kernel/exit.c b/kernel/exit.c
index 06de6c4..4e3f919 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -850,9 +850,7 @@
 fastcall NORET_TYPE void do_exit(long code)
 {
 	struct task_struct *tsk = current;
-	struct taskstats *tidstats;
 	int group_dead;
-	unsigned int mycpu;
 
 	profile_task_exit(tsk);
 
@@ -890,8 +888,6 @@
 				current->comm, current->pid,
 				preempt_count());
 
-	taskstats_exit_alloc(&tidstats, &mycpu);
-
 	acct_update_integrals(tsk);
 	if (tsk->mm) {
 		update_hiwater_rss(tsk->mm);
@@ -911,8 +907,8 @@
 #endif
 	if (unlikely(tsk->audit_context))
 		audit_free(tsk);
-	taskstats_exit_send(tsk, tidstats, group_dead, mycpu);
-	taskstats_exit_free(tidstats);
+
+	taskstats_exit(tsk, group_dead);
 
 	exit_mm(tsk);
 
diff --git a/kernel/fork.c b/kernel/fork.c
index fd22245..7f2e31b 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -82,26 +82,26 @@
 #ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
 # define alloc_task_struct()	kmem_cache_alloc(task_struct_cachep, GFP_KERNEL)
 # define free_task_struct(tsk)	kmem_cache_free(task_struct_cachep, (tsk))
-static kmem_cache_t *task_struct_cachep;
+static struct kmem_cache *task_struct_cachep;
 #endif
 
 /* SLAB cache for signal_struct structures (tsk->signal) */
-static kmem_cache_t *signal_cachep;
+static struct kmem_cache *signal_cachep;
 
 /* SLAB cache for sighand_struct structures (tsk->sighand) */
-kmem_cache_t *sighand_cachep;
+struct kmem_cache *sighand_cachep;
 
 /* SLAB cache for files_struct structures (tsk->files) */
-kmem_cache_t *files_cachep;
+struct kmem_cache *files_cachep;
 
 /* SLAB cache for fs_struct structures (tsk->fs) */
-kmem_cache_t *fs_cachep;
+struct kmem_cache *fs_cachep;
 
 /* SLAB cache for vm_area_struct structures */
-kmem_cache_t *vm_area_cachep;
+struct kmem_cache *vm_area_cachep;
 
 /* SLAB cache for mm_struct structures (tsk->mm) */
-static kmem_cache_t *mm_cachep;
+static struct kmem_cache *mm_cachep;
 
 void free_task(struct task_struct *tsk)
 {
@@ -237,7 +237,7 @@
 				goto fail_nomem;
 			charge = len;
 		}
-		tmp = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+		tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 		if (!tmp)
 			goto fail_nomem;
 		*tmp = *mpnt;
@@ -319,7 +319,7 @@
 
  __cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);
 
-#define allocate_mm()	(kmem_cache_alloc(mm_cachep, SLAB_KERNEL))
+#define allocate_mm()	(kmem_cache_alloc(mm_cachep, GFP_KERNEL))
 #define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))
 
 #include <linux/init_task.h>
@@ -448,7 +448,16 @@
 		tsk->vfork_done = NULL;
 		complete(vfork_done);
 	}
-	if (tsk->clear_child_tid && atomic_read(&mm->mm_users) > 1) {
+
+	/*
+	 * If we're exiting normally, clear a user-space tid field if
+	 * requested.  We leave this alone when dying by signal, to leave
+	 * the value intact in a core dump, and to save the unnecessary
+	 * trouble otherwise.  Userland only wants this done for a sys_exit.
+	 */
+	if (tsk->clear_child_tid
+	    && !(tsk->flags & PF_SIGNALED)
+	    && atomic_read(&mm->mm_users) > 1) {
 		u32 __user * tidptr = tsk->clear_child_tid;
 		tsk->clear_child_tid = NULL;
 
@@ -479,6 +488,10 @@
 
 	memcpy(mm, oldmm, sizeof(*mm));
 
+	/* Initializing for Swap token stuff */
+	mm->token_priority = 0;
+	mm->last_interval = 0;
+
 	if (!mm_init(mm))
 		goto fail_nomem;
 
@@ -542,6 +555,10 @@
 		goto fail_nomem;
 
 good_mm:
+	/* Initializing for Swap token stuff */
+	mm->token_priority = 0;
+	mm->last_interval = 0;
+
 	tsk->mm = mm;
 	tsk->active_mm = mm;
 	return 0;
@@ -613,7 +630,7 @@
 	struct files_struct *newf;
 	struct fdtable *fdt;
 
-	newf = kmem_cache_alloc(files_cachep, SLAB_KERNEL);
+	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
 	if (!newf)
 		goto out;
 
@@ -830,7 +847,6 @@
 	if (clone_flags & CLONE_THREAD) {
 		atomic_inc(&current->signal->count);
 		atomic_inc(&current->signal->live);
-		taskstats_tgid_alloc(current);
 		return 0;
 	}
 	sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
@@ -1413,7 +1429,7 @@
 #define ARCH_MIN_MMSTRUCT_ALIGN 0
 #endif
 
-static void sighand_ctor(void *data, kmem_cache_t *cachep, unsigned long flags)
+static void sighand_ctor(void *data, struct kmem_cache *cachep, unsigned long flags)
 {
 	struct sighand_struct *sighand = data;
 
diff --git a/kernel/futex.c b/kernel/futex.c
index 93ef30b..95989a3 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -282,9 +282,9 @@
 {
 	int ret;
 
-	inc_preempt_count();
+	pagefault_disable();
 	ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
-	dec_preempt_count();
+	pagefault_enable();
 
 	return ret ? -EFAULT : 0;
 }
@@ -324,12 +324,11 @@
 	if (likely(current->pi_state_cache))
 		return 0;
 
-	pi_state = kmalloc(sizeof(*pi_state), GFP_KERNEL);
+	pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);
 
 	if (!pi_state)
 		return -ENOMEM;
 
-	memset(pi_state, 0, sizeof(*pi_state));
 	INIT_LIST_HEAD(&pi_state->list);
 	/* pi_mutex gets initialized later */
 	pi_state->owner = NULL;
@@ -553,7 +552,7 @@
 	 * at the end of wake_up_all() does not prevent this store from
 	 * moving.
 	 */
-	wmb();
+	smp_wmb();
 	q->lock_ptr = NULL;
 }
 
@@ -585,9 +584,9 @@
 	if (!(uval & FUTEX_OWNER_DIED)) {
 		newval = FUTEX_WAITERS | new_owner->pid;
 
-		inc_preempt_count();
+		pagefault_disable();
 		curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
-		dec_preempt_count();
+		pagefault_enable();
 		if (curval == -EFAULT)
 			return -EFAULT;
 		if (curval != uval)
@@ -618,9 +617,9 @@
 	 * There is no waiter, so we unlock the futex. The owner died
 	 * bit has not to be preserved here. We are the owner:
 	 */
-	inc_preempt_count();
+	pagefault_disable();
 	oldval = futex_atomic_cmpxchg_inatomic(uaddr, uval, 0);
-	dec_preempt_count();
+	pagefault_enable();
 
 	if (oldval == -EFAULT)
 		return oldval;
@@ -1158,9 +1157,9 @@
 	 */
 	newval = current->pid;
 
-	inc_preempt_count();
+	pagefault_disable();
 	curval = futex_atomic_cmpxchg_inatomic(uaddr, 0, newval);
-	dec_preempt_count();
+	pagefault_enable();
 
 	if (unlikely(curval == -EFAULT))
 		goto uaddr_faulted;
@@ -1183,9 +1182,9 @@
 	uval = curval;
 	newval = uval | FUTEX_WAITERS;
 
-	inc_preempt_count();
+	pagefault_disable();
 	curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
-	dec_preempt_count();
+	pagefault_enable();
 
 	if (unlikely(curval == -EFAULT))
 		goto uaddr_faulted;
@@ -1215,10 +1214,10 @@
 			newval = current->pid |
 				FUTEX_OWNER_DIED | FUTEX_WAITERS;
 
-			inc_preempt_count();
+			pagefault_disable();
 			curval = futex_atomic_cmpxchg_inatomic(uaddr,
 							       uval, newval);
-			dec_preempt_count();
+			pagefault_enable();
 
 			if (unlikely(curval == -EFAULT))
 				goto uaddr_faulted;
@@ -1390,9 +1389,9 @@
 	 * anyone else up:
 	 */
 	if (!(uval & FUTEX_OWNER_DIED)) {
-		inc_preempt_count();
+		pagefault_disable();
 		uval = futex_atomic_cmpxchg_inatomic(uaddr, current->pid, 0);
-		dec_preempt_count();
+		pagefault_enable();
 	}
 
 	if (unlikely(uval == -EFAULT))
@@ -1493,7 +1492,7 @@
 	return ret;
 }
 
-static struct file_operations futex_fops = {
+static const struct file_operations futex_fops = {
 	.release	= futex_close,
 	.poll		= futex_poll,
 };
@@ -1858,10 +1857,16 @@
 
 static int __init init(void)
 {
-	unsigned int i;
+	int i = register_filesystem(&futex_fs_type);
 
-	register_filesystem(&futex_fs_type);
+	if (i)
+		return i;
+
 	futex_mnt = kern_mount(&futex_fs_type);
+	if (IS_ERR(futex_mnt)) {
+		unregister_filesystem(&futex_fs_type);
+		return PTR_ERR(futex_mnt);
+	}
 
 	for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
 		INIT_LIST_HEAD(&futex_queues[i].chain);
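Editorial note: in the futex hunks above, the open-coded inc_preempt_count()/dec_preempt_count() pairs around in-atomic user accesses become pagefault_disable()/pagefault_enable(), which name the intent: page faults are not serviced in this region, and a failed access simply returns nonzero. A minimal sketch of the same pattern, modeled on get_futex_value_locked() but with invented names:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

static int read_user_u32_atomic(u32 *dest, u32 __user *src)
{
	int ret;

	pagefault_disable();
	/* No fault handling here; ret is nonzero if the page was not present. */
	ret = __copy_from_user_inatomic(dest, src, sizeof(*dest));
	pagefault_enable();

	return ret ? -EFAULT : 0;
}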
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index a681912..aff1f0f 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -54,7 +54,7 @@
 		.chip = &no_irq_chip,
 		.handle_irq = handle_bad_irq,
 		.depth = 1,
-		.lock = SPIN_LOCK_UNLOCKED,
+		.lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
 #ifdef CONFIG_SMP
 		.affinity = CPU_MASK_ALL
 #endif
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index eeac3e3..ab63cfc 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -20,6 +20,7 @@
 #include <linux/proc_fs.h>
 #include <linux/sched.h>	/* for cond_resched */
 #include <linux/mm.h>
+#include <linux/ctype.h>
 
 #include <asm/sections.h>
 
@@ -301,13 +302,6 @@
 	char name[KSYM_NAME_LEN+1];
 };
 
-/* Only label it "global" if it is exported. */
-static void upcase_if_global(struct kallsym_iter *iter)
-{
-	if (is_exported(iter->name, iter->owner))
-		iter->type += 'A' - 'a';
-}
-
 static int get_ksymbol_mod(struct kallsym_iter *iter)
 {
 	iter->owner = module_get_kallsym(iter->pos - kallsyms_num_syms,
@@ -316,7 +310,10 @@
 	if (iter->owner == NULL)
 		return 0;
 
-	upcase_if_global(iter);
+	/* Label it "global" if it is exported, "local" if not exported. */
+	iter->type = is_exported(iter->name, iter->owner)
+		? toupper(iter->type) : tolower(iter->type);
+
 	return 1;
 }
 
@@ -401,7 +398,7 @@
 	return 0;
 }
 
-static struct seq_operations kallsyms_op = {
+static const struct seq_operations kallsyms_op = {
 	.start = s_start,
 	.next = s_next,
 	.stop = s_stop,
@@ -436,7 +433,7 @@
 	return seq_release(inode, file);
 }
 
-static struct file_operations kallsyms_operations = {
+static const struct file_operations kallsyms_operations = {
 	.open = kallsyms_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
diff --git a/kernel/kexec.c b/kernel/kexec.c
index fcdd5d2..afbbbe9 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -20,6 +20,8 @@
 #include <linux/syscalls.h>
 #include <linux/ioport.h>
 #include <linux/hardirq.h>
+#include <linux/elf.h>
+#include <linux/elfcore.h>
 
 #include <asm/page.h>
 #include <asm/uaccess.h>
@@ -108,11 +110,10 @@
 
 	/* Allocate a controlling structure */
 	result = -ENOMEM;
-	image = kmalloc(sizeof(*image), GFP_KERNEL);
+	image = kzalloc(sizeof(*image), GFP_KERNEL);
 	if (!image)
 		goto out;
 
-	memset(image, 0, sizeof(*image));
 	image->head = 0;
 	image->entry = &image->head;
 	image->last_entry = &image->head;
@@ -1067,6 +1068,60 @@
 	}
 }
 
+static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
+			    size_t data_len)
+{
+	struct elf_note note;
+
+	note.n_namesz = strlen(name) + 1;
+	note.n_descsz = data_len;
+	note.n_type   = type;
+	memcpy(buf, &note, sizeof(note));
+	buf += (sizeof(note) + 3)/4;
+	memcpy(buf, name, note.n_namesz);
+	buf += (note.n_namesz + 3)/4;
+	memcpy(buf, data, note.n_descsz);
+	buf += (note.n_descsz + 3)/4;
+
+	return buf;
+}
+
+static void final_note(u32 *buf)
+{
+	struct elf_note note;
+
+	note.n_namesz = 0;
+	note.n_descsz = 0;
+	note.n_type   = 0;
+	memcpy(buf, &note, sizeof(note));
+}
+
+void crash_save_cpu(struct pt_regs *regs, int cpu)
+{
+	struct elf_prstatus prstatus;
+	u32 *buf;
+
+	if ((cpu < 0) || (cpu >= NR_CPUS))
+		return;
+
+	/* Using ELF notes here is opportunistic.
+	 * I need a well defined structure format
+	 * for the data I pass, and I need tags
+	 * on the data to indicate what information I have
+	 * squirrelled away.  ELF notes happen to provide
+	 * all of that, so there is no need to invent something new.
+	 */
+	buf = (u32*)per_cpu_ptr(crash_notes, cpu);
+	if (!buf)
+		return;
+	memset(&prstatus, 0, sizeof(prstatus));
+	prstatus.pr_pid = current->pid;
+	elf_core_copy_regs(&prstatus.pr_reg, regs);
+	buf = append_elf_note(buf, "CORE", NT_PRSTATUS, &prstatus,
+				sizeof(prstatus));
+	final_note(buf);
+}
+
 static int __init crash_notes_memory_init(void)
 {
 	/* Allocate memory for saving cpu registers. */
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 2b76dee..8d2bea0 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -114,6 +114,7 @@
 #endif /* CONFIG_KMOD */
 
 struct subprocess_info {
+	struct work_struct work;
 	struct completion *complete;
 	char *path;
 	char **argv;
@@ -221,9 +222,10 @@
 }
 
 /* This is run by khelper thread  */
-static void __call_usermodehelper(void *data)
+static void __call_usermodehelper(struct work_struct *work)
 {
-	struct subprocess_info *sub_info = data;
+	struct subprocess_info *sub_info =
+		container_of(work, struct subprocess_info, work);
 	pid_t pid;
 	int wait = sub_info->wait;
 
@@ -264,6 +266,8 @@
 {
 	DECLARE_COMPLETION_ONSTACK(done);
 	struct subprocess_info sub_info = {
+		.work		= __WORK_INITIALIZER(sub_info.work,
+						     __call_usermodehelper),
 		.complete	= &done,
 		.path		= path,
 		.argv		= argv,
@@ -272,7 +276,6 @@
 		.wait		= wait,
 		.retval		= 0,
 	};
-	DECLARE_WORK(work, __call_usermodehelper, &sub_info);
 
 	if (!khelper_wq)
 		return -EBUSY;
@@ -280,7 +283,7 @@
 	if (path[0] == '\0')
 		return 0;
 
-	queue_work(khelper_wq, &work);
+	queue_work(khelper_wq, &sub_info.work);
 	wait_for_completion(&done);
 	return sub_info.retval;
 }
@@ -291,6 +294,8 @@
 {
 	DECLARE_COMPLETION(done);
 	struct subprocess_info sub_info = {
+		.work		= __WORK_INITIALIZER(sub_info.work,
+						     __call_usermodehelper),
 		.complete	= &done,
 		.path		= path,
 		.argv		= argv,
@@ -298,7 +303,6 @@
 		.retval		= 0,
 	};
 	struct file *f;
-	DECLARE_WORK(work, __call_usermodehelper, &sub_info);
 
 	if (!khelper_wq)
 		return -EBUSY;
@@ -318,7 +322,7 @@
 	}
 	sub_info.stdin = f;
 
-	queue_work(khelper_wq, &work);
+	queue_work(khelper_wq, &sub_info.work);
 	wait_for_completion(&done);
 	return sub_info.retval;
 }
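Editorial note: the __call_usermodehelper() conversion above embeds the work item in the on-stack subprocess_info and blocks on a completion instead of using the removed DECLARE_WORK(work, fn, data) form. A hedged sketch of that synchronous on-stack pattern; the names and the workqueue argument are invented, and the caller must be allowed to sleep:

#include <linux/completion.h>
#include <linux/workqueue.h>

struct sync_job {			/* hypothetical on-stack job */
	struct work_struct work;
	struct completion done;
	int retval;
};

static void sync_job_fn(struct work_struct *work)
{
	struct sync_job *job = container_of(work, struct sync_job, work);

	job->retval = 0;		/* stand-in for the real work */
	complete(&job->done);
}

static int run_sync_job(struct workqueue_struct *wq)
{
	struct sync_job job;

	INIT_WORK(&job.work, sync_job_fn);
	init_completion(&job.done);

	queue_work(wq, &job.work);
	/* The stack frame must outlive the handler, hence the wait. */
	wait_for_completion(&job.done);
	return job.retval;
}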
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 610c837..17ec4af 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -38,6 +38,7 @@
 #include <linux/module.h>
 #include <linux/moduleloader.h>
 #include <linux/kallsyms.h>
+#include <linux/freezer.h>
 #include <asm-generic/sections.h>
 #include <asm/cacheflush.h>
 #include <asm/errno.h>
@@ -83,9 +84,36 @@
 	kprobe_opcode_t *insns;		/* Page of instruction slots */
 	char slot_used[INSNS_PER_PAGE];
 	int nused;
+	int ngarbage;
 };
 
 static struct hlist_head kprobe_insn_pages;
+static int kprobe_garbage_slots;
+static int collect_garbage_slots(void);
+
+static int __kprobes check_safety(void)
+{
+	int ret = 0;
+#if defined(CONFIG_PREEMPT) && defined(CONFIG_PM)
+	ret = freeze_processes();
+	if (ret == 0) {
+		struct task_struct *p, *q;
+		do_each_thread(p, q) {
+			if (p != current && p->state == TASK_RUNNING &&
+			    p->pid != 0) {
+				printk("Check failed: %s is running\n", p->comm);
+				ret = -1;
+				goto loop_end;
+			}
+		} while_each_thread(p, q);
+	}
+loop_end:
+	thaw_processes();
+#else
+	synchronize_sched();
+#endif
+	return ret;
+}
 
 /**
  * get_insn_slot() - Find a slot on an executable page for an instruction.
@@ -96,6 +124,7 @@
 	struct kprobe_insn_page *kip;
 	struct hlist_node *pos;
 
+      retry:
 	hlist_for_each(pos, &kprobe_insn_pages) {
 		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
 		if (kip->nused < INSNS_PER_PAGE) {
@@ -112,7 +141,11 @@
 		}
 	}
 
-	/* All out of space.  Need to allocate a new page. Use slot 0.*/
+	/* If there are any garbage slots, collect them and try again. */
+	if (kprobe_garbage_slots && collect_garbage_slots() == 0) {
+		goto retry;
+	}
+	/* All out of space.  Need to allocate a new page. Use slot 0. */
 	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
 	if (!kip) {
 		return NULL;
@@ -133,10 +166,62 @@
 	memset(kip->slot_used, 0, INSNS_PER_PAGE);
 	kip->slot_used[0] = 1;
 	kip->nused = 1;
+	kip->ngarbage = 0;
 	return kip->insns;
 }
 
-void __kprobes free_insn_slot(kprobe_opcode_t *slot)
+/* Return 1 if all garbage is collected, otherwise 0. */
+static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
+{
+	kip->slot_used[idx] = 0;
+	kip->nused--;
+	if (kip->nused == 0) {
+		/*
+		 * Page is no longer in use.  Free it unless
+		 * it's the last one.  We keep the last one
+		 * so as not to have to set it up again the
+		 * next time somebody inserts a probe.
+		 */
+		hlist_del(&kip->hlist);
+		if (hlist_empty(&kprobe_insn_pages)) {
+			INIT_HLIST_NODE(&kip->hlist);
+			hlist_add_head(&kip->hlist,
+				       &kprobe_insn_pages);
+		} else {
+			module_free(NULL, kip->insns);
+			kfree(kip);
+		}
+		return 1;
+	}
+	return 0;
+}
+
+static int __kprobes collect_garbage_slots(void)
+{
+	struct kprobe_insn_page *kip;
+	struct hlist_node *pos, *next;
+
+	/* Ensure no one is preempted on the garbage slots */
+	if (check_safety() != 0)
+		return -EAGAIN;
+
+	hlist_for_each_safe(pos, next, &kprobe_insn_pages) {
+		int i;
+		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
+		if (kip->ngarbage == 0)
+			continue;
+		kip->ngarbage = 0;	/* we will collect all garbage slots */
+		for (i = 0; i < INSNS_PER_PAGE; i++) {
+			if (kip->slot_used[i] == -1 &&
+			    collect_one_slot(kip, i))
+				break;
+		}
+	}
+	kprobe_garbage_slots = 0;
+	return 0;
+}
+
+void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
 {
 	struct kprobe_insn_page *kip;
 	struct hlist_node *pos;
@@ -146,28 +231,18 @@
 		if (kip->insns <= slot &&
 		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
 			int i = (slot - kip->insns) / MAX_INSN_SIZE;
-			kip->slot_used[i] = 0;
-			kip->nused--;
-			if (kip->nused == 0) {
-				/*
-				 * Page is no longer in use.  Free it unless
-				 * it's the last one.  We keep the last one
-				 * so as not to have to set it up again the
-				 * next time somebody inserts a probe.
-				 */
-				hlist_del(&kip->hlist);
-				if (hlist_empty(&kprobe_insn_pages)) {
-					INIT_HLIST_NODE(&kip->hlist);
-					hlist_add_head(&kip->hlist,
-						&kprobe_insn_pages);
-				} else {
-					module_free(NULL, kip->insns);
-					kfree(kip);
-				}
+			if (dirty) {
+				kip->slot_used[i] = -1;
+				kip->ngarbage++;
+			} else {
+				collect_one_slot(kip, i);
 			}
-			return;
+			break;
 		}
 	}
+	if (dirty && (++kprobe_garbage_slots > INSNS_PER_PAGE)) {
+		collect_garbage_slots();
+	}
 }
 #endif
 
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 4f9c60e..1db8c72 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -31,6 +31,8 @@
 	/* Result passed back to kthread_create() from keventd. */
 	struct task_struct *result;
 	struct completion done;
+
+	struct work_struct work;
 };
 
 struct kthread_stop_info
@@ -111,9 +113,10 @@
 }
 
 /* We are keventd: create a thread. */
-static void keventd_create_kthread(void *_create)
+static void keventd_create_kthread(struct work_struct *work)
 {
-	struct kthread_create_info *create = _create;
+	struct kthread_create_info *create =
+		container_of(work, struct kthread_create_info, work);
 	int pid;
 
 	/* We want our own signal handler (we take no signals by default). */
@@ -154,20 +157,20 @@
 				   ...)
 {
 	struct kthread_create_info create;
-	DECLARE_WORK(work, keventd_create_kthread, &create);
 
 	create.threadfn = threadfn;
 	create.data = data;
 	init_completion(&create.started);
 	init_completion(&create.done);
+	INIT_WORK(&create.work, keventd_create_kthread);
 
 	/*
 	 * The workqueue needs to start up first:
 	 */
 	if (!helper_wq)
-		work.func(work.data);
+		create.work.func(&create.work);
 	else {
-		queue_work(helper_wq, &work);
+		queue_work(helper_wq, &create.work);
 		wait_for_completion(&create.done);
 	}
 	if (!IS_ERR(create.result)) {
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 9bb8d78..b020324 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -140,13 +140,6 @@
 
 EXPORT_SYMBOL(lockdep_on);
 
-int lockdep_internal(void)
-{
-	return current->lockdep_recursion != 0;
-}
-
-EXPORT_SYMBOL(lockdep_internal);
-
 /*
  * Debugging switches:
  */
@@ -233,8 +226,10 @@
 	trace->max_entries = trace->nr_entries;
 
 	nr_stack_trace_entries += trace->nr_entries;
-	if (DEBUG_LOCKS_WARN_ON(nr_stack_trace_entries > MAX_STACK_TRACE_ENTRIES))
+	if (DEBUG_LOCKS_WARN_ON(nr_stack_trace_entries > MAX_STACK_TRACE_ENTRIES)) {
+		__raw_spin_unlock(&hash_lock);
 		return 0;
+	}
 
 	if (nr_stack_trace_entries == MAX_STACK_TRACE_ENTRIES) {
 		__raw_spin_unlock(&hash_lock);
@@ -353,7 +348,7 @@
 
 static void print_lock_name(struct lock_class *class)
 {
-	char str[128], c1, c2, c3, c4;
+	char str[KSYM_NAME_LEN + 1], c1, c2, c3, c4;
 	const char *name;
 
 	get_usage_chars(class, &c1, &c2, &c3, &c4);
@@ -375,7 +370,7 @@
 static void print_lockdep_cache(struct lockdep_map *lock)
 {
 	const char *name;
-	char str[128];
+	char str[KSYM_NAME_LEN + 1];
 
 	name = lock->name;
 	if (!name)
@@ -445,7 +440,9 @@
 	print_lock_class_header(class, depth);
 
 	list_for_each_entry(entry, &class->locks_after, entry) {
-		DEBUG_LOCKS_WARN_ON(!entry->class);
+		if (DEBUG_LOCKS_WARN_ON(!entry->class))
+			return;
+
 		print_lock_dependencies(entry->class, depth + 1);
 
 		printk("%*s ... acquired at:\n",depth,"");
@@ -470,7 +467,8 @@
 		return 0;
 
 	entry->class = this;
-	save_trace(&entry->trace);
+	if (!save_trace(&entry->trace))
+		return 0;
 
 	/*
 	 * Since we never remove from the dependency list, the list can
@@ -558,8 +556,12 @@
 	if (debug_locks_silent)
 		return 0;
 
+	/* hash_lock unlocked by the header */
+	__raw_spin_lock(&hash_lock);
 	this.class = check_source->class;
-	save_trace(&this.trace);
+	if (!save_trace(&this.trace))
+		return 0;
+	__raw_spin_unlock(&hash_lock);
 	print_circular_bug_entry(&this, 0);
 
 	printk("\nother info that might help us debug this:\n\n");
@@ -962,14 +964,11 @@
 			       &prev->class->locks_after, next->acquire_ip);
 	if (!ret)
 		return 0;
-	/*
-	 * Return value of 2 signals 'dependency already added',
-	 * in that case we dont have to add the backlink either.
-	 */
-	if (ret == 2)
-		return 2;
+
 	ret = add_lock_to_list(next->class, prev->class,
 			       &next->class->locks_before, next->acquire_ip);
+	if (!ret)
+		return 0;
 
 	/*
 	 * Debugging printouts:
@@ -1021,7 +1020,8 @@
 		 * added:
 		 */
 		if (hlock->read != 2) {
-			check_prev_add(curr, hlock, next);
+			if (!check_prev_add(curr, hlock, next))
+				return 0;
 			/*
 			 * Stop after the first non-trylock entry,
 			 * as non-trylock entries have added their
@@ -1178,6 +1178,7 @@
 	struct lockdep_subclass_key *key;
 	struct list_head *hash_head;
 	struct lock_class *class;
+	unsigned long flags;
 
 	class = look_up_lock_class(lock, subclass);
 	if (likely(class))
@@ -1199,6 +1200,7 @@
 	key = lock->key->subkeys + subclass;
 	hash_head = classhashentry(key);
 
+	raw_local_irq_save(flags);
 	__raw_spin_lock(&hash_lock);
 	/*
 	 * We have to do the hash-walk again, to avoid races
@@ -1213,6 +1215,7 @@
 	 */
 	if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
 		__raw_spin_unlock(&hash_lock);
+		raw_local_irq_restore(flags);
 		debug_locks_off();
 		printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
 		printk("turning off the locking correctness validator.\n");
@@ -1235,15 +1238,18 @@
 
 	if (verbose(class)) {
 		__raw_spin_unlock(&hash_lock);
+		raw_local_irq_restore(flags);
 		printk("\nnew class %p: %s", class->key, class->name);
 		if (class->name_version > 1)
 			printk("#%d", class->name_version);
 		printk("\n");
 		dump_stack();
+		raw_local_irq_save(flags);
 		__raw_spin_lock(&hash_lock);
 	}
 out_unlock_set:
 	__raw_spin_unlock(&hash_lock);
+	raw_local_irq_restore(flags);
 
 	if (!subclass || force)
 		lock->class_cache = class;
@@ -1724,6 +1730,7 @@
 		debug_atomic_dec(&nr_unused_locks);
 		break;
 	default:
+		__raw_spin_unlock(&hash_lock);
 		debug_locks_off();
 		WARN_ON(1);
 		return 0;
@@ -2641,6 +2648,7 @@
 	}
 	local_irq_restore(flags);
 }
+EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
 
 static void print_held_locks_bug(struct task_struct *curr)
 {
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index eab043c..8ce09bc 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -20,7 +20,7 @@
 #define MAX_LOCKDEP_KEYS_BITS	11
 #define MAX_LOCKDEP_KEYS	(1UL << MAX_LOCKDEP_KEYS_BITS)
 
-#define MAX_LOCKDEP_CHAINS_BITS	13
+#define MAX_LOCKDEP_CHAINS_BITS	14
 #define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)
 
 /*
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index f6e72ea..b554b40 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -113,7 +113,7 @@
 	return 0;
 }
 
-static struct seq_operations lockdep_ops = {
+static const struct seq_operations lockdep_ops = {
 	.start	= l_start,
 	.next	= l_next,
 	.stop	= l_stop,
@@ -135,7 +135,7 @@
 	return res;
 }
 
-static struct file_operations proc_lockdep_operations = {
+static const struct file_operations proc_lockdep_operations = {
 	.open		= lockdep_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -319,7 +319,7 @@
 	return single_open(file, lockdep_stats_show, NULL);
 }
 
-static struct file_operations proc_lockdep_stats_operations = {
+static const struct file_operations proc_lockdep_stats_operations = {
 	.open		= lockdep_stats_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
diff --git a/kernel/module.c b/kernel/module.c
index e2d09d6..d9eae45 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2209,7 +2209,7 @@
    Where refcount is a number or -, and deps is a comma-separated list
    of depends or -.
 */
-struct seq_operations modules_op = {
+const struct seq_operations modules_op = {
 	.start	= m_start,
 	.next	= m_next,
 	.stop	= m_stop,
diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
index 1865164..841539d 100644
--- a/kernel/mutex-debug.c
+++ b/kernel/mutex-debug.c
@@ -77,6 +77,9 @@
 
 void debug_mutex_unlock(struct mutex *lock)
 {
+	if (unlikely(!debug_locks))
+		return;
+
 	DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
 	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
 	DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
diff --git a/kernel/pid.c b/kernel/pid.c
index b914392..a48879b 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -31,7 +31,7 @@
 #define pid_hashfn(nr) hash_long((unsigned long)nr, pidhash_shift)
 static struct hlist_head *pid_hash;
 static int pidhash_shift;
-static kmem_cache_t *pid_cachep;
+static struct kmem_cache *pid_cachep;
 
 int pid_max = PID_MAX_DEFAULT;
 
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 9cbb5d1..5fe87de 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -70,7 +70,7 @@
 /*
  * Lets keep our timers in a slab cache :-)
  */
-static kmem_cache_t *posix_timers_cache;
+static struct kmem_cache *posix_timers_cache;
 static struct idr posix_timers_id;
 static DEFINE_SPINLOCK(idr_lock);
 
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 825068c..710ed08 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -78,7 +78,7 @@
 
 config SOFTWARE_SUSPEND
 	bool "Software Suspend"
-	depends on PM && SWAP && ((X86 && (!SMP || SUSPEND_SMP) && !X86_PAE) || ((FRV || PPC32) && !SMP))
+	depends on PM && SWAP && ((X86 && (!SMP || SUSPEND_SMP)) || ((FRV || PPC32) && !SMP))
 	---help---
 	  Enable the possibility of suspending the machine.
 	  It doesn't need ACPI or APM.
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index b1fb786..0b00f56 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -20,6 +20,7 @@
 #include <linux/pm.h>
 #include <linux/console.h>
 #include <linux/cpu.h>
+#include <linux/freezer.h>
 
 #include "power.h"
 
@@ -27,6 +28,23 @@
 static int noresume = 0;
 char resume_file[256] = CONFIG_PM_STD_PARTITION;
 dev_t swsusp_resume_device;
+sector_t swsusp_resume_block;
+
+/**
+ *	platform_prepare - prepare the machine for hibernation using the
+ *	platform driver if so configured and return an error code if it fails
+ */
+
+static inline int platform_prepare(void)
+{
+	int error = 0;
+
+	if (pm_disk_mode == PM_DISK_PLATFORM) {
+		if (pm_ops && pm_ops->prepare)
+			error = pm_ops->prepare(PM_SUSPEND_DISK);
+	}
+	return error;
+}
 
 /**
  *	power_down - Shut machine down for hibernate.
@@ -40,12 +58,10 @@
 
 static void power_down(suspend_disk_method_t mode)
 {
-	int error = 0;
-
 	switch(mode) {
 	case PM_DISK_PLATFORM:
 		kernel_shutdown_prepare(SYSTEM_SUSPEND_DISK);
-		error = pm_ops->enter(PM_SUSPEND_DISK);
+		pm_ops->enter(PM_SUSPEND_DISK);
 		break;
 	case PM_DISK_SHUTDOWN:
 		kernel_power_off();
@@ -90,12 +106,18 @@
 		goto thaw;
 	}
 
+	error = platform_prepare();
+	if (error)
+		goto thaw;
+
 	/* Free memory before shutting down devices. */
 	if (!(error = swsusp_shrink_memory()))
 		return 0;
-thaw:
+
+	platform_finish();
+ thaw:
 	thaw_processes();
-enable_cpus:
+ enable_cpus:
 	enable_nonboot_cpus();
 	pm_restore_console();
 	return error;
@@ -127,7 +149,7 @@
 		return error;
 
 	if (pm_disk_mode == PM_DISK_TESTPROC)
-		goto Thaw;
+		return 0;
 
 	suspend_console();
 	error = device_suspend(PMSG_FREEZE);
@@ -189,10 +211,10 @@
 {
 	int error;
 
-	down(&pm_sem);
+	mutex_lock(&pm_mutex);
 	if (!swsusp_resume_device) {
 		if (!strlen(resume_file)) {
-			up(&pm_sem);
+			mutex_unlock(&pm_mutex);
 			return -ENOENT;
 		}
 		swsusp_resume_device = name_to_dev_t(resume_file);
@@ -207,7 +229,7 @@
 		 * FIXME: If noresume is specified, we need to find the partition
 		 * and reset it back to normal swap space.
 		 */
-		up(&pm_sem);
+		mutex_unlock(&pm_mutex);
 		return 0;
 	}
 
@@ -251,7 +273,7 @@
 	unprepare_processes();
  Done:
 	/* For success case, the suspend path will release the lock */
-	up(&pm_sem);
+	mutex_unlock(&pm_mutex);
 	pr_debug("PM: Resume from disk failed.\n");
 	return 0;
 }
@@ -312,7 +334,7 @@
 	p = memchr(buf, '\n', n);
 	len = p ? p - buf : n;
 
-	down(&pm_sem);
+	mutex_lock(&pm_mutex);
 	for (i = PM_DISK_FIRMWARE; i < PM_DISK_MAX; i++) {
 		if (!strncmp(buf, pm_disk_modes[i], len)) {
 			mode = i;
@@ -336,7 +358,7 @@
 
 	pr_debug("PM: suspend-to-disk mode set to '%s'\n",
 		 pm_disk_modes[mode]);
-	up(&pm_sem);
+	mutex_unlock(&pm_mutex);
 	return error ? error : n;
 }
 
@@ -361,14 +383,14 @@
 	if (maj != MAJOR(res) || min != MINOR(res))
 		goto out;
 
-	down(&pm_sem);
+	mutex_lock(&pm_mutex);
 	swsusp_resume_device = res;
-	up(&pm_sem);
+	mutex_unlock(&pm_mutex);
 	printk("Attempting manual resume\n");
 	noresume = 0;
 	software_resume();
 	ret = n;
-out:
+ out:
 	return ret;
 }
 
@@ -423,6 +445,19 @@
 	return 1;
 }
 
+static int __init resume_offset_setup(char *str)
+{
+	unsigned long long offset;
+
+	if (noresume)
+		return 1;
+
+	if (sscanf(str, "%llu", &offset) == 1)
+		swsusp_resume_block = offset;
+
+	return 1;
+}
+
 static int __init noresume_setup(char *str)
 {
 	noresume = 1;
@@ -430,4 +465,5 @@
 }
 
 __setup("noresume", noresume_setup);
+__setup("resume_offset=", resume_offset_setup);
 __setup("resume=", resume_setup);
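Editorial note: pm_sem, a semaphore used purely as a sleeping lock, is converted to pm_mutex here and in kernel/power/main.c below. Note the inverted trylock convention visible in the main.c hunk: down_trylock() returns nonzero on failure while mutex_trylock() returns 1 on success, hence the new !mutex_trylock() test. A small sketch with hypothetical names:

#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(my_lock);		/* invented lock and state */
static int my_state;

static void my_set_state(int v)
{
	mutex_lock(&my_lock);
	my_state = v;
	mutex_unlock(&my_lock);
}

static int my_try_set_state(int v)
{
	if (!mutex_trylock(&my_lock))	/* returns 1 on success, 0 if contended */
		return -EBUSY;
	my_state = v;
	mutex_unlock(&my_lock);
	return 0;
}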
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 873228c..500eb87 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -8,6 +8,7 @@
  *
  */
 
+#include <linux/module.h>
 #include <linux/suspend.h>
 #include <linux/kobject.h>
 #include <linux/string.h>
@@ -18,13 +19,14 @@
 #include <linux/console.h>
 #include <linux/cpu.h>
 #include <linux/resume-trace.h>
+#include <linux/freezer.h>
 
 #include "power.h"
 
 /*This is just an arbitrary number */
 #define FREE_PAGE_NUMBER (100)
 
-DECLARE_MUTEX(pm_sem);
+DEFINE_MUTEX(pm_mutex);
 
 struct pm_ops *pm_ops;
 suspend_disk_method_t pm_disk_mode = PM_DISK_SHUTDOWN;
@@ -36,9 +38,9 @@
 
 void pm_set_ops(struct pm_ops * ops)
 {
-	down(&pm_sem);
+	mutex_lock(&pm_mutex);
 	pm_ops = ops;
-	up(&pm_sem);
+	mutex_unlock(&pm_mutex);
 }
 
 
@@ -182,7 +184,7 @@
 
 	if (!valid_state(state))
 		return -ENODEV;
-	if (down_trylock(&pm_sem))
+	if (!mutex_trylock(&pm_mutex))
 		return -EBUSY;
 
 	if (state == PM_SUSPEND_DISK) {
@@ -200,7 +202,7 @@
 	pr_debug("PM: Finishing wakeup.\n");
 	suspend_finish(state);
  Unlock:
-	up(&pm_sem);
+	mutex_unlock(&pm_mutex);
 	return error;
 }
 
@@ -229,7 +231,7 @@
 	return -EINVAL;
 }
 
-
+EXPORT_SYMBOL(pm_suspend);
 
 decl_subsys(power,NULL,NULL);
 
diff --git a/kernel/power/power.h b/kernel/power/power.h
index bfe999f..eb461b8 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -22,7 +22,9 @@
 	return -EPERM;
 }
 #endif
-extern struct semaphore pm_sem;
+
+extern struct mutex pm_mutex;
+
 #define power_attr(_name) \
 static struct subsys_attribute _name##_attr = {	\
 	.attr	= {				\
@@ -42,6 +44,7 @@
 extern unsigned long image_size;
 extern int in_suspend;
 extern dev_t swsusp_resume_device;
+extern sector_t swsusp_resume_block;
 
 extern asmlinkage int swsusp_arch_suspend(void);
 extern asmlinkage int swsusp_arch_resume(void);
@@ -102,8 +105,18 @@
 extern unsigned int snapshot_additional_pages(struct zone *zone);
 extern int snapshot_read_next(struct snapshot_handle *handle, size_t count);
 extern int snapshot_write_next(struct snapshot_handle *handle, size_t count);
+extern void snapshot_write_finalize(struct snapshot_handle *handle);
 extern int snapshot_image_loaded(struct snapshot_handle *handle);
-extern void snapshot_free_unused_memory(struct snapshot_handle *handle);
+
+/*
+ * This structure is used to pass the values needed for the identification
+ * of the resume swap area from user space to the kernel via the
+ * SNAPSHOT_SET_SWAP_AREA ioctl
+ */
+struct resume_swap_area {
+	loff_t offset;
+	u_int32_t dev;
+} __attribute__((packed));
 
 #define SNAPSHOT_IOC_MAGIC	'3'
 #define SNAPSHOT_FREEZE			_IO(SNAPSHOT_IOC_MAGIC, 1)
@@ -117,7 +130,14 @@
 #define SNAPSHOT_FREE_SWAP_PAGES	_IO(SNAPSHOT_IOC_MAGIC, 9)
 #define SNAPSHOT_SET_SWAP_FILE		_IOW(SNAPSHOT_IOC_MAGIC, 10, unsigned int)
 #define SNAPSHOT_S2RAM			_IO(SNAPSHOT_IOC_MAGIC, 11)
-#define SNAPSHOT_IOC_MAXNR	11
+#define SNAPSHOT_PMOPS			_IOW(SNAPSHOT_IOC_MAGIC, 12, unsigned int)
+#define SNAPSHOT_SET_SWAP_AREA		_IOW(SNAPSHOT_IOC_MAGIC, 13, \
+							struct resume_swap_area)
+#define SNAPSHOT_IOC_MAXNR	13
+
+#define PMOPS_PREPARE	1
+#define PMOPS_ENTER	2
+#define PMOPS_FINISH	3
 
 /**
  *	The bitmap is used for tracing allocated swap pages
@@ -141,7 +161,7 @@
 
 extern void free_bitmap(struct bitmap_page *bitmap);
 extern struct bitmap_page *alloc_bitmap(unsigned int nr_bits);
-extern unsigned long alloc_swap_page(int swap, struct bitmap_page *bitmap);
+extern sector_t alloc_swapdev_block(int swap, struct bitmap_page *bitmap);
 extern void free_all_swap_pages(int swap, struct bitmap_page *bitmap);
 
 extern int swsusp_check(void);
@@ -153,3 +173,7 @@
 extern int swsusp_write(void);
 extern void swsusp_close(void);
 extern int suspend_enter(suspend_state_t state);
+
+struct timeval;
+extern void swsusp_show_speed(struct timeval *, struct timeval *,
+				unsigned int, char *);
diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
index f1f900a..678ec73 100644
--- a/kernel/power/poweroff.c
+++ b/kernel/power/poweroff.c
@@ -16,12 +16,12 @@
  * callback we use.
  */
 
-static void do_poweroff(void *dummy)
+static void do_poweroff(struct work_struct *dummy)
 {
 	kernel_power_off();
 }
 
-static DECLARE_WORK(poweroff_work, do_poweroff, NULL);
+static DECLARE_WORK(poweroff_work, do_poweroff);
 
 static void handle_poweroff(int key, struct tty_struct *tty)
 {
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 72e72d2..99eeb11 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -13,12 +13,15 @@
 #include <linux/suspend.h>
 #include <linux/module.h>
 #include <linux/syscalls.h>
+#include <linux/freezer.h>
 
 /* 
  * Timeout for stopping processes
  */
 #define TIMEOUT	(20 * HZ)
 
+#define FREEZER_KERNEL_THREADS 0
+#define FREEZER_USER_SPACE 1
 
 static inline int freezeable(struct task_struct * p)
 {
@@ -39,7 +42,6 @@
 	long save;
 	save = current->state;
 	pr_debug("%s entered refrigerator\n", current->comm);
-	printk("=");
 
 	frozen_process(current);
 	spin_lock_irq(&current->sighand->siglock);
@@ -79,96 +81,136 @@
 	}
 }
 
-/* 0 = success, else # of processes that we failed to stop */
-int freeze_processes(void)
+static inline int is_user_space(struct task_struct *p)
 {
-	int todo, nr_user, user_frozen;
-	unsigned long start_time;
-	struct task_struct *g, *p;
+	return p->mm && !(p->flags & PF_BORROWED_MM);
+}
 
-	printk( "Stopping tasks: " );
-	start_time = jiffies;
-	user_frozen = 0;
+static unsigned int try_to_freeze_tasks(int freeze_user_space)
+{
+	struct task_struct *g, *p;
+	unsigned long end_time;
+	unsigned int todo;
+
+	end_time = jiffies + TIMEOUT;
 	do {
-		nr_user = todo = 0;
+		todo = 0;
 		read_lock(&tasklist_lock);
 		do_each_thread(g, p) {
 			if (!freezeable(p))
 				continue;
+
 			if (frozen(p))
 				continue;
-			if (p->state == TASK_TRACED && frozen(p->parent)) {
+
+			if (p->state == TASK_TRACED &&
+			    (frozen(p->parent) ||
+			     p->parent->state == TASK_STOPPED)) {
 				cancel_freezing(p);
 				continue;
 			}
-			if (p->mm && !(p->flags & PF_BORROWED_MM)) {
-				/* The task is a user-space one.
-				 * Freeze it unless there's a vfork completion
-				 * pending
+			if (is_user_space(p)) {
+				if (!freeze_user_space)
+					continue;
+
+				/* Freeze the task unless there is a vfork
+				 * completion pending
 				 */
 				if (!p->vfork_done)
 					freeze_process(p);
-				nr_user++;
 			} else {
-				/* Freeze only if the user space is frozen */
-				if (user_frozen)
-					freeze_process(p);
-				todo++;
+				if (freeze_user_space)
+					continue;
+
+				freeze_process(p);
 			}
+			todo++;
 		} while_each_thread(g, p);
 		read_unlock(&tasklist_lock);
-		todo += nr_user;
-		if (!user_frozen && !nr_user) {
-			sys_sync();
-			start_time = jiffies;
-		}
-		user_frozen = !nr_user;
 		yield();			/* Yield is okay here */
-		if (todo && time_after(jiffies, start_time + TIMEOUT))
+		if (todo && time_after(jiffies, end_time))
 			break;
-	} while(todo);
+	} while (todo);
 
-	/* This does not unfreeze processes that are already frozen
-	 * (we have slightly ugly calling convention in that respect,
-	 * and caller must call thaw_processes() if something fails),
-	 * but it cleans up leftover PF_FREEZE requests.
-	 */
 	if (todo) {
-		printk( "\n" );
-		printk(KERN_ERR " stopping tasks timed out "
-			"after %d seconds (%d tasks remaining):\n",
-			TIMEOUT / HZ, todo);
+		/* This does not unfreeze processes that are already frozen
+		 * (we have slightly ugly calling convention in that respect,
+		 * and caller must call thaw_processes() if something fails),
+		 * but it cleans up leftover PF_FREEZE requests.
+		 */
+		printk("\n");
+		printk(KERN_ERR "Stopping %s timed out after %d seconds "
+				"(%d tasks refusing to freeze):\n",
+				freeze_user_space ? "user space processes" :
+					"kernel threads",
+				TIMEOUT / HZ, todo);
 		read_lock(&tasklist_lock);
 		do_each_thread(g, p) {
+			if (is_user_space(p) == !freeze_user_space)
+				continue;
+
 			if (freezeable(p) && !frozen(p))
-				printk(KERN_ERR "  %s\n", p->comm);
+				printk(KERN_ERR " %s\n", p->comm);
+
 			cancel_freezing(p);
 		} while_each_thread(g, p);
 		read_unlock(&tasklist_lock);
-		return todo;
 	}
 
-	printk( "|\n" );
+	return todo;
+}
+
+/**
+ *	freeze_processes - tell processes to enter the refrigerator
+ *
+ *	Returns 0 on success, or the number of processes that didn't freeze,
+ *	although they were told to.
+ */
+int freeze_processes(void)
+{
+	unsigned int nr_unfrozen;
+
+	printk("Stopping tasks ... ");
+	nr_unfrozen = try_to_freeze_tasks(FREEZER_USER_SPACE);
+	if (nr_unfrozen)
+		return nr_unfrozen;
+
+	sys_sync();
+	nr_unfrozen = try_to_freeze_tasks(FREEZER_KERNEL_THREADS);
+	if (nr_unfrozen)
+		return nr_unfrozen;
+
+	printk("done.\n");
 	BUG_ON(in_atomic());
 	return 0;
 }
 
-void thaw_processes(void)
+static void thaw_tasks(int thaw_user_space)
 {
 	struct task_struct *g, *p;
 
-	printk( "Restarting tasks..." );
 	read_lock(&tasklist_lock);
 	do_each_thread(g, p) {
 		if (!freezeable(p))
 			continue;
-		if (!thaw_process(p))
-			printk(KERN_INFO " Strange, %s not stopped\n", p->comm );
-	} while_each_thread(g, p);
 
+		if (is_user_space(p) == !thaw_user_space)
+			continue;
+
+		if (!thaw_process(p))
+			printk(KERN_WARNING " Strange, %s not stopped\n",
+				p->comm);
+	} while_each_thread(g, p);
 	read_unlock(&tasklist_lock);
+}
+
+void thaw_processes(void)
+{
+	printk("Restarting tasks ... ");
+	thaw_tasks(FREEZER_KERNEL_THREADS);
+	thaw_tasks(FREEZER_USER_SPACE);
 	schedule();
-	printk( " done\n" );
+	printk("done.\n");
 }
 
 EXPORT_SYMBOL(refrigerator);
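
The rework above splits freezing into two passes: user space tasks are frozen first, filesystems are synced while kernel threads are still running, and only then are the freezeable kernel threads frozen; thawing runs in the opposite order. A hypothetical caller-side fragment is sketched below to show the intended ordering; example_enter_suspend() and the device-handling placeholder are invented for illustration, and only freeze_processes()/thaw_processes() are interfaces defined in this file.

/* Illustrative fragment, assuming the declarations from this file are
 * visible; not part of the patch.
 */
static int example_enter_suspend(void)
{
	/* freeze_processes() freezes user space, calls sys_sync(), then
	 * freezes kernel threads; it returns the number of tasks that
	 * refused to freeze.
	 */
	if (freeze_processes()) {
		/* Undo the partial freeze before giving up. */
		thaw_processes();
		return -EAGAIN;
	}

	/* ... suspend devices and enter the sleep state here ... */

	/* thaw_processes() thaws kernel threads first, then user space. */
	thaw_processes();
	return 0;
}
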
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 99f9b7d..c024606 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1,15 +1,15 @@
 /*
  * linux/kernel/power/snapshot.c
  *
- * This file provide system snapshot/restore functionality.
+ * This file provides system snapshot/restore functionality for swsusp.
  *
  * Copyright (C) 1998-2005 Pavel Machek <pavel@suse.cz>
+ * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
  *
- * This file is released under the GPLv2, and is based on swsusp.c.
+ * This file is released under the GPLv2.
  *
  */
 
-
 #include <linux/version.h>
 #include <linux/module.h>
 #include <linux/mm.h>
@@ -34,137 +34,24 @@
 
 #include "power.h"
 
-/* List of PBEs used for creating and restoring the suspend image */
+/* List of PBEs needed for restoring the pages that were allocated before
+ * the suspend and included in the suspend image, but have also been
+ * allocated by the "resume" kernel, so their contents cannot be written
+ * directly to their "original" page frames.
+ */
 struct pbe *restore_pblist;
 
-static unsigned int nr_copy_pages;
-static unsigned int nr_meta_pages;
+/* Pointer to an auxiliary buffer (1 page) */
 static void *buffer;
 
-#ifdef CONFIG_HIGHMEM
-unsigned int count_highmem_pages(void)
-{
-	struct zone *zone;
-	unsigned long zone_pfn;
-	unsigned int n = 0;
-
-	for_each_zone (zone)
-		if (is_highmem(zone)) {
-			mark_free_pages(zone);
-			for (zone_pfn = 0; zone_pfn < zone->spanned_pages; zone_pfn++) {
-				struct page *page;
-				unsigned long pfn = zone_pfn + zone->zone_start_pfn;
-				if (!pfn_valid(pfn))
-					continue;
-				page = pfn_to_page(pfn);
-				if (PageReserved(page))
-					continue;
-				if (PageNosaveFree(page))
-					continue;
-				n++;
-			}
-		}
-	return n;
-}
-
-struct highmem_page {
-	char *data;
-	struct page *page;
-	struct highmem_page *next;
-};
-
-static struct highmem_page *highmem_copy;
-
-static int save_highmem_zone(struct zone *zone)
-{
-	unsigned long zone_pfn;
-	mark_free_pages(zone);
-	for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
-		struct page *page;
-		struct highmem_page *save;
-		void *kaddr;
-		unsigned long pfn = zone_pfn + zone->zone_start_pfn;
-
-		if (!(pfn%10000))
-			printk(".");
-		if (!pfn_valid(pfn))
-			continue;
-		page = pfn_to_page(pfn);
-		/*
-		 * This condition results from rvmalloc() sans vmalloc_32()
-		 * and architectural memory reservations. This should be
-		 * corrected eventually when the cases giving rise to this
-		 * are better understood.
-		 */
-		if (PageReserved(page))
-			continue;
-		BUG_ON(PageNosave(page));
-		if (PageNosaveFree(page))
-			continue;
-		save = kmalloc(sizeof(struct highmem_page), GFP_ATOMIC);
-		if (!save)
-			return -ENOMEM;
-		save->next = highmem_copy;
-		save->page = page;
-		save->data = (void *) get_zeroed_page(GFP_ATOMIC);
-		if (!save->data) {
-			kfree(save);
-			return -ENOMEM;
-		}
-		kaddr = kmap_atomic(page, KM_USER0);
-		memcpy(save->data, kaddr, PAGE_SIZE);
-		kunmap_atomic(kaddr, KM_USER0);
-		highmem_copy = save;
-	}
-	return 0;
-}
-
-int save_highmem(void)
-{
-	struct zone *zone;
-	int res = 0;
-
-	pr_debug("swsusp: Saving Highmem");
-	drain_local_pages();
-	for_each_zone (zone) {
-		if (is_highmem(zone))
-			res = save_highmem_zone(zone);
-		if (res)
-			return res;
-	}
-	printk("\n");
-	return 0;
-}
-
-int restore_highmem(void)
-{
-	printk("swsusp: Restoring Highmem\n");
-	while (highmem_copy) {
-		struct highmem_page *save = highmem_copy;
-		void *kaddr;
-		highmem_copy = save->next;
-
-		kaddr = kmap_atomic(save->page, KM_USER0);
-		memcpy(kaddr, save->data, PAGE_SIZE);
-		kunmap_atomic(kaddr, KM_USER0);
-		free_page((long) save->data);
-		kfree(save);
-	}
-	return 0;
-}
-#else
-static inline unsigned int count_highmem_pages(void) {return 0;}
-static inline int save_highmem(void) {return 0;}
-static inline int restore_highmem(void) {return 0;}
-#endif
-
 /**
  *	@safe_needed - on resume, for storing the PBE list and the image,
  *	we can only use memory pages that do not conflict with the pages
- *	used before suspend.
+ *	used before suspend.  The unsafe pages have PageNosaveFree set
+ *	and we count them using unsafe_pages.
  *
- *	The unsafe pages are marked with the PG_nosave_free flag
- *	and we count them using unsafe_pages
+ *	Each allocated image page is marked as PageNosave and PageNosaveFree
+ *	so that swsusp_free() can release it.
  */
 
 #define PG_ANY		0
@@ -174,7 +61,7 @@
 
 static unsigned int allocated_unsafe_pages;
 
-static void *alloc_image_page(gfp_t gfp_mask, int safe_needed)
+static void *get_image_page(gfp_t gfp_mask, int safe_needed)
 {
 	void *res;
 
@@ -195,20 +82,39 @@
 
 unsigned long get_safe_page(gfp_t gfp_mask)
 {
-	return (unsigned long)alloc_image_page(gfp_mask, PG_SAFE);
+	return (unsigned long)get_image_page(gfp_mask, PG_SAFE);
+}
+
+static struct page *alloc_image_page(gfp_t gfp_mask)
+{
+	struct page *page;
+
+	page = alloc_page(gfp_mask);
+	if (page) {
+		SetPageNosave(page);
+		SetPageNosaveFree(page);
+	}
+	return page;
 }
 
 /**
  *	free_image_page - free page represented by @addr, allocated with
- *	alloc_image_page (page flags set by it must be cleared)
+ *	get_image_page (page flags set by it must be cleared)
  */
 
 static inline void free_image_page(void *addr, int clear_nosave_free)
 {
-	ClearPageNosave(virt_to_page(addr));
+	struct page *page;
+
+	BUG_ON(!virt_addr_valid(addr));
+
+	page = virt_to_page(addr);
+
+	ClearPageNosave(page);
 	if (clear_nosave_free)
-		ClearPageNosaveFree(virt_to_page(addr));
-	free_page((unsigned long)addr);
+		ClearPageNosaveFree(page);
+
+	__free_page(page);
 }
 
 /* struct linked_page is used to build chains of pages */
@@ -269,7 +175,7 @@
 	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
 		struct linked_page *lp;
 
-		lp = alloc_image_page(ca->gfp_mask, ca->safe_needed);
+		lp = get_image_page(ca->gfp_mask, ca->safe_needed);
 		if (!lp)
 			return NULL;
 
@@ -446,8 +352,8 @@
 
 	/* Compute the number of zones */
 	nr = 0;
-	for_each_zone (zone)
-		if (populated_zone(zone) && !is_highmem(zone))
+	for_each_zone(zone)
+		if (populated_zone(zone))
 			nr++;
 
 	/* Allocate the list of zones bitmap objects */
@@ -459,10 +365,10 @@
 	}
 
 	/* Initialize the zone bitmap objects */
-	for_each_zone (zone) {
+	for_each_zone(zone) {
 		unsigned long pfn;
 
-		if (!populated_zone(zone) || is_highmem(zone))
+		if (!populated_zone(zone))
 			continue;
 
 		zone_bm->start_pfn = zone->zone_start_pfn;
@@ -481,7 +387,7 @@
 		while (bb) {
 			unsigned long *ptr;
 
-			ptr = alloc_image_page(gfp_mask, safe_needed);
+			ptr = get_image_page(gfp_mask, safe_needed);
 			bb->data = ptr;
 			if (!ptr)
 				goto Free;
@@ -505,7 +411,7 @@
 	memory_bm_position_reset(bm);
 	return 0;
 
-Free:
+ Free:
 	bm->p_list = ca.chain;
 	memory_bm_free(bm, PG_UNSAFE_CLEAR);
 	return -ENOMEM;
@@ -651,7 +557,7 @@
 	memory_bm_position_reset(bm);
 	return BM_END_OF_MAP;
 
-Return_pfn:
+ Return_pfn:
 	bm->cur.chunk = chunk;
 	bm->cur.bit = bit;
 	return bb->start_pfn + chunk * BM_BITS_PER_CHUNK + bit;
@@ -669,9 +575,81 @@
 
 	res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
 	res += DIV_ROUND_UP(res * sizeof(struct bm_block), PAGE_SIZE);
-	return res;
+	return 2 * res;
 }
 
+#ifdef CONFIG_HIGHMEM
+/**
+ *	count_free_highmem_pages - compute the total number of free highmem
+ *	pages, system-wide.
+ */
+
+static unsigned int count_free_highmem_pages(void)
+{
+	struct zone *zone;
+	unsigned int cnt = 0;
+
+	for_each_zone(zone)
+		if (populated_zone(zone) && is_highmem(zone))
+			cnt += zone->free_pages;
+
+	return cnt;
+}
+
+/**
+ *	saveable_highmem_page - Determine whether a highmem page should be
+ *	included in the suspend image.
+ *
+ *	We should save the page if it isn't Nosave, NosaveFree or Reserved,
+ *	and it isn't a part of a free chunk of pages.
+ */
+
+static struct page *saveable_highmem_page(unsigned long pfn)
+{
+	struct page *page;
+
+	if (!pfn_valid(pfn))
+		return NULL;
+
+	page = pfn_to_page(pfn);
+
+	BUG_ON(!PageHighMem(page));
+
+	if (PageNosave(page) || PageReserved(page) || PageNosaveFree(page))
+		return NULL;
+
+	return page;
+}
+
+/**
+ *	count_highmem_pages - compute the total number of saveable highmem
+ *	pages.
+ */
+
+unsigned int count_highmem_pages(void)
+{
+	struct zone *zone;
+	unsigned int n = 0;
+
+	for_each_zone(zone) {
+		unsigned long pfn, max_zone_pfn;
+
+		if (!is_highmem(zone))
+			continue;
+
+		mark_free_pages(zone);
+		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
+		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
+			if (saveable_highmem_page(pfn))
+				n++;
+	}
+	return n;
+}
+#else
+static inline void *saveable_highmem_page(unsigned long pfn) { return NULL; }
+static inline unsigned int count_highmem_pages(void) { return 0; }
+#endif /* CONFIG_HIGHMEM */
+
 /**
  *	pfn_is_nosave - check if given pfn is in the 'nosave' section
  */
@@ -684,12 +662,12 @@
 }
 
 /**
- *	saveable - Determine whether a page should be cloned or not.
- *	@pfn:	The page
+ *	saveable - Determine whether a non-highmem page should be included in
+ *	the suspend image.
  *
- *	We save a page if it isn't Nosave, and is not in the range of pages
- *	statically defined as 'unsaveable', and it
- *	isn't a part of a free chunk of pages.
+ *	We should save the page if it isn't Nosave, and is not in the range
+ *	of pages statically defined as 'unsaveable', and it isn't a part of
+ *	a free chunk of pages.
  */
 
 static struct page *saveable_page(unsigned long pfn)
@@ -701,76 +679,130 @@
 
 	page = pfn_to_page(pfn);
 
-	if (PageNosave(page))
+	BUG_ON(PageHighMem(page));
+
+	if (PageNosave(page) || PageNosaveFree(page))
 		return NULL;
+
 	if (PageReserved(page) && pfn_is_nosave(pfn))
 		return NULL;
-	if (PageNosaveFree(page))
-		return NULL;
 
 	return page;
 }
 
+/**
+ *	count_data_pages - compute the total number of saveable non-highmem
+ *	pages.
+ */
+
 unsigned int count_data_pages(void)
 {
 	struct zone *zone;
 	unsigned long pfn, max_zone_pfn;
 	unsigned int n = 0;
 
-	for_each_zone (zone) {
+	for_each_zone(zone) {
 		if (is_highmem(zone))
 			continue;
+
 		mark_free_pages(zone);
 		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
-			n += !!saveable_page(pfn);
+			if (saveable_page(pfn))
+				n++;
 	}
 	return n;
 }
 
-static inline void copy_data_page(long *dst, long *src)
+/* This is needed, because copy_page and memcpy are not usable for copying
+ * task structs.
+ */
+static inline void do_copy_page(long *dst, long *src)
 {
 	int n;
 
-	/* copy_page and memcpy are not usable for copying task structs. */
 	for (n = PAGE_SIZE / sizeof(long); n; n--)
 		*dst++ = *src++;
 }
 
+#ifdef CONFIG_HIGHMEM
+static inline struct page *
+page_is_saveable(struct zone *zone, unsigned long pfn)
+{
+	return is_highmem(zone) ?
+			saveable_highmem_page(pfn) : saveable_page(pfn);
+}
+
+static inline void
+copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
+{
+	struct page *s_page, *d_page;
+	void *src, *dst;
+
+	s_page = pfn_to_page(src_pfn);
+	d_page = pfn_to_page(dst_pfn);
+	if (PageHighMem(s_page)) {
+		src = kmap_atomic(s_page, KM_USER0);
+		dst = kmap_atomic(d_page, KM_USER1);
+		do_copy_page(dst, src);
+		kunmap_atomic(src, KM_USER0);
+		kunmap_atomic(dst, KM_USER1);
+	} else {
+		src = page_address(s_page);
+		if (PageHighMem(d_page)) {
+			/* Page pointed to by src may contain some kernel
+			 * data modified by kmap_atomic()
+			 */
+			do_copy_page(buffer, src);
+			dst = kmap_atomic(pfn_to_page(dst_pfn), KM_USER0);
+			memcpy(dst, buffer, PAGE_SIZE);
+			kunmap_atomic(dst, KM_USER0);
+		} else {
+			dst = page_address(d_page);
+			do_copy_page(dst, src);
+		}
+	}
+}
+#else
+#define page_is_saveable(zone, pfn)	saveable_page(pfn)
+
+static inline void
+copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
+{
+	do_copy_page(page_address(pfn_to_page(dst_pfn)),
+			page_address(pfn_to_page(src_pfn)));
+}
+#endif /* CONFIG_HIGHMEM */
+
 static void
 copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm)
 {
 	struct zone *zone;
 	unsigned long pfn;
 
-	for_each_zone (zone) {
+	for_each_zone(zone) {
 		unsigned long max_zone_pfn;
 
-		if (is_highmem(zone))
-			continue;
-
 		mark_free_pages(zone);
 		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
-			if (saveable_page(pfn))
+			if (page_is_saveable(zone, pfn))
 				memory_bm_set_bit(orig_bm, pfn);
 	}
 	memory_bm_position_reset(orig_bm);
 	memory_bm_position_reset(copy_bm);
 	do {
 		pfn = memory_bm_next_pfn(orig_bm);
-		if (likely(pfn != BM_END_OF_MAP)) {
-			struct page *page;
-			void *src;
-
-			page = pfn_to_page(pfn);
-			src = page_address(page);
-			page = pfn_to_page(memory_bm_next_pfn(copy_bm));
-			copy_data_page(page_address(page), src);
-		}
+		if (likely(pfn != BM_END_OF_MAP))
+			copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
 	} while (pfn != BM_END_OF_MAP);
 }
 
+/* Total number of image pages */
+static unsigned int nr_copy_pages;
+/* Number of pages needed for saving the original pfns of the image pages */
+static unsigned int nr_meta_pages;
+
 /**
  *	swsusp_free - free pages allocated for the suspend.
  *
@@ -792,7 +824,7 @@
 				if (PageNosave(page) && PageNosaveFree(page)) {
 					ClearPageNosave(page);
 					ClearPageNosaveFree(page);
-					free_page((long) page_address(page));
+					__free_page(page);
 				}
 			}
 	}
@@ -802,34 +834,108 @@
 	buffer = NULL;
 }
 
+#ifdef CONFIG_HIGHMEM
+/**
+ *	count_pages_for_highmem - compute the number of non-highmem pages
+ *	that will be necessary for creating copies of highmem pages.
+ */
+
+static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
+{
+	unsigned int free_highmem = count_free_highmem_pages();
+
+	if (free_highmem >= nr_highmem)
+		nr_highmem = 0;
+	else
+		nr_highmem -= free_highmem;
+
+	return nr_highmem;
+}
+#else
+static unsigned int
+count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
+#endif /* CONFIG_HIGHMEM */
 
 /**
- *	enough_free_mem - Make sure we enough free memory to snapshot.
- *
- *	Returns TRUE or FALSE after checking the number of available
- *	free pages.
+ *	enough_free_mem - Make sure we have enough free memory for the
+ *	snapshot image.
  */
 
-static int enough_free_mem(unsigned int nr_pages)
+static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
 {
 	struct zone *zone;
 	unsigned int free = 0, meta = 0;
 
-	for_each_zone (zone)
-		if (!is_highmem(zone)) {
+	for_each_zone(zone) {
+		meta += snapshot_additional_pages(zone);
+		if (!is_highmem(zone))
 			free += zone->free_pages;
-			meta += snapshot_additional_pages(zone);
-		}
+	}
 
-	pr_debug("swsusp: pages needed: %u + %u + %u, available pages: %u\n",
+	nr_pages += count_pages_for_highmem(nr_highmem);
+	pr_debug("swsusp: Normal pages needed: %u + %u + %u, available pages: %u\n",
 		nr_pages, PAGES_FOR_IO, meta, free);
 
 	return free > nr_pages + PAGES_FOR_IO + meta;
 }
 
+#ifdef CONFIG_HIGHMEM
+/**
+ *	get_highmem_buffer - if there are some highmem pages in the suspend
+ *	image, we may need the buffer to copy them and/or load their data.
+ */
+
+static inline int get_highmem_buffer(int safe_needed)
+{
+	buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed);
+	return buffer ? 0 : -ENOMEM;
+}
+
+/**
+ *	alloc_highmem_image_pages - allocate some highmem pages for the image.
+ *	Try to allocate as many pages as needed, but if the number of free
+ *	highmem pages is less than that, allocate them all.
+ */
+
+static inline unsigned int
+alloc_highmem_image_pages(struct memory_bitmap *bm, unsigned int nr_highmem)
+{
+	unsigned int to_alloc = count_free_highmem_pages();
+
+	if (to_alloc > nr_highmem)
+		to_alloc = nr_highmem;
+
+	nr_highmem -= to_alloc;
+	while (to_alloc-- > 0) {
+		struct page *page;
+
+		page = alloc_image_page(__GFP_HIGHMEM);
+		memory_bm_set_bit(bm, page_to_pfn(page));
+	}
+	return nr_highmem;
+}
+#else
+static inline int get_highmem_buffer(int safe_needed) { return 0; }
+
+static inline unsigned int
+alloc_highmem_image_pages(struct memory_bitmap *bm, unsigned int n) { return 0; }
+#endif /* CONFIG_HIGHMEM */
+
+/**
+ *	swsusp_alloc - allocate memory for the suspend image
+ *
+ *	We first try to allocate as many highmem pages as there are
+ *	saveable highmem pages in the system.  If that fails, we allocate
+ *	non-highmem pages for the copies of the remaining highmem ones.
+ *
+ *	In this approach it is likely that the copies of highmem pages will
+ *	also be located in the high memory, because of the way in which
+ *	copy_data_pages() works.
+ */
+
 static int
 swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
-		unsigned int nr_pages)
+		unsigned int nr_pages, unsigned int nr_highmem)
 {
 	int error;
 
@@ -841,46 +947,61 @@
 	if (error)
 		goto Free;
 
+	if (nr_highmem > 0) {
+		error = get_highmem_buffer(PG_ANY);
+		if (error)
+			goto Free;
+
+		nr_pages += alloc_highmem_image_pages(copy_bm, nr_highmem);
+	}
 	while (nr_pages-- > 0) {
-		struct page *page = alloc_page(GFP_ATOMIC | __GFP_COLD);
+		struct page *page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);
+
 		if (!page)
 			goto Free;
 
-		SetPageNosave(page);
-		SetPageNosaveFree(page);
 		memory_bm_set_bit(copy_bm, page_to_pfn(page));
 	}
 	return 0;
 
-Free:
+ Free:
 	swsusp_free();
 	return -ENOMEM;
 }
 
-/* Memory bitmap used for marking saveable pages */
+/* Memory bitmap used for marking saveable pages (during suspend) or the
+ * suspend image pages (during resume)
+ */
 static struct memory_bitmap orig_bm;
-/* Memory bitmap used for marking allocated pages that will contain the copies
- * of saveable pages
+/* Memory bitmap used on suspend for marking allocated pages that will contain
+ * the copies of saveable pages.  During resume it is initially used for
+ * marking the suspend image pages, but then its set bits are duplicated in
+ * @orig_bm and it is released.  Next, on systems with high memory, it may be
+ * used for marking "safe" highmem pages, but it has to be reinitialized for
+ * this purpose.
  */
 static struct memory_bitmap copy_bm;
 
 asmlinkage int swsusp_save(void)
 {
-	unsigned int nr_pages;
+	unsigned int nr_pages, nr_highmem;
 
-	pr_debug("swsusp: critical section: \n");
+	printk("swsusp: critical section: \n");
 
 	drain_local_pages();
 	nr_pages = count_data_pages();
-	printk("swsusp: Need to copy %u pages\n", nr_pages);
+	nr_highmem = count_highmem_pages();
+	printk("swsusp: Need to copy %u pages\n", nr_pages + nr_highmem);
 
-	if (!enough_free_mem(nr_pages)) {
+	if (!enough_free_mem(nr_pages, nr_highmem)) {
 		printk(KERN_ERR "swsusp: Not enough free memory\n");
 		return -ENOMEM;
 	}
 
-	if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages))
+	if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages, nr_highmem)) {
+		printk(KERN_ERR "swsusp: Memory allocation failed\n");
 		return -ENOMEM;
+	}
 
 	/* During allocating of suspend pagedir, new cold pages may appear.
 	 * Kill them.
@@ -894,10 +1015,12 @@
 	 * touch swap space! Except we must write out our image of course.
 	 */
 
+	nr_pages += nr_highmem;
 	nr_copy_pages = nr_pages;
-	nr_meta_pages = (nr_pages * sizeof(long) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
 
 	printk("swsusp: critical section/: done (%d pages copied)\n", nr_pages);
+
 	return 0;
 }
 
@@ -960,7 +1083,7 @@
 
 	if (!buffer) {
 		/* This makes the buffer be freed by swsusp_free() */
-		buffer = alloc_image_page(GFP_ATOMIC, PG_ANY);
+		buffer = get_image_page(GFP_ATOMIC, PG_ANY);
 		if (!buffer)
 			return -ENOMEM;
 	}
@@ -975,9 +1098,23 @@
 			memset(buffer, 0, PAGE_SIZE);
 			pack_pfns(buffer, &orig_bm);
 		} else {
-			unsigned long pfn = memory_bm_next_pfn(&copy_bm);
+			struct page *page;
 
-			handle->buffer = page_address(pfn_to_page(pfn));
+			page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
+			if (PageHighMem(page)) {
+				/* Highmem pages are copied to the buffer,
+				 * because we can't return with a kmapped
+				 * highmem page (we may not be called again).
+				 */
+				void *kaddr;
+
+				kaddr = kmap_atomic(page, KM_USER0);
+				memcpy(buffer, kaddr, PAGE_SIZE);
+				kunmap_atomic(kaddr, KM_USER0);
+				handle->buffer = buffer;
+			} else {
+				handle->buffer = page_address(page);
+			}
 		}
 		handle->prev = handle->cur;
 	}
@@ -1005,7 +1142,7 @@
 	unsigned long pfn, max_zone_pfn;
 
 	/* Clear page flags */
-	for_each_zone (zone) {
+	for_each_zone(zone) {
 		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
 			if (pfn_valid(pfn))
@@ -1101,6 +1238,218 @@
 	}
 }
 
+/* List of "safe" pages that may be used to store data loaded from the suspend
+ * image
+ */
+static struct linked_page *safe_pages_list;
+
+#ifdef CONFIG_HIGHMEM
+/* struct highmem_pbe is used for creating the list of highmem pages that
+ * should be restored atomically during the resume from disk, because the page
+ * frames they have occupied before the suspend are in use.
+ */
+struct highmem_pbe {
+	struct page *copy_page;	/* data is here now */
+	struct page *orig_page;	/* data was here before the suspend */
+	struct highmem_pbe *next;
+};
+
+/* List of highmem PBEs needed for restoring the highmem pages that were
+ * allocated before the suspend and included in the suspend image, but have
+ * also been allocated by the "resume" kernel, so their contents cannot be
+ * written directly to their "original" page frames.
+ */
+static struct highmem_pbe *highmem_pblist;
+
+/**
+ *	count_highmem_image_pages - compute the number of highmem pages in the
+ *	suspend image.  The bits in the memory bitmap @bm that correspond to the
+ *	image pages are assumed to be set.
+ */
+
+static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
+{
+	unsigned long pfn;
+	unsigned int cnt = 0;
+
+	memory_bm_position_reset(bm);
+	pfn = memory_bm_next_pfn(bm);
+	while (pfn != BM_END_OF_MAP) {
+		if (PageHighMem(pfn_to_page(pfn)))
+			cnt++;
+
+		pfn = memory_bm_next_pfn(bm);
+	}
+	return cnt;
+}
+
+/**
+ *	prepare_highmem_image - try to allocate as many highmem pages as
+ *	there are highmem image pages (@nr_highmem_p points to the variable
+ *	containing the number of highmem image pages).  The pages that are
+ *	"safe" (ie. will not be overwritten when the suspend image is
+ *	restored) have the corresponding bits set in @bm (it must be
+ *	uninitialized).
+ *
+ *	NOTE: This function should not be called if there are no highmem
+ *	image pages.
+ */
+
+static unsigned int safe_highmem_pages;
+
+static struct memory_bitmap *safe_highmem_bm;
+
+static int
+prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
+{
+	unsigned int to_alloc;
+
+	if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
+		return -ENOMEM;
+
+	if (get_highmem_buffer(PG_SAFE))
+		return -ENOMEM;
+
+	to_alloc = count_free_highmem_pages();
+	if (to_alloc > *nr_highmem_p)
+		to_alloc = *nr_highmem_p;
+	else
+		*nr_highmem_p = to_alloc;
+
+	safe_highmem_pages = 0;
+	while (to_alloc-- > 0) {
+		struct page *page;
+
+		page = alloc_page(__GFP_HIGHMEM);
+		if (!PageNosaveFree(page)) {
+			/* The page is "safe", set its bit in the bitmap */
+			memory_bm_set_bit(bm, page_to_pfn(page));
+			safe_highmem_pages++;
+		}
+		/* Mark the page as allocated */
+		SetPageNosave(page);
+		SetPageNosaveFree(page);
+	}
+	memory_bm_position_reset(bm);
+	safe_highmem_bm = bm;
+	return 0;
+}
+
+/**
+ *	get_highmem_page_buffer - for the given highmem image page find the buffer
+ *	that snapshot_write_next() should set for its caller to write to.
+ *
+ *	If the page is to be saved to its "original" page frame or a copy of
+ *	the page is to be made in the highmem, @buffer is returned.  Otherwise,
+ *	the copy of the page is to be made in normal memory, so the address of
+ *	the copy is returned.
+ *
+ *	If @buffer is returned, the caller of snapshot_write_next() will write
+ *	the page's contents to @buffer, so they will have to be copied to the
+ *	right location on the next call to snapshot_write_next(), which is done
+ *	with the help of copy_last_highmem_page().  For this purpose, if
+ *	@buffer is returned, @last_highmem_page is set to the page to which
+ *	the data will have to be copied from @buffer.
+ */
+
+static struct page *last_highmem_page;
+
+static void *
+get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
+{
+	struct highmem_pbe *pbe;
+	void *kaddr;
+
+	if (PageNosave(page) && PageNosaveFree(page)) {
+		/* We have allocated the "original" page frame and we can
+		 * use it directly to store the loaded page.
+		 */
+		last_highmem_page = page;
+		return buffer;
+	}
+	/* The "original" page frame has not been allocated and we have to
+	 * use a "safe" page frame to store the loaded page.
+	 */
+	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
+	if (!pbe) {
+		swsusp_free();
+		return NULL;
+	}
+	pbe->orig_page = page;
+	if (safe_highmem_pages > 0) {
+		struct page *tmp;
+
+		/* Copy of the page will be stored in high memory */
+		kaddr = buffer;
+		tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
+		safe_highmem_pages--;
+		last_highmem_page = tmp;
+		pbe->copy_page = tmp;
+	} else {
+		/* Copy of the page will be stored in normal memory */
+		kaddr = safe_pages_list;
+		safe_pages_list = safe_pages_list->next;
+		pbe->copy_page = virt_to_page(kaddr);
+	}
+	pbe->next = highmem_pblist;
+	highmem_pblist = pbe;
+	return kaddr;
+}
+
+/**
+ *	copy_last_highmem_page - copy the contents of a highmem image page from
+ *	@buffer, where the caller of snapshot_write_next() has placed them,
+ *	to the right location represented by @last_highmem_page.
+ */
+
+static void copy_last_highmem_page(void)
+{
+	if (last_highmem_page) {
+		void *dst;
+
+		dst = kmap_atomic(last_highmem_page, KM_USER0);
+		memcpy(dst, buffer, PAGE_SIZE);
+		kunmap_atomic(dst, KM_USER0);
+		last_highmem_page = NULL;
+	}
+}
+
+static inline int last_highmem_page_copied(void)
+{
+	return !last_highmem_page;
+}
+
+static inline void free_highmem_data(void)
+{
+	if (safe_highmem_bm)
+		memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);
+
+	if (buffer)
+		free_image_page(buffer, PG_UNSAFE_CLEAR);
+}
+#else
+static inline int get_safe_write_buffer(void) { return 0; }
+
+static unsigned int
+count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
+
+static inline int
+prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
+{
+	return 0;
+}
+
+static inline void *
+get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
+{
+	return NULL;
+}
+
+static inline void copy_last_highmem_page(void) {}
+static inline int last_highmem_page_copied(void) { return 1; }
+static inline void free_highmem_data(void) {}
+#endif /* CONFIG_HIGHMEM */
+
 /**
  *	prepare_image - use the memory bitmap @bm to mark the pages that will
  *	be overwritten in the process of restoring the system memory state
@@ -1110,20 +1459,25 @@
  *	The idea is to allocate a new memory bitmap first and then allocate
  *	as many pages as needed for the image data, but not to assign these
  *	pages to specific tasks initially.  Instead, we just mark them as
- *	allocated and create a list of "safe" pages that will be used later.
+ *	allocated and create lists of "safe" pages that will be used
+ *	later.  On systems with high memory a list of "safe" highmem pages is
+ *	also created.
  */
 
 #define PBES_PER_LINKED_PAGE	(LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
 
-static struct linked_page *safe_pages_list;
-
 static int
 prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
 {
-	unsigned int nr_pages;
+	unsigned int nr_pages, nr_highmem;
 	struct linked_page *sp_list, *lp;
 	int error;
 
+	/* If there is no highmem, the buffer will not be necessary */
+	free_image_page(buffer, PG_UNSAFE_CLEAR);
+	buffer = NULL;
+
+	nr_highmem = count_highmem_image_pages(bm);
 	error = mark_unsafe_pages(bm);
 	if (error)
 		goto Free;
@@ -1134,6 +1488,11 @@
 
 	duplicate_memory_bitmap(new_bm, bm);
 	memory_bm_free(bm, PG_UNSAFE_KEEP);
+	if (nr_highmem > 0) {
+		error = prepare_highmem_image(bm, &nr_highmem);
+		if (error)
+			goto Free;
+	}
 	/* Reserve some safe pages for potential later use.
 	 *
 	 * NOTE: This way we make sure there will be enough safe pages for the
@@ -1142,10 +1501,10 @@
 	 */
 	sp_list = NULL;
 	/* nr_copy_pages cannot be lesser than allocated_unsafe_pages */
-	nr_pages = nr_copy_pages - allocated_unsafe_pages;
+	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
 	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
 	while (nr_pages > 0) {
-		lp = alloc_image_page(GFP_ATOMIC, PG_SAFE);
+		lp = get_image_page(GFP_ATOMIC, PG_SAFE);
 		if (!lp) {
 			error = -ENOMEM;
 			goto Free;
@@ -1156,7 +1515,7 @@
 	}
 	/* Preallocate memory for the image */
 	safe_pages_list = NULL;
-	nr_pages = nr_copy_pages - allocated_unsafe_pages;
+	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
 	while (nr_pages > 0) {
 		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
 		if (!lp) {
@@ -1181,7 +1540,7 @@
 	}
 	return 0;
 
-Free:
+ Free:
 	swsusp_free();
 	return error;
 }
@@ -1196,6 +1555,9 @@
 	struct pbe *pbe;
 	struct page *page = pfn_to_page(memory_bm_next_pfn(bm));
 
+	if (PageHighMem(page))
+		return get_highmem_page_buffer(page, ca);
+
 	if (PageNosave(page) && PageNosaveFree(page))
 		/* We have allocated the "original" page frame and we can
 		 * use it directly to store the loaded page.
@@ -1210,12 +1572,12 @@
 		swsusp_free();
 		return NULL;
 	}
-	pbe->orig_address = (unsigned long)page_address(page);
-	pbe->address = (unsigned long)safe_pages_list;
+	pbe->orig_address = page_address(page);
+	pbe->address = safe_pages_list;
 	safe_pages_list = safe_pages_list->next;
 	pbe->next = restore_pblist;
 	restore_pblist = pbe;
-	return (void *)pbe->address;
+	return pbe->address;
 }
 
 /**
@@ -1249,14 +1611,16 @@
 	if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages)
 		return 0;
 
-	if (!buffer) {
-		/* This makes the buffer be freed by swsusp_free() */
-		buffer = alloc_image_page(GFP_ATOMIC, PG_ANY);
+	if (handle->offset == 0) {
+		if (!buffer)
+			/* This makes the buffer be freed by swsusp_free() */
+			buffer = get_image_page(GFP_ATOMIC, PG_ANY);
+
 		if (!buffer)
 			return -ENOMEM;
-	}
-	if (!handle->offset)
+
 		handle->buffer = buffer;
+	}
 	handle->sync_read = 1;
 	if (handle->prev < handle->cur) {
 		if (handle->prev == 0) {
@@ -1284,8 +1648,10 @@
 					return -ENOMEM;
 			}
 		} else {
+			copy_last_highmem_page();
 			handle->buffer = get_buffer(&orig_bm, &ca);
-			handle->sync_read = 0;
+			if (handle->buffer != buffer)
+				handle->sync_read = 0;
 		}
 		handle->prev = handle->cur;
 	}
@@ -1301,15 +1667,73 @@
 	return count;
 }
 
+/**
+ *	snapshot_write_finalize - must be called after the last call to
+ *	snapshot_write_next() in case the last page in the image happens
+ *	to be a highmem page and its contents should be stored in the
+ *	highmem.  Additionally, it releases the memory that will not be
+ *	used any more.
+ */
+
+void snapshot_write_finalize(struct snapshot_handle *handle)
+{
+	copy_last_highmem_page();
+	/* Free only if we have loaded the image entirely */
+	if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages) {
+		memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR);
+		free_highmem_data();
+	}
+}
+
 int snapshot_image_loaded(struct snapshot_handle *handle)
 {
-	return !(!nr_copy_pages ||
+	return !(!nr_copy_pages || !last_highmem_page_copied() ||
 			handle->cur <= nr_meta_pages + nr_copy_pages);
 }
 
-void snapshot_free_unused_memory(struct snapshot_handle *handle)
+#ifdef CONFIG_HIGHMEM
+/* Assumes that @buf is ready and points to a "safe" page */
+static inline void
+swap_two_pages_data(struct page *p1, struct page *p2, void *buf)
 {
-	/* Free only if we have loaded the image entirely */
-	if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages)
-		memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR);
+	void *kaddr1, *kaddr2;
+
+	kaddr1 = kmap_atomic(p1, KM_USER0);
+	kaddr2 = kmap_atomic(p2, KM_USER1);
+	memcpy(buf, kaddr1, PAGE_SIZE);
+	memcpy(kaddr1, kaddr2, PAGE_SIZE);
+	memcpy(kaddr2, buf, PAGE_SIZE);
+	kunmap_atomic(kaddr1, KM_USER0);
+	kunmap_atomic(kaddr2, KM_USER1);
 }
+
+/**
+ *	restore_highmem - for each highmem page that was allocated before
+ *	the suspend and included in the suspend image, and also has been
+ *	allocated by the "resume" kernel, swap its current (ie. "before
+ *	resume") contents with the previous (ie. "before suspend") one.
+ *
+ *	If the resume eventually fails, we can call this function once
+ *	again and restore the "before resume" highmem state.
+ */
+
+int restore_highmem(void)
+{
+	struct highmem_pbe *pbe = highmem_pblist;
+	void *buf;
+
+	if (!pbe)
+		return 0;
+
+	buf = get_image_page(GFP_ATOMIC, PG_SAFE);
+	if (!buf)
+		return -ENOMEM;
+
+	while (pbe) {
+		swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
+		pbe = pbe->next;
+	}
+	free_image_page(buf, PG_UNSAFE_CLEAR);
+	return 0;
+}
+#endif /* CONFIG_HIGHMEM */
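
A small worked example of the image metadata sizing used in swsusp_save() above: one metadata page holds PAGE_SIZE / sizeof(long) original pfns, hence nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE). The user-space snippet below just reproduces that arithmetic; the page count is made up and an 8-byte long is assumed.

#include <stdio.h>

#define PAGE_SIZE		4096UL
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long nr_pages = 100000;	/* hypothetical image size */
	unsigned long nr_meta_pages =
			DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);

	/* With 8-byte longs: 100000 * 8 = 800000 bytes -> 196 pages of 4096 */
	printf("%lu image pages need %lu metadata pages\n",
		nr_pages, nr_meta_pages);
	return 0;
}
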
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 1a3b0dd..f133d4a 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -34,177 +34,77 @@
 #define SWSUSP_SIG	"S1SUSPEND"
 
 static struct swsusp_header {
-	char reserved[PAGE_SIZE - 20 - sizeof(swp_entry_t)];
-	swp_entry_t image;
+	char reserved[PAGE_SIZE - 20 - sizeof(sector_t)];
+	sector_t image;
 	char	orig_sig[10];
 	char	sig[10];
 } __attribute__((packed, aligned(PAGE_SIZE))) swsusp_header;
 
 /*
- * Saving part...
+ * General things
  */
 
 static unsigned short root_swap = 0xffff;
+static struct block_device *resume_bdev;
 
-static int mark_swapfiles(swp_entry_t start)
+/**
+ *	submit - submit BIO request.
+ *	@rw:	READ or WRITE.
+ *	@off	physical offset of page.
+ *	@page:	page we're reading or writing.
+ *	@bio_chain: list of pending biod (for async reading)
+ *
+ *	Straight from the textbook - allocate and initialize the bio.
+ *	If we're reading, make sure the page is marked as dirty.
+ *	Then submit it and, if @bio_chain == NULL, wait.
+ */
+static int submit(int rw, pgoff_t page_off, struct page *page,
+			struct bio **bio_chain)
 {
-	int error;
+	struct bio *bio;
 
-	rw_swap_page_sync(READ, swp_entry(root_swap, 0),
-			  virt_to_page((unsigned long)&swsusp_header), NULL);
-	if (!memcmp("SWAP-SPACE",swsusp_header.sig, 10) ||
-	    !memcmp("SWAPSPACE2",swsusp_header.sig, 10)) {
-		memcpy(swsusp_header.orig_sig,swsusp_header.sig, 10);
-		memcpy(swsusp_header.sig,SWSUSP_SIG, 10);
-		swsusp_header.image = start;
-		error = rw_swap_page_sync(WRITE, swp_entry(root_swap, 0),
-				virt_to_page((unsigned long)&swsusp_header),
-				NULL);
+	bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
+	if (!bio)
+		return -ENOMEM;
+	bio->bi_sector = page_off * (PAGE_SIZE >> 9);
+	bio->bi_bdev = resume_bdev;
+	bio->bi_end_io = end_swap_bio_read;
+
+	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
+		printk("swsusp: ERROR: adding page to bio at %ld\n", page_off);
+		bio_put(bio);
+		return -EFAULT;
+	}
+
+	lock_page(page);
+	bio_get(bio);
+
+	if (bio_chain == NULL) {
+		submit_bio(rw | (1 << BIO_RW_SYNC), bio);
+		wait_on_page_locked(page);
+		if (rw == READ)
+			bio_set_pages_dirty(bio);
+		bio_put(bio);
 	} else {
-		pr_debug("swsusp: Partition is not swap space.\n");
-		error = -ENODEV;
+		if (rw == READ)
+			get_page(page);	/* These pages are freed later */
+		bio->bi_private = *bio_chain;
+		*bio_chain = bio;
+		submit_bio(rw | (1 << BIO_RW_SYNC), bio);
 	}
-	return error;
-}
-
-/**
- *	swsusp_swap_check - check if the resume device is a swap device
- *	and get its index (if so)
- */
-
-static int swsusp_swap_check(void) /* This is called before saving image */
-{
-	int res = swap_type_of(swsusp_resume_device);
-
-	if (res >= 0) {
-		root_swap = res;
-		return 0;
-	}
-	return res;
-}
-
-/**
- *	write_page - Write one page to given swap location.
- *	@buf:		Address we're writing.
- *	@offset:	Offset of the swap page we're writing to.
- *	@bio_chain:	Link the next write BIO here
- */
-
-static int write_page(void *buf, unsigned long offset, struct bio **bio_chain)
-{
-	swp_entry_t entry;
-	int error = -ENOSPC;
-
-	if (offset) {
-		struct page *page = virt_to_page(buf);
-
-		if (bio_chain) {
-			/*
-			 * Whether or not we successfully allocated a copy page,
-			 * we take a ref on the page here.  It gets undone in
-			 * wait_on_bio_chain().
-			 */
-			struct page *page_copy;
-			page_copy = alloc_page(GFP_ATOMIC);
-			if (page_copy == NULL) {
-				WARN_ON_ONCE(1);
-				bio_chain = NULL;	/* Go synchronous */
-				get_page(page);
-			} else {
-				memcpy(page_address(page_copy),
-					page_address(page), PAGE_SIZE);
-				page = page_copy;
-			}
-		}
-		entry = swp_entry(root_swap, offset);
-		error = rw_swap_page_sync(WRITE, entry, page, bio_chain);
-	}
-	return error;
-}
-
-/*
- *	The swap map is a data structure used for keeping track of each page
- *	written to a swap partition.  It consists of many swap_map_page
- *	structures that contain each an array of MAP_PAGE_SIZE swap entries.
- *	These structures are stored on the swap and linked together with the
- *	help of the .next_swap member.
- *
- *	The swap map is created during suspend.  The swap map pages are
- *	allocated and populated one at a time, so we only need one memory
- *	page to set up the entire structure.
- *
- *	During resume we also only need to use one swap_map_page structure
- *	at a time.
- */
-
-#define MAP_PAGE_ENTRIES	(PAGE_SIZE / sizeof(long) - 1)
-
-struct swap_map_page {
-	unsigned long		entries[MAP_PAGE_ENTRIES];
-	unsigned long		next_swap;
-};
-
-/**
- *	The swap_map_handle structure is used for handling swap in
- *	a file-alike way
- */
-
-struct swap_map_handle {
-	struct swap_map_page *cur;
-	unsigned long cur_swap;
-	struct bitmap_page *bitmap;
-	unsigned int k;
-};
-
-static void release_swap_writer(struct swap_map_handle *handle)
-{
-	if (handle->cur)
-		free_page((unsigned long)handle->cur);
-	handle->cur = NULL;
-	if (handle->bitmap)
-		free_bitmap(handle->bitmap);
-	handle->bitmap = NULL;
-}
-
-static void show_speed(struct timeval *start, struct timeval *stop,
-			unsigned nr_pages, char *msg)
-{
-	s64 elapsed_centisecs64;
-	int centisecs;
-	int k;
-	int kps;
-
-	elapsed_centisecs64 = timeval_to_ns(stop) - timeval_to_ns(start);
-	do_div(elapsed_centisecs64, NSEC_PER_SEC / 100);
-	centisecs = elapsed_centisecs64;
-	if (centisecs == 0)
-		centisecs = 1;	/* avoid div-by-zero */
-	k = nr_pages * (PAGE_SIZE / 1024);
-	kps = (k * 100) / centisecs;
-	printk("%s %d kbytes in %d.%02d seconds (%d.%02d MB/s)\n", msg, k,
-			centisecs / 100, centisecs % 100,
-			kps / 1000, (kps % 1000) / 10);
-}
-
-static int get_swap_writer(struct swap_map_handle *handle)
-{
-	handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
-	if (!handle->cur)
-		return -ENOMEM;
-	handle->bitmap = alloc_bitmap(count_swap_pages(root_swap, 0));
-	if (!handle->bitmap) {
-		release_swap_writer(handle);
-		return -ENOMEM;
-	}
-	handle->cur_swap = alloc_swap_page(root_swap, handle->bitmap);
-	if (!handle->cur_swap) {
-		release_swap_writer(handle);
-		return -ENOSPC;
-	}
-	handle->k = 0;
 	return 0;
 }
 
+static int bio_read_page(pgoff_t page_off, void *addr, struct bio **bio_chain)
+{
+	return submit(READ, page_off, virt_to_page(addr), bio_chain);
+}
+
+static int bio_write_page(pgoff_t page_off, void *addr, struct bio **bio_chain)
+{
+	return submit(WRITE, page_off, virt_to_page(addr), bio_chain);
+}
+
 static int wait_on_bio_chain(struct bio **bio_chain)
 {
 	struct bio *bio;
@@ -233,15 +133,155 @@
 	return ret;
 }
 
+/*
+ * Saving part
+ */
+
+static int mark_swapfiles(sector_t start)
+{
+	int error;
+
+	bio_read_page(swsusp_resume_block, &swsusp_header, NULL);
+	if (!memcmp("SWAP-SPACE",swsusp_header.sig, 10) ||
+	    !memcmp("SWAPSPACE2",swsusp_header.sig, 10)) {
+		memcpy(swsusp_header.orig_sig,swsusp_header.sig, 10);
+		memcpy(swsusp_header.sig,SWSUSP_SIG, 10);
+		swsusp_header.image = start;
+		error = bio_write_page(swsusp_resume_block,
+					&swsusp_header, NULL);
+	} else {
+		printk(KERN_ERR "swsusp: Swap header not found!\n");
+		error = -ENODEV;
+	}
+	return error;
+}
+
+/**
+ *	swsusp_swap_check - check if the resume device is a swap device
+ *	and get its index (if so)
+ */
+
+static int swsusp_swap_check(void) /* This is called before saving image */
+{
+	int res;
+
+	res = swap_type_of(swsusp_resume_device, swsusp_resume_block);
+	if (res < 0)
+		return res;
+
+	root_swap = res;
+	resume_bdev = open_by_devnum(swsusp_resume_device, FMODE_WRITE);
+	if (IS_ERR(resume_bdev))
+		return PTR_ERR(resume_bdev);
+
+	res = set_blocksize(resume_bdev, PAGE_SIZE);
+	if (res < 0)
+		blkdev_put(resume_bdev);
+
+	return res;
+}
+
+/**
+ *	write_page - Write one page to given swap location.
+ *	@buf:		Address we're writing.
+ *	@offset:	Offset of the swap page we're writing to.
+ *	@bio_chain:	Link the next write BIO here
+ */
+
+static int write_page(void *buf, sector_t offset, struct bio **bio_chain)
+{
+	void *src;
+
+	if (!offset)
+		return -ENOSPC;
+
+	if (bio_chain) {
+		src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
+		if (src) {
+			memcpy(src, buf, PAGE_SIZE);
+		} else {
+			WARN_ON_ONCE(1);
+			bio_chain = NULL;	/* Go synchronous */
+			src = buf;
+		}
+	} else {
+		src = buf;
+	}
+	return bio_write_page(offset, src, bio_chain);
+}
+
+/*
+ *	The swap map is a data structure used for keeping track of each page
+ *	written to a swap partition.  It consists of many swap_map_page
+ *	structures that each contain an array of MAP_PAGE_ENTRIES swap entries.
+ *	These structures are stored on the swap and linked together with the
+ *	help of the .next_swap member.
+ *
+ *	The swap map is created during suspend.  The swap map pages are
+ *	allocated and populated one at a time, so we only need one memory
+ *	page to set up the entire structure.
+ *
+ *	During resume we also only need to use one swap_map_page structure
+ *	at a time.
+ */
+
+#define MAP_PAGE_ENTRIES	(PAGE_SIZE / sizeof(sector_t) - 1)
+
+struct swap_map_page {
+	sector_t entries[MAP_PAGE_ENTRIES];
+	sector_t next_swap;
+};
+
+/**
+ *	The swap_map_handle structure is used for handling swap in
+ *	a file-alike way
+ */
+
+struct swap_map_handle {
+	struct swap_map_page *cur;
+	sector_t cur_swap;
+	struct bitmap_page *bitmap;
+	unsigned int k;
+};
+
+static void release_swap_writer(struct swap_map_handle *handle)
+{
+	if (handle->cur)
+		free_page((unsigned long)handle->cur);
+	handle->cur = NULL;
+	if (handle->bitmap)
+		free_bitmap(handle->bitmap);
+	handle->bitmap = NULL;
+}
+
+static int get_swap_writer(struct swap_map_handle *handle)
+{
+	handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
+	if (!handle->cur)
+		return -ENOMEM;
+	handle->bitmap = alloc_bitmap(count_swap_pages(root_swap, 0));
+	if (!handle->bitmap) {
+		release_swap_writer(handle);
+		return -ENOMEM;
+	}
+	handle->cur_swap = alloc_swapdev_block(root_swap, handle->bitmap);
+	if (!handle->cur_swap) {
+		release_swap_writer(handle);
+		return -ENOSPC;
+	}
+	handle->k = 0;
+	return 0;
+}
+
 static int swap_write_page(struct swap_map_handle *handle, void *buf,
 				struct bio **bio_chain)
 {
 	int error = 0;
-	unsigned long offset;
+	sector_t offset;
 
 	if (!handle->cur)
 		return -EINVAL;
-	offset = alloc_swap_page(root_swap, handle->bitmap);
+	offset = alloc_swapdev_block(root_swap, handle->bitmap);
 	error = write_page(buf, offset, bio_chain);
 	if (error)
 		return error;
@@ -250,7 +290,7 @@
 		error = wait_on_bio_chain(bio_chain);
 		if (error)
 			goto out;
-		offset = alloc_swap_page(root_swap, handle->bitmap);
+		offset = alloc_swapdev_block(root_swap, handle->bitmap);
 		if (!offset)
 			return -ENOSPC;
 		handle->cur->next_swap = offset;
@@ -261,7 +301,7 @@
 		handle->cur_swap = offset;
 		handle->k = 0;
 	}
-out:
+ out:
 	return error;
 }
 
@@ -315,7 +355,7 @@
 		error = err2;
 	if (!error)
 		printk("\b\b\b\bdone\n");
-	show_speed(&start, &stop, nr_to_write, "Wrote");
+	swsusp_show_speed(&start, &stop, nr_to_write, "Wrote");
 	return error;
 }
 
@@ -350,100 +390,50 @@
 	struct swsusp_info *header;
 	int error;
 
-	if ((error = swsusp_swap_check())) {
+	error = swsusp_swap_check();
+	if (error) {
 		printk(KERN_ERR "swsusp: Cannot find swap device, try "
 				"swapon -a.\n");
 		return error;
 	}
 	memset(&snapshot, 0, sizeof(struct snapshot_handle));
 	error = snapshot_read_next(&snapshot, PAGE_SIZE);
-	if (error < PAGE_SIZE)
-		return error < 0 ? error : -EFAULT;
+	if (error < PAGE_SIZE) {
+		if (error >= 0)
+			error = -EFAULT;
+
+		goto out;
+	}
 	header = (struct swsusp_info *)data_of(snapshot);
 	if (!enough_swap(header->pages)) {
 		printk(KERN_ERR "swsusp: Not enough free swap\n");
-		return -ENOSPC;
+		error = -ENOSPC;
+		goto out;
 	}
 	error = get_swap_writer(&handle);
 	if (!error) {
-		unsigned long start = handle.cur_swap;
+		sector_t start = handle.cur_swap;
+
 		error = swap_write_page(&handle, header, NULL);
 		if (!error)
 			error = save_image(&handle, &snapshot,
 					header->pages - 1);
+
 		if (!error) {
 			flush_swap_writer(&handle);
 			printk("S");
-			error = mark_swapfiles(swp_entry(root_swap, start));
+			error = mark_swapfiles(start);
 			printk("|\n");
 		}
 	}
 	if (error)
 		free_all_swap_pages(root_swap, handle.bitmap);
 	release_swap_writer(&handle);
+ out:
+	swsusp_close();
 	return error;
 }
 
-static struct block_device *resume_bdev;
-
-/**
- *	submit - submit BIO request.
- *	@rw:	READ or WRITE.
- *	@off	physical offset of page.
- *	@page:	page we're reading or writing.
- *	@bio_chain: list of pending biod (for async reading)
- *
- *	Straight from the textbook - allocate and initialize the bio.
- *	If we're reading, make sure the page is marked as dirty.
- *	Then submit it and, if @bio_chain == NULL, wait.
- */
-static int submit(int rw, pgoff_t page_off, struct page *page,
-			struct bio **bio_chain)
-{
-	struct bio *bio;
-
-	bio = bio_alloc(GFP_ATOMIC, 1);
-	if (!bio)
-		return -ENOMEM;
-	bio->bi_sector = page_off * (PAGE_SIZE >> 9);
-	bio->bi_bdev = resume_bdev;
-	bio->bi_end_io = end_swap_bio_read;
-
-	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
-		printk("swsusp: ERROR: adding page to bio at %ld\n", page_off);
-		bio_put(bio);
-		return -EFAULT;
-	}
-
-	lock_page(page);
-	bio_get(bio);
-
-	if (bio_chain == NULL) {
-		submit_bio(rw | (1 << BIO_RW_SYNC), bio);
-		wait_on_page_locked(page);
-		if (rw == READ)
-			bio_set_pages_dirty(bio);
-		bio_put(bio);
-	} else {
-		if (rw == READ)
-			get_page(page);	/* These pages are freed later */
-		bio->bi_private = *bio_chain;
-		*bio_chain = bio;
-		submit_bio(rw | (1 << BIO_RW_SYNC), bio);
-	}
-	return 0;
-}
-
-static int bio_read_page(pgoff_t page_off, void *addr, struct bio **bio_chain)
-{
-	return submit(READ, page_off, virt_to_page(addr), bio_chain);
-}
-
-static int bio_write_page(pgoff_t page_off, void *addr)
-{
-	return submit(WRITE, page_off, virt_to_page(addr), NULL);
-}
-
 /**
  *	The following functions allow us to read data using a swap map
  *	in a file-alike way
@@ -456,17 +446,18 @@
 	handle->cur = NULL;
 }
 
-static int get_swap_reader(struct swap_map_handle *handle,
-                                      swp_entry_t start)
+static int get_swap_reader(struct swap_map_handle *handle, sector_t start)
 {
 	int error;
 
-	if (!swp_offset(start))
+	if (!start)
 		return -EINVAL;
-	handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_ATOMIC);
+
+	handle->cur = (struct swap_map_page *)get_zeroed_page(__GFP_WAIT | __GFP_HIGH);
 	if (!handle->cur)
 		return -ENOMEM;
-	error = bio_read_page(swp_offset(start), handle->cur, NULL);
+
+	error = bio_read_page(start, handle->cur, NULL);
 	if (error) {
 		release_swap_reader(handle);
 		return error;
@@ -478,7 +469,7 @@
 static int swap_read_page(struct swap_map_handle *handle, void *buf,
 				struct bio **bio_chain)
 {
-	unsigned long offset;
+	sector_t offset;
 	int error;
 
 	if (!handle->cur)
@@ -547,11 +538,11 @@
 		error = err2;
 	if (!error) {
 		printk("\b\b\b\bdone\n");
-		snapshot_free_unused_memory(snapshot);
+		snapshot_write_finalize(snapshot);
 		if (!snapshot_image_loaded(snapshot))
 			error = -ENODATA;
 	}
-	show_speed(&start, &stop, nr_to_read, "Read");
+	swsusp_show_speed(&start, &stop, nr_to_read, "Read");
 	return error;
 }
 
@@ -600,12 +591,16 @@
 	if (!IS_ERR(resume_bdev)) {
 		set_blocksize(resume_bdev, PAGE_SIZE);
 		memset(&swsusp_header, 0, sizeof(swsusp_header));
-		if ((error = bio_read_page(0, &swsusp_header, NULL)))
+		error = bio_read_page(swsusp_resume_block,
+					&swsusp_header, NULL);
+		if (error)
 			return error;
+
 		if (!memcmp(SWSUSP_SIG, swsusp_header.sig, 10)) {
 			memcpy(swsusp_header.sig, swsusp_header.orig_sig, 10);
 			/* Reset swap signature now */
-			error = bio_write_page(0, &swsusp_header);
+			error = bio_write_page(swsusp_resume_block,
+						&swsusp_header, NULL);
 		} else {
 			return -EINVAL;
 		}
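
The comment block above describes the on-disk swap map: swap_map_page structures, each holding MAP_PAGE_ENTRIES sector numbers of data pages plus the sector of the next map page. The user-space sketch below mirrors how such a chain is walked during resume; read_sector() is a made-up stand-in for bio_read_page() and the structures copy the layout from the patch, so treat it as an illustration rather than kernel code.

#include <stdint.h>
#include <string.h>

#define PAGE_SIZE		4096
typedef uint64_t sector_t;
#define MAP_PAGE_ENTRIES	(PAGE_SIZE / sizeof(sector_t) - 1)

struct swap_map_page {
	sector_t entries[MAP_PAGE_ENTRIES];	/* sectors holding data pages */
	sector_t next_swap;			/* next map page; 0 ends the chain */
};

/* Placeholder for bio_read_page(): a real reader would fill buf from disk. */
static int read_sector(sector_t sector, void *buf)
{
	(void)sector;
	memset(buf, 0, PAGE_SIZE);
	return 0;
}

/* Read every data page of the image by following the map-page chain. */
int walk_swap_map(sector_t start, void *page_buf)
{
	struct swap_map_page map;
	sector_t cur = start;
	unsigned int k;
	int error;

	while (cur) {
		error = read_sector(cur, &map);
		if (error)
			return error;
		for (k = 0; k < MAP_PAGE_ENTRIES && map.entries[k]; k++) {
			error = read_sector(map.entries[k], page_buf);
			if (error)
				return error;
			/* ... hand page_buf to the snapshot code here ... */
		}
		cur = map.next_swap;
	}
	return 0;
}
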
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index 0b66659..31aa039 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -49,6 +49,7 @@
 #include <linux/bootmem.h>
 #include <linux/syscalls.h>
 #include <linux/highmem.h>
+#include <linux/time.h>
 
 #include "power.h"
 
@@ -64,10 +65,8 @@
 
 #ifdef CONFIG_HIGHMEM
 unsigned int count_highmem_pages(void);
-int save_highmem(void);
 int restore_highmem(void);
 #else
-static inline int save_highmem(void) { return 0; }
 static inline int restore_highmem(void) { return 0; }
 static inline unsigned int count_highmem_pages(void) { return 0; }
 #endif
@@ -134,18 +133,18 @@
 	return 0;
 }
 
-unsigned long alloc_swap_page(int swap, struct bitmap_page *bitmap)
+sector_t alloc_swapdev_block(int swap, struct bitmap_page *bitmap)
 {
 	unsigned long offset;
 
 	offset = swp_offset(get_swap_page_of_type(swap));
 	if (offset) {
-		if (bitmap_set(bitmap, offset)) {
+		if (bitmap_set(bitmap, offset))
 			swap_free(swp_entry(swap, offset));
-			offset = 0;
-		}
+		else
+			return swapdev_block(swap, offset);
 	}
-	return offset;
+	return 0;
 }
 
 void free_all_swap_pages(int swap, struct bitmap_page *bitmap)
@@ -166,6 +165,34 @@
 }
 
 /**
+ *	swsusp_show_speed - print the time elapsed between two events represented by
+ *	@start and @stop
+ *
+ *	@nr_pages -	number of pages processed between @start and @stop
+ *	@msg -		introductory message to print
+ */
+
+void swsusp_show_speed(struct timeval *start, struct timeval *stop,
+			unsigned nr_pages, char *msg)
+{
+	s64 elapsed_centisecs64;
+	int centisecs;
+	int k;
+	int kps;
+
+	elapsed_centisecs64 = timeval_to_ns(stop) - timeval_to_ns(start);
+	do_div(elapsed_centisecs64, NSEC_PER_SEC / 100);
+	centisecs = elapsed_centisecs64;
+	if (centisecs == 0)
+		centisecs = 1;	/* avoid div-by-zero */
+	k = nr_pages * (PAGE_SIZE / 1024);
+	kps = (k * 100) / centisecs;
+	printk("%s %d kbytes in %d.%02d seconds (%d.%02d MB/s)\n", msg, k,
+			centisecs / 100, centisecs % 100,
+			kps / 1000, (kps % 1000) / 10);
+}
+
+/**
  *	swsusp_shrink_memory -  Try to free as much memory as needed
  *
  *	... but do not OOM-kill anyone
@@ -184,23 +211,37 @@
 
 int swsusp_shrink_memory(void)
 {
-	long size, tmp;
+	long tmp;
 	struct zone *zone;
 	unsigned long pages = 0;
 	unsigned int i = 0;
 	char *p = "-\\|/";
+	struct timeval start, stop;
 
 	printk("Shrinking memory...  ");
+	do_gettimeofday(&start);
 	do {
-		size = 2 * count_highmem_pages();
-		size += size / 50 + count_data_pages() + PAGES_FOR_IO;
+		long size, highmem_size;
+
+		highmem_size = count_highmem_pages();
+		size = count_data_pages() + PAGES_FOR_IO;
 		tmp = size;
+		size += highmem_size;
 		for_each_zone (zone)
-			if (!is_highmem(zone) && populated_zone(zone)) {
-				tmp -= zone->free_pages;
-				tmp += zone->lowmem_reserve[ZONE_NORMAL];
-				tmp += snapshot_additional_pages(zone);
+			if (populated_zone(zone)) {
+				if (is_highmem(zone)) {
+					highmem_size -= zone->free_pages;
+				} else {
+					tmp -= zone->free_pages;
+					tmp += zone->lowmem_reserve[ZONE_NORMAL];
+					tmp += snapshot_additional_pages(zone);
+				}
 			}
+
+		if (highmem_size < 0)
+			highmem_size = 0;
+
+		tmp += highmem_size;
 		if (tmp > 0) {
 			tmp = __shrink_memory(tmp);
 			if (!tmp)
@@ -212,7 +253,9 @@
 		}
 		printk("\b%c", p[i++%4]);
 	} while (tmp > 0);
+	do_gettimeofday(&stop);
 	printk("\bdone (%lu pages freed)\n", pages);
+	swsusp_show_speed(&start, &stop, pages, "Freed");
 
 	return 0;
 }
@@ -223,6 +266,7 @@
 
 	if ((error = arch_prepare_suspend()))
 		return error;
+
 	local_irq_disable();
 	/* At this point, device_suspend() has been called, but *not*
 	 * device_power_down(). We *must* device_power_down() now.
@@ -235,23 +279,16 @@
 		goto Enable_irqs;
 	}
 
-	if ((error = save_highmem())) {
-		printk(KERN_ERR "swsusp: Not enough free pages for highmem\n");
-		goto Restore_highmem;
-	}
-
 	save_processor_state();
 	if ((error = swsusp_arch_suspend()))
 		printk(KERN_ERR "Error %d suspending\n", error);
 	/* Restore control flow magically appears here */
 	restore_processor_state();
-Restore_highmem:
-	restore_highmem();
 	/* NOTE:  device_power_up() is just a resume() for devices
 	 * that suspended with irqs off ... no overall powerup.
 	 */
 	device_power_up();
-Enable_irqs:
+ Enable_irqs:
 	local_irq_enable();
 	return error;
 }
@@ -268,18 +305,23 @@
 		printk(KERN_ERR "Some devices failed to power down, very bad\n");
 	/* We'll ignore saved state, but this gets preempt count (etc) right */
 	save_processor_state();
-	error = swsusp_arch_resume();
-	/* Code below is only ever reached in case of failure. Otherwise
-	 * execution continues at place where swsusp_arch_suspend was called
-         */
-	BUG_ON(!error);
+	error = restore_highmem();
+	if (!error) {
+		error = swsusp_arch_resume();
+		/* The code below is only ever reached in case of a failure.
+		 * Otherwise execution continues at the place where
+		 * swsusp_arch_suspend() was called.
+		 */
+		BUG_ON(!error);
+		/* This call to restore_highmem() undoes the previous one */
+		restore_highmem();
+	}
 	/* The only reason why swsusp_arch_resume() can fail is memory being
 	 * very tight, so we have to free it as soon as we can to avoid
 	 * subsequent failures
 	 */
 	swsusp_free();
 	restore_processor_state();
-	restore_highmem();
 	touch_softlockup_watchdog();
 	device_power_up();
 	local_irq_enable();
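
The new swsusp_show_speed() helper above converts an elapsed time into centiseconds and derives a throughput in kB/s from it. The user-space snippet below reproduces that calculation with made-up numbers (25000 pages of 4 KiB written in 2.5 s) and prints the same style of message.

#include <stdio.h>

int main(void)
{
	unsigned nr_pages = 25000;		/* hypothetical pages written */
	long long elapsed_ns = 2500000000LL;	/* hypothetical 2.5 seconds */
	int centisecs = elapsed_ns / (1000000000 / 100);
	int k, kps;

	if (centisecs == 0)
		centisecs = 1;			/* avoid div-by-zero */
	k = nr_pages * (4096 / 1024);		/* kbytes, assuming 4 KiB pages */
	kps = (k * 100) / centisecs;		/* kbytes per second */

	/* Prints: Wrote 100000 kbytes in 2.50 seconds (40.00 MB/s) */
	printf("Wrote %d kbytes in %d.%02d seconds (%d.%02d MB/s)\n",
		k, centisecs / 100, centisecs % 100,
		kps / 1000, (kps % 1000) / 10);
	return 0;
}
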
diff --git a/kernel/power/user.c b/kernel/power/user.c
index d991d3b..89443b8 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -11,6 +11,7 @@
 
 #include <linux/suspend.h>
 #include <linux/syscalls.h>
+#include <linux/reboot.h>
 #include <linux/string.h>
 #include <linux/device.h>
 #include <linux/miscdevice.h>
@@ -21,6 +22,7 @@
 #include <linux/fs.h>
 #include <linux/console.h>
 #include <linux/cpu.h>
+#include <linux/freezer.h>
 
 #include <asm/uaccess.h>
 
@@ -54,7 +56,8 @@
 	filp->private_data = data;
 	memset(&data->handle, 0, sizeof(struct snapshot_handle));
 	if ((filp->f_flags & O_ACCMODE) == O_RDONLY) {
-		data->swap = swsusp_resume_device ? swap_type_of(swsusp_resume_device) : -1;
+		data->swap = swsusp_resume_device ?
+				swap_type_of(swsusp_resume_device, 0) : -1;
 		data->mode = O_RDONLY;
 	} else {
 		data->swap = -1;
@@ -76,10 +79,10 @@
 	free_all_swap_pages(data->swap, data->bitmap);
 	free_bitmap(data->bitmap);
 	if (data->frozen) {
-		down(&pm_sem);
+		mutex_lock(&pm_mutex);
 		thaw_processes();
 		enable_nonboot_cpus();
-		up(&pm_sem);
+		mutex_unlock(&pm_mutex);
 	}
 	atomic_inc(&device_available);
 	return 0;
@@ -124,7 +127,8 @@
 {
 	int error = 0;
 	struct snapshot_data *data;
-	loff_t offset, avail;
+	loff_t avail;
+	sector_t offset;
 
 	if (_IOC_TYPE(cmd) != SNAPSHOT_IOC_MAGIC)
 		return -ENOTTY;
@@ -140,7 +144,7 @@
 	case SNAPSHOT_FREEZE:
 		if (data->frozen)
 			break;
-		down(&pm_sem);
+		mutex_lock(&pm_mutex);
 		error = disable_nonboot_cpus();
 		if (!error) {
 			error = freeze_processes();
@@ -150,7 +154,7 @@
 				error = -EBUSY;
 			}
 		}
-		up(&pm_sem);
+		mutex_unlock(&pm_mutex);
 		if (!error)
 			data->frozen = 1;
 		break;
@@ -158,10 +162,10 @@
 	case SNAPSHOT_UNFREEZE:
 		if (!data->frozen)
 			break;
-		down(&pm_sem);
+		mutex_lock(&pm_mutex);
 		thaw_processes();
 		enable_nonboot_cpus();
-		up(&pm_sem);
+		mutex_unlock(&pm_mutex);
 		data->frozen = 0;
 		break;
 
@@ -170,7 +174,7 @@
 			error = -EPERM;
 			break;
 		}
-		down(&pm_sem);
+		mutex_lock(&pm_mutex);
 		/* Free memory before shutting down devices. */
 		error = swsusp_shrink_memory();
 		if (!error) {
@@ -183,7 +187,7 @@
 			}
 			resume_console();
 		}
-		up(&pm_sem);
+		mutex_unlock(&pm_mutex);
 		if (!error)
 			error = put_user(in_suspend, (unsigned int __user *)arg);
 		if (!error)
@@ -191,13 +195,13 @@
 		break;
 
 	case SNAPSHOT_ATOMIC_RESTORE:
+		snapshot_write_finalize(&data->handle);
 		if (data->mode != O_WRONLY || !data->frozen ||
 		    !snapshot_image_loaded(&data->handle)) {
 			error = -EPERM;
 			break;
 		}
-		snapshot_free_unused_memory(&data->handle);
-		down(&pm_sem);
+		mutex_lock(&pm_mutex);
 		pm_prepare_console();
 		suspend_console();
 		error = device_suspend(PMSG_PRETHAW);
@@ -207,7 +211,7 @@
 		}
 		resume_console();
 		pm_restore_console();
-		up(&pm_sem);
+		mutex_unlock(&pm_mutex);
 		break;
 
 	case SNAPSHOT_FREE:
@@ -238,10 +242,10 @@
 				break;
 			}
 		}
-		offset = alloc_swap_page(data->swap, data->bitmap);
+		offset = alloc_swapdev_block(data->swap, data->bitmap);
 		if (offset) {
 			offset <<= PAGE_SHIFT;
-			error = put_user(offset, (loff_t __user *)arg);
+			error = put_user(offset, (sector_t __user *)arg);
 		} else {
 			error = -ENOSPC;
 		}
@@ -264,7 +268,7 @@
 			 * so we need to recode them
 			 */
 			if (old_decode_dev(arg)) {
-				data->swap = swap_type_of(old_decode_dev(arg));
+				data->swap = swap_type_of(old_decode_dev(arg), 0);
 				if (data->swap < 0)
 					error = -ENODEV;
 			} else {
@@ -282,7 +286,7 @@
 			break;
 		}
 
-		if (down_trylock(&pm_sem)) {
+		if (!mutex_trylock(&pm_mutex)) {
 			error = -EBUSY;
 			break;
 		}
@@ -309,8 +313,66 @@
 		if (pm_ops->finish)
 			pm_ops->finish(PM_SUSPEND_MEM);
 
-OutS3:
-		up(&pm_sem);
+ OutS3:
+		mutex_unlock(&pm_mutex);
+		break;
+
+	case SNAPSHOT_PMOPS:
+		switch (arg) {
+
+		case PMOPS_PREPARE:
+			if (pm_ops->prepare) {
+				error = pm_ops->prepare(PM_SUSPEND_DISK);
+			}
+			break;
+
+		case PMOPS_ENTER:
+			kernel_shutdown_prepare(SYSTEM_SUSPEND_DISK);
+			error = pm_ops->enter(PM_SUSPEND_DISK);
+			break;
+
+		case PMOPS_FINISH:
+			if (pm_ops && pm_ops->finish) {
+				pm_ops->finish(PM_SUSPEND_DISK);
+			}
+			break;
+
+		default:
+			printk(KERN_ERR "SNAPSHOT_PMOPS: invalid argument %ld\n", arg);
+			error = -EINVAL;
+
+		}
+		break;
+
+	case SNAPSHOT_SET_SWAP_AREA:
+		if (data->bitmap) {
+			error = -EPERM;
+		} else {
+			struct resume_swap_area swap_area;
+			dev_t swdev;
+
+			error = copy_from_user(&swap_area, (void __user *)arg,
+					sizeof(struct resume_swap_area));
+			if (error) {
+				error = -EFAULT;
+				break;
+			}
+
+			/*
+			 * User space encodes device types as two-byte values,
+			 * so we need to recode them
+			 */
+			swdev = old_decode_dev(swap_area.dev);
+			if (swdev) {
+				offset = swap_area.offset;
+				data->swap = swap_type_of(swdev, offset);
+				if (data->swap < 0)
+					error = -ENODEV;
+			} else {
+				data->swap = -1;
+				error = -EINVAL;
+			}
+		}
 		break;
 
 	default:
@@ -321,7 +383,7 @@
 	return error;
 }
 
-static struct file_operations snapshot_fops = {
+static const struct file_operations snapshot_fops = {
 	.open = snapshot_open,
 	.release = snapshot_release,
 	.read = snapshot_read,
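
As a rough illustration of the SNAPSHOT_SET_SWAP_AREA path added above, a
user-space caller fills in the two fields the handler actually reads
(swap_area.dev and swap_area.offset) and passes the structure down.  The
sketch is only indicative: the header names are assumptions, and the device
number is packed into the old two-byte encoding because the handler runs it
through old_decode_dev(), as the "recode them" comment in the hunk notes.

	#include <sys/ioctl.h>
	#include <sys/stat.h>
	#include <sys/sysmacros.h>		/* major()/minor(), assumed location */
	#include <linux/suspend_ioctls.h>	/* SNAPSHOT_*, assumed header */

	static int set_swap_area(int snapshot_fd, const char *swap_dev)
	{
		struct resume_swap_area swap_area;
		struct stat st;

		if (stat(swap_dev, &st) < 0)
			return -1;

		/* old two-byte dev_t encoding expected by old_decode_dev() */
		swap_area.dev = (major(st.st_rdev) << 8) | minor(st.st_rdev);
		/* 0 = no swap-file offset, as with SNAPSHOT_SET_SWAP_DEVICE */
		swap_area.offset = 0;

		return ioctl(snapshot_fd, SNAPSHOT_SET_SWAP_AREA, &swap_area);
	}
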
diff --git a/kernel/printk.c b/kernel/printk.c
index 6642655..185bb45 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -53,8 +53,6 @@
 	DEFAULT_CONSOLE_LOGLEVEL,	/* default_console_loglevel */
 };
 
-EXPORT_UNUSED_SYMBOL(console_printk);  /*  June 2006  */
-
 /*
  * Low level drivers may need that to know if they can schedule in
  * their unblank() callback or not. So let's export it.
@@ -335,13 +333,25 @@
 	}
 }
 
+static int __read_mostly ignore_loglevel;
+
+int __init ignore_loglevel_setup(char *str)
+{
+	ignore_loglevel = 1;
+	printk(KERN_INFO "debug: ignoring loglevel setting.\n");
+
+	return 1;
+}
+
+__setup("ignore_loglevel", ignore_loglevel_setup);
+
 /*
  * Write out chars from start to end - 1 inclusive
  */
 static void _call_console_drivers(unsigned long start,
 				unsigned long end, int msg_log_level)
 {
-	if (msg_log_level < console_loglevel &&
+	if ((msg_log_level < console_loglevel || ignore_loglevel) &&
 			console_drivers && start != end) {
 		if ((start & LOG_BUF_MASK) > (end & LOG_BUF_MASK)) {
 			/* wrapped write */
@@ -631,12 +641,7 @@
 
 asmlinkage long sys_syslog(int type, char __user *buf, int len)
 {
-	return 0;
-}
-
-int do_syslog(int type, char __user *buf, int len)
-{
-	return 0;
+	return -ENOSYS;
 }
 
 static void call_console_drivers(unsigned long start, unsigned long end)
@@ -777,7 +782,6 @@
 {
 	return console_locked;
 }
-EXPORT_UNUSED_SYMBOL(is_console_locked);  /*  June 2006  */
 
 /**
  * release_console_sem - unlock the console system
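
The ignore_loglevel knob added above is driven purely from the kernel command
line; with it present, every printk() reaches the console regardless of the
loglevel that "quiet" would otherwise impose.  For example (bootloader syntax
shown only as an illustration):

	kernel /boot/vmlinuz root=/dev/sda1 ro quiet ignore_loglevel
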
diff --git a/kernel/profile.c b/kernel/profile.c
index f940b46..fb5e03d 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -40,7 +40,7 @@
 
 static atomic_t *prof_buffer;
 static unsigned long prof_len, prof_shift;
-static int prof_on __read_mostly;
+int prof_on __read_mostly;
 static cpumask_t prof_cpu_mask = CPU_MASK_ALL;
 #ifdef CONFIG_SMP
 static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);
@@ -51,9 +51,19 @@
 static int __init profile_setup(char * str)
 {
 	static char __initdata schedstr[] = "schedule";
+	static char __initdata sleepstr[] = "sleep";
 	int par;
 
-	if (!strncmp(str, schedstr, strlen(schedstr))) {
+	if (!strncmp(str, sleepstr, strlen(sleepstr))) {
+		prof_on = SLEEP_PROFILING;
+		if (str[strlen(sleepstr)] == ',')
+			str += strlen(sleepstr) + 1;
+		if (get_option(&str, &par))
+			prof_shift = par;
+		printk(KERN_INFO
+			"kernel sleep profiling enabled (shift: %ld)\n",
+			prof_shift);
+	} else if (!strncmp(str, schedstr, strlen(schedstr))) {
 		prof_on = SCHED_PROFILING;
 		if (str[strlen(schedstr)] == ',')
 			str += strlen(schedstr) + 1;
@@ -204,7 +214,8 @@
  * positions to which hits are accounted during short intervals (e.g.
  * several seconds) is usually very small. Exclusion from buffer
  * flipping is provided by interrupt disablement (note that for
- * SCHED_PROFILING profile_hit() may be called from process context).
+ * SCHED_PROFILING or SLEEP_PROFILING profile_hit() may be called from
+ * process context).
  * The hash function is meant to be lightweight as opposed to strong,
  * and was vaguely inspired by ppc64 firmware-supported inverted
  * pagetable hash functions, but uses a full hashtable full of finite
@@ -257,7 +268,7 @@
 	mutex_unlock(&profile_flip_mutex);
 }
 
-void profile_hit(int type, void *__pc)
+void profile_hits(int type, void *__pc, unsigned int nr_hits)
 {
 	unsigned long primary, secondary, flags, pc = (unsigned long)__pc;
 	int i, j, cpu;
@@ -274,21 +285,31 @@
 		put_cpu();
 		return;
 	}
+	/*
+	 * We buffer the global profiler buffer into a per-CPU
+	 * queue and thus reduce the number of global (and possibly
+	 * NUMA-alien) accesses. The write-queue is self-coalescing:
+	 */
 	local_irq_save(flags);
 	do {
 		for (j = 0; j < PROFILE_GRPSZ; ++j) {
 			if (hits[i + j].pc == pc) {
-				hits[i + j].hits++;
+				hits[i + j].hits += nr_hits;
 				goto out;
 			} else if (!hits[i + j].hits) {
 				hits[i + j].pc = pc;
-				hits[i + j].hits = 1;
+				hits[i + j].hits = nr_hits;
 				goto out;
 			}
 		}
 		i = (i + secondary) & (NR_PROFILE_HIT - 1);
 	} while (i != primary);
-	atomic_inc(&prof_buffer[pc]);
+
+	/*
+	 * Add the current hit(s) and flush the write-queue out
+	 * to the global buffer:
+	 */
+	atomic_add(nr_hits, &prof_buffer[pc]);
 	for (i = 0; i < NR_PROFILE_HIT; ++i) {
 		atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
 		hits[i].pc = hits[i].hits = 0;
@@ -298,7 +319,6 @@
 	put_cpu();
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int __devinit profile_cpu_callback(struct notifier_block *info,
 					unsigned long action, void *__cpu)
 {
@@ -351,19 +371,19 @@
 	}
 	return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */
 #else /* !CONFIG_SMP */
 #define profile_flip_buffers()		do { } while (0)
 #define profile_discard_flip_buffers()	do { } while (0)
+#define profile_cpu_callback		NULL
 
-void profile_hit(int type, void *__pc)
+void profile_hits(int type, void *__pc, unsigned int nr_hits)
 {
 	unsigned long pc;
 
 	if (prof_on != type || !prof_buffer)
 		return;
 	pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
-	atomic_inc(&prof_buffer[min(pc, prof_len - 1)]);
+	atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
 }
 #endif /* !CONFIG_SMP */
 
@@ -442,7 +462,8 @@
 	read = 0;
 
 	while (p < sizeof(unsigned int) && count > 0) {
-		put_user(*((char *)(&sample_step)+p),buf);
+		if (put_user(*((char *)(&sample_step)+p),buf))
+			return -EFAULT;
 		buf++; p++; count--; read++;
 	}
 	pnt = (char *)prof_buffer + p - sizeof(atomic_t);
@@ -480,7 +501,7 @@
 	return count;
 }
 
-static struct file_operations proc_profile_operations = {
+static const struct file_operations proc_profile_operations = {
 	.read		= read_profile,
 	.write		= write_profile,
 };
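
For reference, the sleep profiling added above is selected like the existing
modes, via the profile= boot parameter; the optional value after the comma is
prof_shift, i.e. how many bytes of kernel text share one counter.  The
readprofile invocation is just one plausible way to inspect the result, not
something this patch mandates:

	profile=sleep,2		# one counter per 2^2 bytes of kernel text

	readprofile -m /boot/System.map | sort -nr | head
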
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 26bb5ff..3554b76 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -235,12 +235,14 @@
 
 	list = rdp->donelist;
 	while (list) {
-		next = rdp->donelist = list->next;
+		next = list->next;
+		prefetch(next);
 		list->func(list);
 		list = next;
 		if (++count >= rdp->blimit)
 			break;
 	}
+	rdp->donelist = list;
 
 	local_irq_disable();
 	rdp->qlen -= count;
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index e2bda18..c52f981 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -401,7 +401,7 @@
 	cleanup_srcu_struct(&srcu_ctl);
 }
 
-static int srcu_torture_read_lock(void)
+static int srcu_torture_read_lock(void) __acquires(&srcu_ctl)
 {
 	return srcu_read_lock(&srcu_ctl);
 }
@@ -419,7 +419,7 @@
 		schedule_timeout_interruptible(longdelay);
 }
 
-static void srcu_torture_read_unlock(int idx)
+static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
 {
 	srcu_read_unlock(&srcu_ctl, idx);
 }
diff --git a/kernel/relay.c b/kernel/relay.c
index f04bbdb..75a3a9a 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -308,9 +308,10 @@
  *	reason waking is deferred is that calling directly from write
  *	causes problems if you're writing from say the scheduler.
  */
-static void wakeup_readers(void *private)
+static void wakeup_readers(struct work_struct *work)
 {
-	struct rchan_buf *buf = private;
+	struct rchan_buf *buf =
+		container_of(work, struct rchan_buf, wake_readers.work);
 	wake_up_interruptible(&buf->read_wait);
 }
 
@@ -328,7 +329,7 @@
 	if (init) {
 		init_waitqueue_head(&buf->read_wait);
 		kref_init(&buf->kref);
-		INIT_WORK(&buf->wake_readers, NULL, NULL);
+		INIT_DELAYED_WORK(&buf->wake_readers, NULL);
 	} else {
 		cancel_delayed_work(&buf->wake_readers);
 		flush_scheduled_work();
@@ -549,7 +550,8 @@
 			buf->padding[old_subbuf];
 		smp_mb();
 		if (waitqueue_active(&buf->read_wait)) {
-			PREPARE_WORK(&buf->wake_readers, wakeup_readers, buf);
+			PREPARE_DELAYED_WORK(&buf->wake_readers,
+					     wakeup_readers);
 			schedule_delayed_work(&buf->wake_readers, 1);
 		}
 	}
@@ -1011,7 +1013,7 @@
 				       actor, &desc);
 }
 
-struct file_operations relay_file_operations = {
+const struct file_operations relay_file_operations = {
 	.open		= relay_file_open,
 	.poll		= relay_file_poll,
 	.mmap		= relay_file_mmap,
diff --git a/kernel/resource.c b/kernel/resource.c
index 6de60c1..7b9a497 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -88,7 +88,7 @@
 	return 0;
 }
 
-static struct seq_operations resource_op = {
+static const struct seq_operations resource_op = {
 	.start	= r_start,
 	.next	= r_next,
 	.stop	= r_stop,
@@ -115,14 +115,14 @@
 	return res;
 }
 
-static struct file_operations proc_ioports_operations = {
+static const struct file_operations proc_ioports_operations = {
 	.open		= ioports_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
 	.release	= seq_release,
 };
 
-static struct file_operations proc_iomem_operations = {
+static const struct file_operations proc_iomem_operations = {
 	.open		= iomem_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
index 6dcea9d..015fc63 100644
--- a/kernel/rtmutex-tester.c
+++ b/kernel/rtmutex-tester.c
@@ -13,6 +13,7 @@
 #include <linux/spinlock.h>
 #include <linux/sysdev.h>
 #include <linux/timer.h>
+#include <linux/freezer.h>
 
 #include "rtmutex.h"
 
diff --git a/kernel/sched.c b/kernel/sched.c
index 3399701..f385eff 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -34,7 +34,7 @@
 #include <linux/security.h>
 #include <linux/notifier.h>
 #include <linux/profile.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <linux/vmalloc.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
@@ -505,7 +505,7 @@
 	return res;
 }
 
-struct file_operations proc_schedstat_operations = {
+const struct file_operations proc_schedstat_operations = {
 	.open    = schedstat_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
@@ -948,6 +948,17 @@
 	}
 #endif
 
+	/*
+	 * Sleep time is in units of nanosecs, so shift by 20 to get a
+	 * milliseconds-range estimation of the amount of time that the task
+	 * spent sleeping:
+	 */
+	if (unlikely(prof_on == SLEEP_PROFILING)) {
+		if (p->state == TASK_UNINTERRUPTIBLE)
+			profile_hits(SLEEP_PROFILING, (void *)get_wchan(p),
+				     (now - p->timestamp) >> 20);
+	}
+
 	if (!rt_task(p))
 		p->prio = recalc_task_prio(p, now);
 
@@ -3333,6 +3344,7 @@
 		printk(KERN_ERR "BUG: scheduling while atomic: "
 			"%s/0x%08x/%d\n",
 			current->comm, preempt_count(), current->pid);
+		debug_show_held_locks(current);
 		dump_stack();
 	}
 	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
@@ -4804,18 +4816,18 @@
 		show_stack(p, NULL);
 }
 
-void show_state(void)
+void show_state_filter(unsigned long state_filter)
 {
 	struct task_struct *g, *p;
 
 #if (BITS_PER_LONG == 32)
 	printk("\n"
-	       "                                               sibling\n");
-	printk("  task             PC      pid father child younger older\n");
+	       "                         free                        sibling\n");
+	printk("  task             PC    stack   pid father child younger older\n");
 #else
 	printk("\n"
-	       "                                                       sibling\n");
-	printk("  task                 PC          pid father child younger older\n");
+	       "                                 free                        sibling\n");
+	printk("  task                 PC        stack   pid father child younger older\n");
 #endif
 	read_lock(&tasklist_lock);
 	do_each_thread(g, p) {
@@ -4824,11 +4836,16 @@
 		 * console might take a lot of time:
 		 */
 		touch_nmi_watchdog();
-		show_task(p);
+		if (p->state & state_filter)
+			show_task(p);
 	} while_each_thread(g, p);
 
 	read_unlock(&tasklist_lock);
-	debug_show_all_locks();
+	/*
+	 * Only show locks if all tasks are dumped:
+	 */
+	if (state_filter == -1)
+		debug_show_all_locks();
 }
 
 /**
@@ -6723,8 +6740,6 @@
 	    sched_smt_power_savings_store);
 #endif
 
-
-#ifdef CONFIG_HOTPLUG_CPU
 /*
  * Force a reinitialization of the sched domains hierarchy.  The domains
  * and groups cannot be updated in place without racing with the balancing
@@ -6757,7 +6772,6 @@
 
 	return NOTIFY_OK;
 }
-#endif
 
 void __init sched_init_smp(void)
 {
@@ -6867,6 +6881,7 @@
 				" context at %s:%d\n", file, line);
 		printk("in_atomic():%d, irqs_disabled():%d\n",
 			in_atomic(), irqs_disabled());
+		debug_show_held_locks(current);
 		dump_stack();
 	}
 #endif
diff --git a/kernel/signal.c b/kernel/signal.c
index df18c16..ec81def 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -23,6 +23,7 @@
 #include <linux/ptrace.h>
 #include <linux/signal.h>
 #include <linux/capability.h>
+#include <linux/freezer.h>
 #include <asm/param.h>
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -33,7 +34,7 @@
  * SLAB caches for signal bits.
  */
 
-static kmem_cache_t *sigqueue_cachep;
+static struct kmem_cache *sigqueue_cachep;
 
 /*
  * In POSIX a signal is sent either to a specific thread (Linux task)
@@ -1133,8 +1134,7 @@
 	return error;
 }
 
-int
-kill_proc_info(int sig, struct siginfo *info, pid_t pid)
+static int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
 {
 	int error;
 	rcu_read_lock();
diff --git a/kernel/softirq.c b/kernel/softirq.c
index bf25015..918e52d 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -574,8 +574,6 @@
 
 	switch (action) {
 	case CPU_UP_PREPARE:
-		BUG_ON(per_cpu(tasklet_vec, hotcpu).list);
-		BUG_ON(per_cpu(tasklet_hi_vec, hotcpu).list);
 		p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
 		if (IS_ERR(p)) {
 			printk("ksoftirqd for %i failed\n", hotcpu);
diff --git a/kernel/sys.c b/kernel/sys.c
index 98489d8..a0c1a29 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -880,7 +880,7 @@
 	return 0;
 }
 
-static void deferred_cad(void *dummy)
+static void deferred_cad(struct work_struct *dummy)
 {
 	kernel_restart(NULL);
 }
@@ -892,7 +892,7 @@
  */
 void ctrl_alt_del(void)
 {
-	static DECLARE_WORK(cad_work, deferred_cad, NULL);
+	static DECLARE_WORK(cad_work, deferred_cad);
 
 	if (C_A_D)
 		schedule_work(&cad_work);
@@ -1102,14 +1102,14 @@
 asmlinkage long sys_setuid(uid_t uid)
 {
 	int old_euid = current->euid;
-	int old_ruid, old_suid, new_ruid, new_suid;
+	int old_ruid, old_suid, new_suid;
 	int retval;
 
 	retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID);
 	if (retval)
 		return retval;
 
-	old_ruid = new_ruid = current->uid;
+	old_ruid = current->uid;
 	old_suid = current->suid;
 	new_suid = old_suid;
 	
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 6fc5e17..8e9f00f 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -171,7 +171,7 @@
 static ssize_t proc_writesys(struct file *, const char __user *, size_t, loff_t *);
 static int proc_opensys(struct inode *, struct file *);
 
-struct file_operations proc_sys_file_operations = {
+const struct file_operations proc_sys_file_operations = {
 	.open		= proc_opensys,
 	.read		= proc_readsys,
 	.write		= proc_writesys,
@@ -986,17 +986,6 @@
 		.extra1		= &zero,
 	},
 #endif
-#ifdef CONFIG_SWAP
-	{
-		.ctl_name	= VM_SWAP_TOKEN_TIMEOUT,
-		.procname	= "swap_token_timeout",
-		.data		= &swap_token_default_timeout,
-		.maxlen		= sizeof(swap_token_default_timeout),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec_jiffies,
-		.strategy	= &sysctl_jiffies,
-	},
-#endif
 #ifdef CONFIG_NUMA
 	{
 		.ctl_name	= VM_ZONE_RECLAIM_MODE,
@@ -1895,7 +1884,7 @@
 			p = buf;
 			if (*p == '-' && left > 1) {
 				neg = 1;
-				left--, p++;
+				p++;
 			}
 			if (*p < '0' || *p > '9')
 				break;
@@ -2146,7 +2135,7 @@
 			p = buf;
 			if (*p == '-' && left > 1) {
 				neg = 1;
-				left--, p++;
+				p++;
 			}
 			if (*p < '0' || *p > '9')
 				break;
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index d3d2891..4c3476f 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -34,7 +34,7 @@
 
 static DEFINE_PER_CPU(__u32, taskstats_seqnum) = { 0 };
 static int family_registered;
-kmem_cache_t *taskstats_cache;
+struct kmem_cache *taskstats_cache;
 
 static struct genl_family family = {
 	.id		= GENL_ID_GENERATE,
@@ -69,7 +69,7 @@
 };
 
 static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
-			void **replyp, size_t size)
+				size_t size)
 {
 	struct sk_buff *skb;
 	void *reply;
@@ -94,7 +94,6 @@
 	}
 
 	*skbp = skb;
-	*replyp = reply;
 	return 0;
 }
 
@@ -119,10 +118,10 @@
 /*
  * Send taskstats data in @skb to listeners registered for @cpu's exit data
  */
-static void send_cpu_listeners(struct sk_buff *skb, unsigned int cpu)
+static void send_cpu_listeners(struct sk_buff *skb,
+					struct listener_list *listeners)
 {
 	struct genlmsghdr *genlhdr = nlmsg_data((struct nlmsghdr *)skb->data);
-	struct listener_list *listeners;
 	struct listener *s, *tmp;
 	struct sk_buff *skb_next, *skb_cur = skb;
 	void *reply = genlmsg_data(genlhdr);
@@ -135,7 +134,6 @@
 	}
 
 	rc = 0;
-	listeners = &per_cpu(listener_array, cpu);
 	down_read(&listeners->sem);
 	list_for_each_entry(s, &listeners->list, list) {
 		skb_next = NULL;
@@ -186,6 +184,7 @@
 	} else
 		get_task_struct(tsk);
 
+	memset(stats, 0, sizeof(*stats));
 	/*
 	 * Each accounting subsystem adds calls to its functions to
 	 * fill in relevant parts of struct taskstats as follows
@@ -228,6 +227,8 @@
 
 	if (first->signal->stats)
 		memcpy(stats, first->signal->stats, sizeof(*stats));
+	else
+		memset(stats, 0, sizeof(*stats));
 
 	tsk = first;
 	do {
@@ -344,14 +345,36 @@
 	return ret;
 }
 
+static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
+{
+	struct nlattr *na, *ret;
+	int aggr;
+
+	aggr = (type == TASKSTATS_TYPE_PID)
+			? TASKSTATS_TYPE_AGGR_PID
+			: TASKSTATS_TYPE_AGGR_TGID;
+
+	na = nla_nest_start(skb, aggr);
+	if (!na)
+		goto err;
+	if (nla_put(skb, type, sizeof(pid), &pid) < 0)
+		goto err;
+	ret = nla_reserve(skb, TASKSTATS_TYPE_STATS, sizeof(struct taskstats));
+	if (!ret)
+		goto err;
+	nla_nest_end(skb, na);
+
+	return nla_data(ret);
+err:
+	return NULL;
+}
+
 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
 {
 	int rc = 0;
 	struct sk_buff *rep_skb;
-	struct taskstats stats;
-	void *reply;
+	struct taskstats *stats;
 	size_t size;
-	struct nlattr *na;
 	cpumask_t mask;
 
 	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], &mask);
@@ -372,83 +395,71 @@
 	size = nla_total_size(sizeof(u32)) +
 		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
 
-	memset(&stats, 0, sizeof(stats));
-	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, &reply, size);
+	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
 	if (rc < 0)
 		return rc;
 
+	rc = -EINVAL;
 	if (info->attrs[TASKSTATS_CMD_ATTR_PID]) {
 		u32 pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]);
-		rc = fill_pid(pid, NULL, &stats);
-		if (rc < 0)
+		stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, pid);
+		if (!stats)
 			goto err;
 
-		na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_PID);
-		NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_PID, pid);
-		NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS,
-				stats);
+		rc = fill_pid(pid, NULL, stats);
+		if (rc < 0)
+			goto err;
 	} else if (info->attrs[TASKSTATS_CMD_ATTR_TGID]) {
 		u32 tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]);
-		rc = fill_tgid(tgid, NULL, &stats);
-		if (rc < 0)
+		stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tgid);
+		if (!stats)
 			goto err;
 
-		na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_TGID);
-		NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_TGID, tgid);
-		NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS,
-				stats);
-	} else {
-		rc = -EINVAL;
+		rc = fill_tgid(tgid, NULL, stats);
+		if (rc < 0)
+			goto err;
+	} else
 		goto err;
-	}
-
-	nla_nest_end(rep_skb, na);
 
 	return send_reply(rep_skb, info->snd_pid);
-
-nla_put_failure:
-	rc = genlmsg_cancel(rep_skb, reply);
 err:
 	nlmsg_free(rep_skb);
 	return rc;
 }
 
-void taskstats_exit_alloc(struct taskstats **ptidstats, unsigned int *mycpu)
+static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk)
 {
-	struct listener_list *listeners;
-	struct taskstats *tmp;
-	/*
-	 * This is the cpu on which the task is exiting currently and will
-	 * be the one for which the exit event is sent, even if the cpu
-	 * on which this function is running changes later.
-	 */
-	*mycpu = raw_smp_processor_id();
+	struct signal_struct *sig = tsk->signal;
+	struct taskstats *stats;
 
-	*ptidstats = NULL;
-	tmp = kmem_cache_zalloc(taskstats_cache, SLAB_KERNEL);
-	if (!tmp)
-		return;
+	if (sig->stats || thread_group_empty(tsk))
+		goto ret;
 
-	listeners = &per_cpu(listener_array, *mycpu);
-	down_read(&listeners->sem);
-	if (!list_empty(&listeners->list)) {
-		*ptidstats = tmp;
-		tmp = NULL;
+	/* No problem if kmem_cache_zalloc() fails */
+	stats = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);
+
+	spin_lock_irq(&tsk->sighand->siglock);
+	if (!sig->stats) {
+		sig->stats = stats;
+		stats = NULL;
 	}
-	up_read(&listeners->sem);
-	kfree(tmp);
+	spin_unlock_irq(&tsk->sighand->siglock);
+
+	if (stats)
+		kmem_cache_free(taskstats_cache, stats);
+ret:
+	return sig->stats;
 }
 
 /* Send pid data out on exit */
-void taskstats_exit_send(struct task_struct *tsk, struct taskstats *tidstats,
-			int group_dead, unsigned int mycpu)
+void taskstats_exit(struct task_struct *tsk, int group_dead)
 {
 	int rc;
+	struct listener_list *listeners;
+	struct taskstats *stats;
 	struct sk_buff *rep_skb;
-	void *reply;
 	size_t size;
 	int is_thread_group;
-	struct nlattr *na;
 
 	if (!family_registered)
 		return;
@@ -459,7 +470,7 @@
 	size = nla_total_size(sizeof(u32)) +
 		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
 
-	is_thread_group = (tsk->signal->stats != NULL);
+	is_thread_group = !!taskstats_tgid_alloc(tsk);
 	if (is_thread_group) {
 		/* PID + STATS + TGID + STATS */
 		size = 2 * size;
@@ -467,49 +478,39 @@
 		fill_tgid_exit(tsk);
 	}
 
-	if (!tidstats)
+	listeners = &__raw_get_cpu_var(listener_array);
+	if (list_empty(&listeners->list))
 		return;
 
-	rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, &reply, size);
+	rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, size);
 	if (rc < 0)
-		goto ret;
+		return;
 
-	rc = fill_pid(tsk->pid, tsk, tidstats);
+	stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, tsk->pid);
+	if (!stats)
+		goto err;
+
+	rc = fill_pid(tsk->pid, tsk, stats);
 	if (rc < 0)
-		goto err_skb;
-
-	na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_PID);
-	NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_PID, (u32)tsk->pid);
-	NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS,
-			*tidstats);
-	nla_nest_end(rep_skb, na);
-
-	if (!is_thread_group)
-		goto send;
+		goto err;
 
 	/*
 	 * Doesn't matter if tsk is the leader or the last group member leaving
 	 */
-	if (!group_dead)
+	if (!is_thread_group || !group_dead)
 		goto send;
 
-	na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_TGID);
-	NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_TGID, (u32)tsk->tgid);
-	/* No locking needed for tsk->signal->stats since group is dead */
-	NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS,
-			*tsk->signal->stats);
-	nla_nest_end(rep_skb, na);
+	stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tsk->tgid);
+	if (!stats)
+		goto err;
+
+	memcpy(stats, tsk->signal->stats, sizeof(*stats));
 
 send:
-	send_cpu_listeners(rep_skb, mycpu);
+	send_cpu_listeners(rep_skb, listeners);
 	return;
-
-nla_put_failure:
-	genlmsg_cancel(rep_skb, reply);
-err_skb:
+err:
 	nlmsg_free(rep_skb);
-ret:
-	return;
 }
 
 static struct genl_ops taskstats_ops = {
diff --git a/kernel/user.c b/kernel/user.c
index 220e586..4869563 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -26,7 +26,7 @@
 #define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
 #define uidhashentry(uid)	(uidhash_table + __uidhashfn((uid)))
 
-static kmem_cache_t *uid_cachep;
+static struct kmem_cache *uid_cachep;
 static struct list_head uidhash_table[UIDHASH_SZ];
 
 /*
@@ -132,7 +132,7 @@
 	if (!up) {
 		struct user_struct *new;
 
-		new = kmem_cache_alloc(uid_cachep, SLAB_KERNEL);
+		new = kmem_cache_alloc(uid_cachep, GFP_KERNEL);
 		if (!new)
 			return NULL;
 		new->uid = uid;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 17c2f03d..c525731 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -29,6 +29,9 @@
 #include <linux/kthread.h>
 #include <linux/hardirq.h>
 #include <linux/mempolicy.h>
+#include <linux/freezer.h>
+#include <linux/kallsyms.h>
+#include <linux/debug_locks.h>
 
 /*
  * The per-CPU workqueue (if single thread, we always use the first
@@ -55,6 +58,8 @@
 	struct task_struct *thread;
 
 	int run_depth;		/* Detect run_workqueue() recursion depth */
+
+	int freezeable;		/* Freeze the thread during suspend */
 } ____cacheline_aligned;
 
 /*
@@ -80,6 +85,29 @@
 	return list_empty(&wq->list);
 }
 
+static inline void set_wq_data(struct work_struct *work, void *wq)
+{
+	unsigned long new, old, res;
+
+	/* assume the pending flag is already set and that the task has already
+	 * been queued on this workqueue */
+	new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
+	res = work->management;
+	if (res != new) {
+		do {
+			old = res;
+			new = (unsigned long) wq;
+			new |= (old & WORK_STRUCT_FLAG_MASK);
+			res = cmpxchg(&work->management, old, new);
+		} while (res != old);
+	}
+}
+
+static inline void *get_wq_data(struct work_struct *work)
+{
+	return (void *) (work->management & WORK_STRUCT_WQ_DATA_MASK);
+}
+
 /* Preempt must be disabled. */
 static void __queue_work(struct cpu_workqueue_struct *cwq,
 			 struct work_struct *work)
@@ -87,7 +115,7 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&cwq->lock, flags);
-	work->wq_data = cwq;
+	set_wq_data(work, cwq);
 	list_add_tail(&work->entry, &cwq->worklist);
 	cwq->insert_sequence++;
 	wake_up(&cwq->more_work);
@@ -108,7 +136,7 @@
 {
 	int ret = 0, cpu = get_cpu();
 
-	if (!test_and_set_bit(0, &work->pending)) {
+	if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
 		if (unlikely(is_single_threaded(wq)))
 			cpu = singlethread_cpu;
 		BUG_ON(!list_empty(&work->entry));
@@ -122,38 +150,42 @@
 
 static void delayed_work_timer_fn(unsigned long __data)
 {
-	struct work_struct *work = (struct work_struct *)__data;
-	struct workqueue_struct *wq = work->wq_data;
+	struct delayed_work *dwork = (struct delayed_work *)__data;
+	struct workqueue_struct *wq = get_wq_data(&dwork->work);
 	int cpu = smp_processor_id();
 
 	if (unlikely(is_single_threaded(wq)))
 		cpu = singlethread_cpu;
 
-	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
+	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
 }
 
 /**
  * queue_delayed_work - queue work on a workqueue after delay
  * @wq: workqueue to use
- * @work: work to queue
+ * @work: delayable work to queue
  * @delay: number of jiffies to wait before queueing
  *
  * Returns 0 if @work was already on a queue, non-zero otherwise.
  */
 int fastcall queue_delayed_work(struct workqueue_struct *wq,
-			struct work_struct *work, unsigned long delay)
+			struct delayed_work *dwork, unsigned long delay)
 {
 	int ret = 0;
-	struct timer_list *timer = &work->timer;
+	struct timer_list *timer = &dwork->timer;
+	struct work_struct *work = &dwork->work;
 
-	if (!test_and_set_bit(0, &work->pending)) {
+	if (delay == 0)
+		return queue_work(wq, work);
+
+	if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
 		BUG_ON(timer_pending(timer));
 		BUG_ON(!list_empty(&work->entry));
 
 		/* This stores wq for the moment, for the timer_fn */
-		work->wq_data = wq;
+		set_wq_data(work, wq);
 		timer->expires = jiffies + delay;
-		timer->data = (unsigned long)work;
+		timer->data = (unsigned long)dwork;
 		timer->function = delayed_work_timer_fn;
 		add_timer(timer);
 		ret = 1;
@@ -172,19 +204,20 @@
  * Returns 0 if @work was already on a queue, non-zero otherwise.
  */
 int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
-			struct work_struct *work, unsigned long delay)
+			struct delayed_work *dwork, unsigned long delay)
 {
 	int ret = 0;
-	struct timer_list *timer = &work->timer;
+	struct timer_list *timer = &dwork->timer;
+	struct work_struct *work = &dwork->work;
 
-	if (!test_and_set_bit(0, &work->pending)) {
+	if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
 		BUG_ON(timer_pending(timer));
 		BUG_ON(!list_empty(&work->entry));
 
 		/* This stores wq for the moment, for the timer_fn */
-		work->wq_data = wq;
+		set_wq_data(work, wq);
 		timer->expires = jiffies + delay;
-		timer->data = (unsigned long)work;
+		timer->data = (unsigned long)dwork;
 		timer->function = delayed_work_timer_fn;
 		add_timer_on(timer, cpu);
 		ret = 1;
@@ -212,15 +245,26 @@
 	while (!list_empty(&cwq->worklist)) {
 		struct work_struct *work = list_entry(cwq->worklist.next,
 						struct work_struct, entry);
-		void (*f) (void *) = work->func;
-		void *data = work->data;
+		work_func_t f = work->func;
 
 		list_del_init(cwq->worklist.next);
 		spin_unlock_irqrestore(&cwq->lock, flags);
 
-		BUG_ON(work->wq_data != cwq);
-		clear_bit(0, &work->pending);
-		f(data);
+		BUG_ON(get_wq_data(work) != cwq);
+		if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management))
+			work_release(work);
+		f(work);
+
+		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
+			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
+					"%s/0x%08x/%d\n",
+					current->comm, preempt_count(),
+				       	current->pid);
+			printk(KERN_ERR "    last function: ");
+			print_symbol("%s\n", (unsigned long)f);
+			debug_show_held_locks(current);
+			dump_stack();
+		}
 
 		spin_lock_irqsave(&cwq->lock, flags);
 		cwq->remove_sequence++;
@@ -237,7 +281,8 @@
 	struct k_sigaction sa;
 	sigset_t blocked;
 
-	current->flags |= PF_NOFREEZE;
+	if (!cwq->freezeable)
+		current->flags |= PF_NOFREEZE;
 
 	set_user_nice(current, -5);
 
@@ -260,6 +305,9 @@
 
 	set_current_state(TASK_INTERRUPTIBLE);
 	while (!kthread_should_stop()) {
+		if (cwq->freezeable)
+			try_to_freeze();
+
 		add_wait_queue(&cwq->more_work, &wait);
 		if (list_empty(&cwq->worklist))
 			schedule();
@@ -336,7 +384,7 @@
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
 static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
-						   int cpu)
+						   int cpu, int freezeable)
 {
 	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
 	struct task_struct *p;
@@ -346,6 +394,7 @@
 	cwq->thread = NULL;
 	cwq->insert_sequence = 0;
 	cwq->remove_sequence = 0;
+	cwq->freezeable = freezeable;
 	INIT_LIST_HEAD(&cwq->worklist);
 	init_waitqueue_head(&cwq->more_work);
 	init_waitqueue_head(&cwq->work_done);
@@ -361,7 +410,7 @@
 }
 
 struct workqueue_struct *__create_workqueue(const char *name,
-					    int singlethread)
+					    int singlethread, int freezeable)
 {
 	int cpu, destroy = 0;
 	struct workqueue_struct *wq;
@@ -381,7 +430,7 @@
 	mutex_lock(&workqueue_mutex);
 	if (singlethread) {
 		INIT_LIST_HEAD(&wq->list);
-		p = create_workqueue_thread(wq, singlethread_cpu);
+		p = create_workqueue_thread(wq, singlethread_cpu, freezeable);
 		if (!p)
 			destroy = 1;
 		else
@@ -389,7 +438,7 @@
 	} else {
 		list_add(&wq->list, &workqueues);
 		for_each_online_cpu(cpu) {
-			p = create_workqueue_thread(wq, cpu);
+			p = create_workqueue_thread(wq, cpu, freezeable);
 			if (p) {
 				kthread_bind(p, cpu);
 				wake_up_process(p);
@@ -468,38 +517,37 @@
 
 /**
  * schedule_delayed_work - put work task in global workqueue after delay
- * @work: job to be done
- * @delay: number of jiffies to wait
+ * @dwork: job to be done
+ * @delay: number of jiffies to wait or 0 for immediate execution
  *
  * After waiting for a given time this puts a job in the kernel-global
  * workqueue.
  */
-int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay)
+int fastcall schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
 {
-	return queue_delayed_work(keventd_wq, work, delay);
+	return queue_delayed_work(keventd_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work);
 
 /**
  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
  * @cpu: cpu to use
- * @work: job to be done
+ * @dwork: job to be done
  * @delay: number of jiffies to wait
  *
  * After waiting for a given time this puts a job in the kernel-global
  * workqueue on the specified CPU.
  */
 int schedule_delayed_work_on(int cpu,
-			struct work_struct *work, unsigned long delay)
+			struct delayed_work *dwork, unsigned long delay)
 {
-	return queue_delayed_work_on(cpu, keventd_wq, work, delay);
+	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work_on);
 
 /**
  * schedule_on_each_cpu - call a function on each online CPU from keventd
  * @func: the function to call
- * @info: a pointer to pass to func()
  *
  * Returns zero on success.
  * Returns -ve errno on failure.
@@ -508,7 +556,7 @@
  *
  * schedule_on_each_cpu() is very slow.
  */
-int schedule_on_each_cpu(void (*func)(void *info), void *info)
+int schedule_on_each_cpu(work_func_t func)
 {
 	int cpu;
 	struct work_struct *works;
@@ -519,7 +567,7 @@
 
 	mutex_lock(&workqueue_mutex);
 	for_each_online_cpu(cpu) {
-		INIT_WORK(per_cpu_ptr(works, cpu), func, info);
+		INIT_WORK(per_cpu_ptr(works, cpu), func);
 		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
 				per_cpu_ptr(works, cpu));
 	}
@@ -539,12 +587,12 @@
  * cancel_rearming_delayed_workqueue - reliably kill off a delayed
  *			work whose handler rearms the delayed work.
  * @wq:   the controlling workqueue structure
- * @work: the delayed work struct
+ * @dwork: the delayed work struct
  */
 void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
-				       struct work_struct *work)
+				       struct delayed_work *dwork)
 {
-	while (!cancel_delayed_work(work))
+	while (!cancel_delayed_work(dwork))
 		flush_workqueue(wq);
 }
 EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
@@ -552,18 +600,17 @@
 /**
  * cancel_rearming_delayed_work - reliably kill off a delayed keventd
  *			work whose handler rearms the delayed work.
- * @work: the delayed work struct
+ * @dwork: the delayed work struct
  */
-void cancel_rearming_delayed_work(struct work_struct *work)
+void cancel_rearming_delayed_work(struct delayed_work *dwork)
 {
-	cancel_rearming_delayed_workqueue(keventd_wq, work);
+	cancel_rearming_delayed_workqueue(keventd_wq, dwork);
 }
 EXPORT_SYMBOL(cancel_rearming_delayed_work);
 
 /**
  * execute_in_process_context - reliably execute the routine with user context
  * @fn:		the function to execute
- * @data:	data to pass to the function
  * @ew:		guaranteed storage for the execute work structure (must
  *		be available when the work executes)
  *
@@ -573,15 +620,14 @@
  * Returns:	0 - function was executed
  *		1 - function was scheduled for execution
  */
-int execute_in_process_context(void (*fn)(void *data), void *data,
-			       struct execute_work *ew)
+int execute_in_process_context(work_func_t fn, struct execute_work *ew)
 {
 	if (!in_interrupt()) {
-		fn(data);
+		fn(&ew->work);
 		return 0;
 	}
 
-	INIT_WORK(&ew->work, fn, data);
+	INIT_WORK(&ew->work, fn);
 	schedule_work(&ew->work);
 
 	return 1;
@@ -609,7 +655,6 @@
 
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 /* Take the work from this (downed) CPU. */
 static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
 {
@@ -642,7 +687,7 @@
 		mutex_lock(&workqueue_mutex);
 		/* Create a new workqueue thread for it. */
 		list_for_each_entry(wq, &workqueues, list) {
-			if (!create_workqueue_thread(wq, hotcpu)) {
+			if (!create_workqueue_thread(wq, hotcpu, 0)) {
 				printk("workqueue for %i failed\n", hotcpu);
 				return NOTIFY_BAD;
 			}
@@ -692,7 +737,6 @@
 
 	return NOTIFY_OK;
 }
-#endif
 
 void init_workqueues(void)
 {
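
The work_struct rework above drops the caller-supplied data pointer, so users
are converted along the lines of the relay.c and sys.c hunks earlier in this
diff: the handler now receives the work_struct itself and recovers its
container with container_of().  A minimal sketch of the pattern (the driver
and function names below are made up for illustration):

	#include <linux/workqueue.h>

	struct my_device {				/* hypothetical driver state */
		struct delayed_work poll_work;
	};

	static void my_poll(struct work_struct *work)
	{
		struct my_device *dev =
			container_of(work, struct my_device, poll_work.work);

		/* ... do the periodic work on dev, then re-arm ... */
		schedule_delayed_work(&dev->poll_work, HZ);
	}

	static void my_device_start(struct my_device *dev)
	{
		/* old API: INIT_WORK(&dev->poll_work, my_poll, dev); */
		INIT_DELAYED_WORK(&dev->poll_work, my_poll);
		schedule_delayed_work(&dev->poll_work, HZ);
	}
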
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index d367910..b75fed7 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1,6 +1,7 @@
 
 config PRINTK_TIME
 	bool "Show timing information on printks"
+	depends on PRINTK
 	help
 	  Selecting this option causes timing information to be
 	  included in printk output.  This allows you to measure
diff --git a/lib/Makefile b/lib/Makefile
index cf98fab..fea8f90 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -25,7 +25,7 @@
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
 lib-$(CONFIG_SEMAPHORE_SLEEPERS) += semaphore-sleepers.o
 lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
-lib-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
+obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
 obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
 obj-$(CONFIG_PLIST) += plist.o
 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
diff --git a/lib/cmdline.c b/lib/cmdline.c
index 0331ed8..8a5b530 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -16,6 +16,23 @@
 #include <linux/kernel.h>
 #include <linux/string.h>
 
+/*
+ *	If a hyphen was found in get_option, this will handle the
+ *	range of numbers, M-N.  This will expand the range and insert
+ *	the values[M, M+1, ..., N] into the ints array in get_options.
+ */
+
+static int get_range(char **str, int *pint)
+{
+	int x, inc_counter, upper_range;
+
+	(*str)++;
+	upper_range = simple_strtol((*str), NULL, 0);
+	inc_counter = upper_range - *pint;
+	for (x = *pint; x < upper_range; x++)
+		*pint++ = x;
+	return inc_counter;
+}
 
 /**
  *	get_option - Parse integer from an option string
@@ -29,6 +46,7 @@
  *	0 : no int in string
  *	1 : int found, no subsequent comma
  *	2 : int found including a subsequent comma
+ *	3 : hyphen found to denote a range
  */
 
 int get_option (char **str, int *pint)
@@ -44,6 +62,8 @@
 		(*str)++;
 		return 2;
 	}
+	if (**str == '-')
+		return 3;
 
 	return 1;
 }
@@ -55,7 +75,8 @@
  *	@ints: integer array
  *
  *	This function parses a string containing a comma-separated
- *	list of integers.  The parse halts when the array is
+ *	list of integers, a hyphen-separated range of _positive_ integers,
+ *	or a combination of both.  The parse halts when the array is
  *	full, or when no more numbers can be retrieved from the
  *	string.
  *
@@ -72,6 +93,18 @@
 		res = get_option ((char **)&str, ints + i);
 		if (res == 0)
 			break;
+		if (res == 3) {
+			int range_nums;
+			range_nums = get_range((char **)&str, ints + i);
+			if (range_nums < 0)
+				break;
+			/*
+			 * Decrement the result by one to leave out the
+			 * last number in the range.  The next iteration
+			 * will handle the upper number in the range
+			 */
+			i += (range_nums - 1);
+		}
 		i++;
 		if (res == 1)
 			break;
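
The range handling added to get_option()/get_options() above means a string
such as "1-3,8" now expands to 1, 2, 3, 8.  A sketch of a consumer, assuming
the usual lib/cmdline.c convention that get_options() returns the count in
ints[0] and the values from ints[1] on (the parameter name is hypothetical):

	#include <linux/init.h>
	#include <linux/kernel.h>

	static int __init mydev_setup(char *str)
	{
		int ints[9];	/* ints[0] = count, up to 8 values follow */
		int i;

		get_options(str, ARRAY_SIZE(ints), ints);
		for (i = 1; i <= ints[0]; i++)
			printk(KERN_INFO "mydev: using channel %d\n", ints[i]);
		return 1;
	}
	__setup("mydev=", mydev_setup);
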
diff --git a/lib/idr.c b/lib/idr.c
index 16d2143..7185353 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -33,7 +33,7 @@
 #include <linux/string.h>
 #include <linux/idr.h>
 
-static kmem_cache_t *idr_layer_cache;
+static struct kmem_cache *idr_layer_cache;
 
 static struct idr_layer *alloc_layer(struct idr *idp)
 {
@@ -445,7 +445,7 @@
 }
 EXPORT_SYMBOL(idr_replace);
 
-static void idr_cache_ctor(void * idr_layer, kmem_cache_t *idr_layer_cache,
+static void idr_cache_ctor(void * idr_layer, struct kmem_cache *idr_layer_cache,
 		unsigned long flags)
 {
 	memset(idr_layer, 0, sizeof(struct idr_layer));
diff --git a/lib/kobject.c b/lib/kobject.c
index 744a4b1..7ce6dc1 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -111,10 +111,9 @@
 	len = get_kobj_path_length(kobj);
 	if (len == 0)
 		return NULL;
-	path = kmalloc(len, gfp_mask);
+	path = kzalloc(len, gfp_mask);
 	if (!path)
 		return NULL;
-	memset(path, 0x00, len);
 	fill_kobj_path(kobj, path, len);
 
 	return path;
diff --git a/lib/list_debug.c b/lib/list_debug.c
index 7ba9d82..4350ba9 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -21,13 +21,15 @@
 			      struct list_head *next)
 {
 	if (unlikely(next->prev != prev)) {
-		printk(KERN_ERR "list_add corruption. next->prev should be %p, but was %p\n",
-			prev, next->prev);
+		printk(KERN_ERR "list_add corruption. next->prev should be "
+			"prev (%p), but was %p. (next=%p).\n",
+			prev, next->prev, next);
 		BUG();
 	}
 	if (unlikely(prev->next != next)) {
-		printk(KERN_ERR "list_add corruption. prev->next should be %p, but was %p\n",
-			next, prev->next);
+		printk(KERN_ERR "list_add corruption. prev->next should be "
+			"next (%p), but was %p. (prev=%p).\n",
+			next, prev->next, prev);
 		BUG();
 	}
 	next->prev = new;
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 7945787..280332c 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -963,7 +963,9 @@
 			printk("failed|");
 		} else {
 			unexpected_testcase_failures++;
+
 			printk("FAILED|");
+			dump_stack();
 		}
 	} else {
 		testcase_successes++;
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index aa9bfd0..d69ddbe 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -2,6 +2,7 @@
  * Copyright (C) 2001 Momchil Velikov
  * Portions Copyright (C) 2001 Christoph Hellwig
  * Copyright (C) 2005 SGI, Christoph Lameter <clameter@sgi.com>
+ * Copyright (C) 2006 Nick Piggin
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as
@@ -30,6 +31,7 @@
 #include <linux/gfp.h>
 #include <linux/string.h>
 #include <linux/bitops.h>
+#include <linux/rcupdate.h>
 
 
 #ifdef __KERNEL__
@@ -45,7 +47,9 @@
 	((RADIX_TREE_MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG)
 
 struct radix_tree_node {
+	unsigned int	height;		/* Height from the bottom */
 	unsigned int	count;
+	struct rcu_head	rcu_head;
 	void		*slots[RADIX_TREE_MAP_SIZE];
 	unsigned long	tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
 };
@@ -63,7 +67,7 @@
 /*
  * Radix tree node cache.
  */
-static kmem_cache_t *radix_tree_node_cachep;
+static struct kmem_cache *radix_tree_node_cachep;
 
 /*
  * Per-cpu pool of preloaded nodes
@@ -100,13 +104,21 @@
 			rtp->nr--;
 		}
 	}
+	BUG_ON(radix_tree_is_direct_ptr(ret));
 	return ret;
 }
 
+static void radix_tree_node_rcu_free(struct rcu_head *head)
+{
+	struct radix_tree_node *node =
+			container_of(head, struct radix_tree_node, rcu_head);
+	kmem_cache_free(radix_tree_node_cachep, node);
+}
+
 static inline void
 radix_tree_node_free(struct radix_tree_node *node)
 {
-	kmem_cache_free(radix_tree_node_cachep, node);
+	call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
 }
 
 /*
@@ -222,11 +234,12 @@
 	}
 
 	do {
+		unsigned int newheight;
 		if (!(node = radix_tree_node_alloc(root)))
 			return -ENOMEM;
 
 		/* Increase the height.  */
-		node->slots[0] = root->rnode;
+		node->slots[0] = radix_tree_direct_to_ptr(root->rnode);
 
 		/* Propagate the aggregated tag info into the new root */
 		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
@@ -234,9 +247,11 @@
 				tag_set(node, tag, 0);
 		}
 
+		newheight = root->height+1;
+		node->height = newheight;
 		node->count = 1;
-		root->rnode = node;
-		root->height++;
+		rcu_assign_pointer(root->rnode, node);
+		root->height = newheight;
 	} while (height > root->height);
 out:
 	return 0;
@@ -258,6 +273,8 @@
 	int offset;
 	int error;
 
+	BUG_ON(radix_tree_is_direct_ptr(item));
+
 	/* Make sure the tree is high enough.  */
 	if (index > radix_tree_maxindex(root->height)) {
 		error = radix_tree_extend(root, index);
@@ -275,11 +292,12 @@
 			/* Have to add a child node.  */
 			if (!(slot = radix_tree_node_alloc(root)))
 				return -ENOMEM;
+			slot->height = height;
 			if (node) {
-				node->slots[offset] = slot;
+				rcu_assign_pointer(node->slots[offset], slot);
 				node->count++;
 			} else
-				root->rnode = slot;
+				rcu_assign_pointer(root->rnode, slot);
 		}
 
 		/* Go a level down */
@@ -295,11 +313,11 @@
 
 	if (node) {
 		node->count++;
-		node->slots[offset] = item;
+		rcu_assign_pointer(node->slots[offset], item);
 		BUG_ON(tag_get(node, 0, offset));
 		BUG_ON(tag_get(node, 1, offset));
 	} else {
-		root->rnode = item;
+		rcu_assign_pointer(root->rnode, radix_tree_ptr_to_direct(item));
 		BUG_ON(root_tag_get(root, 0));
 		BUG_ON(root_tag_get(root, 1));
 	}
@@ -308,48 +326,53 @@
 }
 EXPORT_SYMBOL(radix_tree_insert);
 
-static inline void **__lookup_slot(struct radix_tree_root *root,
-				   unsigned long index)
-{
-	unsigned int height, shift;
-	struct radix_tree_node **slot;
-
-	height = root->height;
-
-	if (index > radix_tree_maxindex(height))
-		return NULL;
-
-	if (height == 0 && root->rnode)
-		return (void **)&root->rnode;
-
-	shift = (height-1) * RADIX_TREE_MAP_SHIFT;
-	slot = &root->rnode;
-
-	while (height > 0) {
-		if (*slot == NULL)
-			return NULL;
-
-		slot = (struct radix_tree_node **)
-			((*slot)->slots +
-				((index >> shift) & RADIX_TREE_MAP_MASK));
-		shift -= RADIX_TREE_MAP_SHIFT;
-		height--;
-	}
-
-	return (void **)slot;
-}
-
 /**
  *	radix_tree_lookup_slot    -    lookup a slot in a radix tree
  *	@root:		radix tree root
  *	@index:		index key
  *
- *	Lookup the slot corresponding to the position @index in the radix tree
- *	@root. This is useful for update-if-exists operations.
+ *	Returns:  the slot corresponding to the position @index in the
+ *	radix tree @root. This is useful for update-if-exists operations.
+ *
+ *	This function cannot be called under rcu_read_lock, it must be
+ *	excluded from writers, as must the returned slot for subsequent
+ *	use by radix_tree_deref_slot() and radix_tree_replace slot.
+ *	Caller must hold tree write locked across slot lookup and
+ *	replace.
  */
 void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
 {
-	return __lookup_slot(root, index);
+	unsigned int height, shift;
+	struct radix_tree_node *node, **slot;
+
+	node = root->rnode;
+	if (node == NULL)
+		return NULL;
+
+	if (radix_tree_is_direct_ptr(node)) {
+		if (index > 0)
+			return NULL;
+		return (void **)&root->rnode;
+	}
+
+	height = node->height;
+	if (index > radix_tree_maxindex(height))
+		return NULL;
+
+	shift = (height-1) * RADIX_TREE_MAP_SHIFT;
+
+	do {
+		slot = (struct radix_tree_node **)
+			(node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK));
+		node = *slot;
+		if (node == NULL)
+			return NULL;
+
+		shift -= RADIX_TREE_MAP_SHIFT;
+		height--;
+	} while (height > 0);
+
+	return (void **)slot;
 }
 EXPORT_SYMBOL(radix_tree_lookup_slot);
 
@@ -359,13 +382,45 @@
  *	@index:		index key
  *
  *	Lookup the item at the position @index in the radix tree @root.
+ *
+ *	This function can be called under rcu_read_lock, however the caller
+ *	must manage lifetimes of leaf nodes (eg. RCU may also be used to free
+ *	them safely). No RCU barriers are required to access or modify the
+ *	returned item, however.
  */
 void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index)
 {
-	void **slot;
+	unsigned int height, shift;
+	struct radix_tree_node *node, **slot;
 
-	slot = __lookup_slot(root, index);
-	return slot != NULL ? *slot : NULL;
+	node = rcu_dereference(root->rnode);
+	if (node == NULL)
+		return NULL;
+
+	if (radix_tree_is_direct_ptr(node)) {
+		if (index > 0)
+			return NULL;
+		return radix_tree_direct_to_ptr(node);
+	}
+
+	height = node->height;
+	if (index > radix_tree_maxindex(height))
+		return NULL;
+
+	shift = (height-1) * RADIX_TREE_MAP_SHIFT;
+
+	do {
+		slot = (struct radix_tree_node **)
+			(node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK));
+		node = rcu_dereference(*slot);
+		if (node == NULL)
+			return NULL;
+
+		shift -= RADIX_TREE_MAP_SHIFT;
+		height--;
+	} while (height > 0);
+
+	return node;
 }
 EXPORT_SYMBOL(radix_tree_lookup);
 
@@ -495,27 +550,30 @@
 			unsigned long index, unsigned int tag)
 {
 	unsigned int height, shift;
-	struct radix_tree_node *slot;
+	struct radix_tree_node *node;
 	int saw_unset_tag = 0;
 
-	height = root->height;
-	if (index > radix_tree_maxindex(height))
-		return 0;
-
 	/* check the root's tag bit */
 	if (!root_tag_get(root, tag))
 		return 0;
 
-	if (height == 0)
-		return 1;
+	node = rcu_dereference(root->rnode);
+	if (node == NULL)
+		return 0;
+
+	if (radix_tree_is_direct_ptr(node))
+		return (index == 0);
+
+	height = node->height;
+	if (index > radix_tree_maxindex(height))
+		return 0;
 
 	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
-	slot = root->rnode;
 
 	for ( ; ; ) {
 		int offset;
 
-		if (slot == NULL)
+		if (node == NULL)
 			return 0;
 
 		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
@@ -524,15 +582,15 @@
 		 * This is just a debug check.  Later, we can bail as soon as
 		 * we see an unset tag.
 		 */
-		if (!tag_get(slot, tag, offset))
+		if (!tag_get(node, tag, offset))
 			saw_unset_tag = 1;
 		if (height == 1) {
-			int ret = tag_get(slot, tag, offset);
+			int ret = tag_get(node, tag, offset);
 
 			BUG_ON(ret && saw_unset_tag);
 			return !!ret;
 		}
-		slot = slot->slots[offset];
+		node = rcu_dereference(node->slots[offset]);
 		shift -= RADIX_TREE_MAP_SHIFT;
 		height--;
 	}
@@ -541,47 +599,45 @@
 #endif
 
 static unsigned int
-__lookup(struct radix_tree_root *root, void **results, unsigned long index,
+__lookup(struct radix_tree_node *slot, void **results, unsigned long index,
 	unsigned int max_items, unsigned long *next_index)
 {
 	unsigned int nr_found = 0;
 	unsigned int shift, height;
-	struct radix_tree_node *slot;
 	unsigned long i;
 
-	height = root->height;
-	if (height == 0) {
-		if (root->rnode && index == 0)
-			results[nr_found++] = root->rnode;
+	height = slot->height;
+	if (height == 0)
 		goto out;
-	}
-
 	shift = (height-1) * RADIX_TREE_MAP_SHIFT;
-	slot = root->rnode;
 
 	for ( ; height > 1; height--) {
-
-		for (i = (index >> shift) & RADIX_TREE_MAP_MASK ;
-				i < RADIX_TREE_MAP_SIZE; i++) {
+		i = (index >> shift) & RADIX_TREE_MAP_MASK;
+		for (;;) {
 			if (slot->slots[i] != NULL)
 				break;
 			index &= ~((1UL << shift) - 1);
 			index += 1UL << shift;
 			if (index == 0)
 				goto out;	/* 32-bit wraparound */
+			i++;
+			if (i == RADIX_TREE_MAP_SIZE)
+				goto out;
 		}
-		if (i == RADIX_TREE_MAP_SIZE)
-			goto out;
 
 		shift -= RADIX_TREE_MAP_SHIFT;
-		slot = slot->slots[i];
+		slot = rcu_dereference(slot->slots[i]);
+		if (slot == NULL)
+			goto out;
 	}
 
 	/* Bottom level: grab some items */
 	for (i = index & RADIX_TREE_MAP_MASK; i < RADIX_TREE_MAP_SIZE; i++) {
+		struct radix_tree_node *node;
 		index++;
-		if (slot->slots[i]) {
-			results[nr_found++] = slot->slots[i];
+		node = slot->slots[i];
+		if (node) {
+			results[nr_found++] = rcu_dereference(node);
 			if (nr_found == max_items)
 				goto out;
 		}
@@ -603,28 +659,51 @@
  *	*@results.
  *
  *	The implementation is naive.
+ *
+ *	Like radix_tree_lookup, radix_tree_gang_lookup may be called under
+ *	rcu_read_lock. In this case, rather than the returned results being
+ *	an atomic snapshot of the tree at a single point in time, the semantics
+ *	of an RCU protected gang lookup are as though multiple radix_tree_lookups
+ *	have been issued in individual locks, and results stored in 'results'.
  */
 unsigned int
 radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
 			unsigned long first_index, unsigned int max_items)
 {
-	const unsigned long max_index = radix_tree_maxindex(root->height);
+	unsigned long max_index;
+	struct radix_tree_node *node;
 	unsigned long cur_index = first_index;
-	unsigned int ret = 0;
+	unsigned int ret;
 
+	node = rcu_dereference(root->rnode);
+	if (!node)
+		return 0;
+
+	if (radix_tree_is_direct_ptr(node)) {
+		if (first_index > 0)
+			return 0;
+		node = radix_tree_direct_to_ptr(node);
+		results[0] = rcu_dereference(node);
+		return 1;
+	}
+
+	max_index = radix_tree_maxindex(node->height);
+
+	ret = 0;
 	while (ret < max_items) {
 		unsigned int nr_found;
 		unsigned long next_index;	/* Index of next search */
 
 		if (cur_index > max_index)
 			break;
-		nr_found = __lookup(root, results + ret, cur_index,
+		nr_found = __lookup(node, results + ret, cur_index,
 					max_items - ret, &next_index);
 		ret += nr_found;
 		if (next_index == 0)
 			break;
 		cur_index = next_index;
 	}
+
 	return ret;
 }
 EXPORT_SYMBOL(radix_tree_gang_lookup);
@@ -634,55 +713,64 @@
  * open-coding the search.
  */
 static unsigned int
-__lookup_tag(struct radix_tree_root *root, void **results, unsigned long index,
+__lookup_tag(struct radix_tree_node *slot, void **results, unsigned long index,
 	unsigned int max_items, unsigned long *next_index, unsigned int tag)
 {
 	unsigned int nr_found = 0;
-	unsigned int shift;
-	unsigned int height = root->height;
-	struct radix_tree_node *slot;
+	unsigned int shift, height;
 
-	if (height == 0) {
-		if (root->rnode && index == 0)
-			results[nr_found++] = root->rnode;
+	height = slot->height;
+	if (height == 0)
 		goto out;
-	}
+	shift = (height-1) * RADIX_TREE_MAP_SHIFT;
 
-	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
-	slot = root->rnode;
+	while (height > 0) {
+		unsigned long i = (index >> shift) & RADIX_TREE_MAP_MASK ;
 
-	do {
-		unsigned long i = (index >> shift) & RADIX_TREE_MAP_MASK;
-
-		for ( ; i < RADIX_TREE_MAP_SIZE; i++) {
-			if (tag_get(slot, tag, i)) {
-				BUG_ON(slot->slots[i] == NULL);
+		for (;;) {
+			if (tag_get(slot, tag, i))
 				break;
-			}
 			index &= ~((1UL << shift) - 1);
 			index += 1UL << shift;
 			if (index == 0)
 				goto out;	/* 32-bit wraparound */
+			i++;
+			if (i == RADIX_TREE_MAP_SIZE)
+				goto out;
 		}
-		if (i == RADIX_TREE_MAP_SIZE)
-			goto out;
 		height--;
 		if (height == 0) {	/* Bottom level: grab some items */
 			unsigned long j = index & RADIX_TREE_MAP_MASK;
 
 			for ( ; j < RADIX_TREE_MAP_SIZE; j++) {
+				struct radix_tree_node *node;
 				index++;
-				if (tag_get(slot, tag, j)) {
-					BUG_ON(slot->slots[j] == NULL);
-					results[nr_found++] = slot->slots[j];
+				if (!tag_get(slot, tag, j))
+					continue;
+				node = slot->slots[j];
+				/*
+				 * Even though the tag was found set, we need to
+				 * recheck that we have a non-NULL node, because
+				 * if this lookup is lockless, it may have been
+				 * subsequently deleted.
+				 *
+				 * Similar care must be taken in any place that
+				 * looks up ->slots[x] without a lock (ie. can't
+				 * rely on its value remaining the same).
+				 */
+				if (node) {
+					node = rcu_dereference(node);
+					results[nr_found++] = node;
 					if (nr_found == max_items)
 						goto out;
 				}
 			}
 		}
 		shift -= RADIX_TREE_MAP_SHIFT;
-		slot = slot->slots[i];
-	} while (height > 0);
+		slot = rcu_dereference(slot->slots[i]);
+		if (slot == NULL)
+			break;
+	}
 out:
 	*next_index = index;
 	return nr_found;
@@ -706,27 +794,44 @@
 		unsigned long first_index, unsigned int max_items,
 		unsigned int tag)
 {
-	const unsigned long max_index = radix_tree_maxindex(root->height);
+	struct radix_tree_node *node;
+	unsigned long max_index;
 	unsigned long cur_index = first_index;
-	unsigned int ret = 0;
+	unsigned int ret;
 
 	/* check the root's tag bit */
 	if (!root_tag_get(root, tag))
 		return 0;
 
+	node = rcu_dereference(root->rnode);
+	if (!node)
+		return 0;
+
+	if (radix_tree_is_direct_ptr(node)) {
+		if (first_index > 0)
+			return 0;
+		node = radix_tree_direct_to_ptr(node);
+		results[0] = rcu_dereference(node);
+		return 1;
+	}
+
+	max_index = radix_tree_maxindex(node->height);
+
+	ret = 0;
 	while (ret < max_items) {
 		unsigned int nr_found;
 		unsigned long next_index;	/* Index of next search */
 
 		if (cur_index > max_index)
 			break;
-		nr_found = __lookup_tag(root, results + ret, cur_index,
+		nr_found = __lookup_tag(node, results + ret, cur_index,
 					max_items - ret, &next_index, tag);
 		ret += nr_found;
 		if (next_index == 0)
 			break;
 		cur_index = next_index;
 	}
+
 	return ret;
 }
 EXPORT_SYMBOL(radix_tree_gang_lookup_tag);
@@ -742,8 +847,19 @@
 			root->rnode->count == 1 &&
 			root->rnode->slots[0]) {
 		struct radix_tree_node *to_free = root->rnode;
+		void *newptr;
 
-		root->rnode = to_free->slots[0];
+		/*
+		 * We don't need rcu_assign_pointer(), since we are simply
+		 * moving the node from one part of the tree to another. If
+		 * it was safe to dereference the old pointer to it
+		 * (to_free->slots[0]), it will be safe to dereference the new
+		 * one (root->rnode).
+		 */
+		newptr = to_free->slots[0];
+		if (root->height == 1)
+			newptr = radix_tree_ptr_to_direct(newptr);
+		root->rnode = newptr;
 		root->height--;
 		/* must only free zeroed nodes into the slab */
 		tag_clear(to_free, 0, 0);
@@ -767,6 +883,7 @@
 {
 	struct radix_tree_path path[RADIX_TREE_MAX_PATH], *pathp = path;
 	struct radix_tree_node *slot = NULL;
+	struct radix_tree_node *to_free;
 	unsigned int height, shift;
 	int tag;
 	int offset;
@@ -777,6 +894,7 @@
 
 	slot = root->rnode;
 	if (height == 0 && root->rnode) {
+		slot = radix_tree_direct_to_ptr(slot);
 		root_tag_clear_all(root);
 		root->rnode = NULL;
 		goto out;
@@ -809,10 +927,17 @@
 			radix_tree_tag_clear(root, index, tag);
 	}
 
+	to_free = NULL;
 	/* Now free the nodes we do not need anymore */
 	while (pathp->node) {
 		pathp->node->slots[pathp->offset] = NULL;
 		pathp->node->count--;
+		/*
+		 * Queue the node for deferred freeing after the
+		 * last reference to it disappears (set NULL, above).
+		 */
+		if (to_free)
+			radix_tree_node_free(to_free);
 
 		if (pathp->node->count) {
 			if (pathp->node == root->rnode)
@@ -821,13 +946,15 @@
 		}
 
 		/* Node with zero slots in use so free it */
-		radix_tree_node_free(pathp->node);
-
+		to_free = pathp->node;
 		pathp--;
+
 	}
 	root_tag_clear_all(root);
 	root->height = 0;
 	root->rnode = NULL;
+	if (to_free)
+		radix_tree_node_free(to_free);
 
 out:
 	return slot;
@@ -846,7 +973,7 @@
 EXPORT_SYMBOL(radix_tree_tagged);
 
 static void
-radix_tree_node_ctor(void *node, kmem_cache_t *cachep, unsigned long flags)
+radix_tree_node_ctor(void *node, struct kmem_cache *cachep, unsigned long flags)
 {
 	memset(node, 0, sizeof(struct radix_tree_node));
 }
@@ -869,7 +996,6 @@
 		height_to_maxindex[i] = __maxindex(i);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int radix_tree_callback(struct notifier_block *nfb,
                             unsigned long action,
                             void *hcpu)
@@ -889,7 +1015,6 @@
        }
        return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */
 
 void __init radix_tree_init(void)
 {
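The radix-tree conversion above lets gang lookups run under rcu_read_lock() instead of the mapping's tree_lock. A minimal caller-side sketch, not taken from the patch; the tree pointer, result buffer size and function name are placeholders:

#include <linux/kernel.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

/*
 * Lockless reader sketch: each returned pointer is only safe to use
 * while the RCU read-side critical section is held, and the set of
 * results is not an atomic snapshot of the tree (items may be inserted
 * or deleted concurrently).
 */
static unsigned int my_scan_tree(struct radix_tree_root *tree,
				 unsigned long first_index)
{
	void *results[16];
	unsigned int nr;

	rcu_read_lock();
	nr = radix_tree_gang_lookup(tree, results, first_index,
				    ARRAY_SIZE(results));
	/* consume results[0..nr-1] here, before rcu_read_unlock() */
	rcu_read_unlock();

	return nr;
}
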
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
index eaa9abe..b2486cf 100644
--- a/mm/allocpercpu.c
+++ b/mm/allocpercpu.c
@@ -17,10 +17,9 @@
 void percpu_depopulate(void *__pdata, int cpu)
 {
 	struct percpu_data *pdata = __percpu_disguise(__pdata);
-	if (pdata->ptrs[cpu]) {
-		kfree(pdata->ptrs[cpu]);
-		pdata->ptrs[cpu] = NULL;
-	}
+
+	kfree(pdata->ptrs[cpu]);
+	pdata->ptrs[cpu] = NULL;
 }
 EXPORT_SYMBOL_GPL(percpu_depopulate);
 
@@ -123,6 +122,8 @@
  */
 void percpu_free(void *__pdata)
 {
+	if (unlikely(!__pdata))
+		return;
 	__percpu_depopulate_mask(__pdata, &cpu_possible_map);
 	kfree(__percpu_disguise(__pdata));
 }
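percpu_free() now returns early on a NULL pointer (see the hunk above), so teardown paths can call it unconditionally, just as they already could with kfree(). A sketch only; "struct my_stats" and its fields are illustrative placeholders, and how "counters" was allocated is not shown:

#include <linux/percpu.h>
#include <linux/slab.h>

struct my_stats {
	void *counters;		/* per-cpu allocation, may be NULL */
	char *name;		/* kmalloc()ed, may be NULL */
};

static void my_stats_destroy(struct my_stats *st)
{
	if (!st)
		return;
	percpu_free(st->counters);	/* no NULL check needed any more */
	kfree(st->name);		/* kfree(NULL) was already safe */
	kfree(st);
}
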
diff --git a/mm/bootmem.c b/mm/bootmem.c
index d53112f..00a9697 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -27,8 +27,6 @@
 unsigned long min_low_pfn;
 unsigned long max_pfn;
 
-EXPORT_UNUSED_SYMBOL(max_pfn);  /*  June 2006  */
-
 static LIST_HEAD(bdata_list);
 #ifdef CONFIG_CRASH_DUMP
 /*
@@ -196,6 +194,10 @@
 	if (limit && bdata->node_boot_start >= limit)
 		return NULL;
 
+	/* on nodes without memory - bootmem_map is NULL */
+	if (!bdata->node_bootmem_map)
+		return NULL;
+
 	end_pfn = bdata->node_low_pfn;
 	limit = PFN_DOWN(limit);
 	if (limit && end_pfn > limit)
diff --git a/mm/filemap.c b/mm/filemap.c
index 13df01c..af7e2f5 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1445,7 +1445,6 @@
 	 * effect.
 	 */
 	error = page_cache_read(file, pgoff);
-	grab_swap_token();
 
 	/*
 	 * The page we want has now been added to the page cache.
diff --git a/mm/fremap.c b/mm/fremap.c
index 7a9d0f5..b77a002 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -101,7 +101,6 @@
 {
 	int err = -ENOMEM;
 	pte_t *pte;
-	pte_t pte_val;
 	spinlock_t *ptl;
 
 	pte = get_locked_pte(mm, addr, &ptl);
@@ -114,7 +113,6 @@
 	}
 
 	set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
-	pte_val = *pte;
 	/*
 	 * We don't need to run update_mmu_cache() here because the "file pte"
 	 * being installed by install_file_pte() is not a real pte - it's a
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a088f59..0ccc7f2 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -109,7 +109,7 @@
 	if (nid == MAX_NUMNODES)
 		nid = first_node(node_online_map);
 	if (page) {
-		page[1].lru.next = (void *)free_huge_page;	/* dtor */
+		set_compound_page_dtor(page, free_huge_page);
 		spin_lock(&hugetlb_lock);
 		nr_huge_pages++;
 		nr_huge_pages_node[page_to_nid(page)]++;
@@ -344,7 +344,6 @@
 			entry = *src_pte;
 			ptepage = pte_page(entry);
 			get_page(ptepage);
-			add_mm_counter(dst, file_rss, HPAGE_SIZE / PAGE_SIZE);
 			set_huge_pte_at(dst, addr, dst_pte, entry);
 		}
 		spin_unlock(&src->page_table_lock);
@@ -365,6 +364,11 @@
 	pte_t pte;
 	struct page *page;
 	struct page *tmp;
+	/*
+	 * A page gathering list, protected by the per-file i_mmap_lock. The
+	 * lock is used to avoid list corruption from concurrent unmapping
+	 * of the same page, since we are using page->lru.
+	 */
 	LIST_HEAD(page_list);
 
 	WARN_ON(!is_vm_hugetlb_page(vma));
@@ -372,24 +376,21 @@
 	BUG_ON(end & ~HPAGE_MASK);
 
 	spin_lock(&mm->page_table_lock);
-
-	/* Update high watermark before we lower rss */
-	update_hiwater_rss(mm);
-
 	for (address = start; address < end; address += HPAGE_SIZE) {
 		ptep = huge_pte_offset(mm, address);
 		if (!ptep)
 			continue;
 
+		if (huge_pmd_unshare(mm, &address, ptep))
+			continue;
+
 		pte = huge_ptep_get_and_clear(mm, address, ptep);
 		if (pte_none(pte))
 			continue;
 
 		page = pte_page(pte);
 		list_add(&page->lru, &page_list);
-		add_mm_counter(mm, file_rss, (int) -(HPAGE_SIZE / PAGE_SIZE));
 	}
-
 	spin_unlock(&mm->page_table_lock);
 	flush_tlb_range(vma, start, end);
 	list_for_each_entry_safe(page, tmp, &page_list, lru) {
@@ -515,7 +516,6 @@
 	if (!pte_none(*ptep))
 		goto backout;
 
-	add_mm_counter(mm, file_rss, HPAGE_SIZE / PAGE_SIZE);
 	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
 				&& (vma->vm_flags & VM_SHARED)));
 	set_huge_pte_at(mm, address, ptep, new_pte);
@@ -653,11 +653,14 @@
 	BUG_ON(address >= end);
 	flush_cache_range(vma, address, end);
 
+	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
 	spin_lock(&mm->page_table_lock);
 	for (; address < end; address += HPAGE_SIZE) {
 		ptep = huge_pte_offset(mm, address);
 		if (!ptep)
 			continue;
+		if (huge_pmd_unshare(mm, &address, ptep))
+			continue;
 		if (!pte_none(*ptep)) {
 			pte = huge_ptep_get_and_clear(mm, address, ptep);
 			pte = pte_mkhuge(pte_modify(pte, newprot));
@@ -666,6 +669,7 @@
 		}
 	}
 	spin_unlock(&mm->page_table_lock);
+	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
 
 	flush_tlb_range(vma, start, end);
 }
diff --git a/mm/memory.c b/mm/memory.c
index 156861f..4198df0 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1902,7 +1902,6 @@
 
 	return 0;
 }
-EXPORT_UNUSED_SYMBOL(vmtruncate_range);  /*  June 2006  */
 
 /**
  * swapin_readahead - swap in pages in hope we need them soon
@@ -1991,6 +1990,7 @@
 	delayacct_set_flag(DELAYACCT_PF_SWAPIN);
 	page = lookup_swap_cache(entry);
 	if (!page) {
+		grab_swap_token(); /* Contend for token _before_ read-in */
  		swapin_readahead(entry, address, vma);
  		page = read_swap_cache_async(entry, vma, address);
 		if (!page) {
@@ -2008,7 +2008,6 @@
 		/* Had to read the page from swap area: Major fault */
 		ret = VM_FAULT_MAJOR;
 		count_vm_event(PGMAJFAULT);
-		grab_swap_token();
 	}
 
 	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index fd678a6..0c055a09 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -72,7 +72,6 @@
 			return ret;
 	}
 	memmap_init_zone(nr_pages, nid, zone_type, phys_start_pfn);
-	zonetable_add(zone, nid, zone_type, phys_start_pfn, nr_pages);
 	return 0;
 }
 
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 617fb31..b917d6f 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -141,9 +141,11 @@
 	enum zone_type k;
 
 	max = 1 + MAX_NR_ZONES * nodes_weight(*nodes);
+	max++;			/* space for zlcache_ptr (see mmzone.h) */
 	zl = kmalloc(sizeof(struct zone *) * max, GFP_KERNEL);
 	if (!zl)
 		return NULL;
+	zl->zlcache_ptr = NULL;
 	num = 0;
 	/* First put in the highest zones from all nodes, then all the next 
 	   lower zones etc. Avoid empty zones because the memory allocator
@@ -219,7 +221,7 @@
 	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	do {
 		struct page *page;
-		unsigned int nid;
+		int nid;
 
 		if (!pte_present(*pte))
 			continue;
@@ -1324,7 +1326,7 @@
 	atomic_set(&new->refcnt, 1);
 	if (new->policy == MPOL_BIND) {
 		int sz = ksize(old->v.zonelist);
-		new->v.zonelist = kmemdup(old->v.zonelist, sz, SLAB_KERNEL);
+		new->v.zonelist = kmemdup(old->v.zonelist, sz, GFP_KERNEL);
 		if (!new->v.zonelist) {
 			kmem_cache_free(policy_cache, new);
 			return ERR_PTR(-ENOMEM);
@@ -1705,8 +1707,8 @@
  * Display pages allocated per node and memory policy via /proc.
  */
 
-static const char *policy_types[] = { "default", "prefer", "bind",
-				      "interleave" };
+static const char * const policy_types[] =
+	{ "default", "prefer", "bind", "interleave" };
 
 /*
  * Convert a mempolicy into a string.
diff --git a/mm/migrate.c b/mm/migrate.c
index b4979d4..e9b161b 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -294,7 +294,7 @@
 static int migrate_page_move_mapping(struct address_space *mapping,
 		struct page *newpage, struct page *page)
 {
-	struct page **radix_pointer;
+	void **pslot;
 
 	if (!mapping) {
 		/* Anonymous page */
@@ -305,12 +305,11 @@
 
 	write_lock_irq(&mapping->tree_lock);
 
-	radix_pointer = (struct page **)radix_tree_lookup_slot(
-						&mapping->page_tree,
-						page_index(page));
+	pslot = radix_tree_lookup_slot(&mapping->page_tree,
+ 					page_index(page));
 
 	if (page_count(page) != 2 + !!PagePrivate(page) ||
-			*radix_pointer != page) {
+			(struct page *)radix_tree_deref_slot(pslot) != page) {
 		write_unlock_irq(&mapping->tree_lock);
 		return -EAGAIN;
 	}
@@ -318,7 +317,7 @@
 	/*
 	 * Now we know that no one else is looking at the page.
 	 */
-	get_page(newpage);
+	get_page(newpage);	/* add cache reference */
 #ifdef CONFIG_SWAP
 	if (PageSwapCache(page)) {
 		SetPageSwapCache(newpage);
@@ -326,8 +325,14 @@
 	}
 #endif
 
-	*radix_pointer = newpage;
+	radix_tree_replace_slot(pslot, newpage);
+
+	/*
+	 * Drop cache reference from old page.
+	 * We know this isn't the last reference.
+	 */
 	__put_page(page);
+
 	write_unlock_irq(&mapping->tree_lock);
 
 	return 0;
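The migration hunk above switches from casting the slot to 'struct page **' to the generic radix-tree slot helpers. A small sketch of that interface; the function name and the old/new arguments are placeholders, not part of the patch, and the caller is assumed to hold the lock that protects the tree (the mapping's tree_lock in the code above):

#include <linux/radix-tree.h>
#include <linux/errno.h>

/* Look the slot up once, then read and replace through it. */
static int my_replace_entry(struct radix_tree_root *root, unsigned long index,
			    void *old, void *new)
{
	void **pslot;

	pslot = radix_tree_lookup_slot(root, index);
	if (!pslot || radix_tree_deref_slot(pslot) != old)
		return -ENOENT;

	radix_tree_replace_slot(pslot, new);
	return 0;
}
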
diff --git a/mm/mlock.c b/mm/mlock.c
index b90c595..3446b7e 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -65,7 +65,7 @@
 			ret = make_pages_present(start, end);
 	}
 
-	vma->vm_mm->locked_vm -= pages;
+	mm->locked_vm -= pages;
 out:
 	if (ret == -ENOMEM)
 		ret = -EAGAIN;
diff --git a/mm/mmap.c b/mm/mmap.c
index 7b40abd..7be110e 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1736,7 +1736,7 @@
 	if (mm->map_count >= sysctl_max_map_count)
 		return -ENOMEM;
 
-	new = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (!new)
 		return -ENOMEM;
 
@@ -2057,7 +2057,7 @@
 		    vma_start < new_vma->vm_end)
 			*vmap = new_vma;
 	} else {
-		new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+		new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 		if (new_vma) {
 			*new_vma = *vma;
 			pol = mpol_copy(vma_policy(vma));
diff --git a/mm/mmzone.c b/mm/mmzone.c
index febea1c..eb58386 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -14,8 +14,6 @@
 	return NODE_DATA(first_online_node);
 }
 
-EXPORT_UNUSED_SYMBOL(first_online_pgdat);  /*  June 2006  */
-
 struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
 {
 	int nid = next_online_node(pgdat->node_id);
@@ -24,8 +22,6 @@
 		return NULL;
 	return NODE_DATA(nid);
 }
-EXPORT_UNUSED_SYMBOL(next_online_pgdat);  /*  June 2006  */
-
 
 /*
  * next_zone - helper magic for for_each_zone()
@@ -45,5 +41,4 @@
 	}
 	return zone;
 }
-EXPORT_UNUSED_SYMBOL(next_zone);  /*  June 2006  */
 
diff --git a/mm/nommu.c b/mm/nommu.c
index 8bdde95..af87456 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -497,15 +497,17 @@
 	    (flags & MAP_TYPE) != MAP_SHARED)
 		return -EINVAL;
 
-	if (PAGE_ALIGN(len) == 0)
-		return addr;
-
-	if (len > TASK_SIZE)
+	if (!len)
 		return -EINVAL;
 
+	/* Careful about overflows.. */
+	len = PAGE_ALIGN(len);
+	if (!len || len > TASK_SIZE)
+		return -ENOMEM;
+
 	/* offset overflow? */
 	if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
-		return -EINVAL;
+		return -EOVERFLOW;
 
 	if (file) {
 		/* validate file mapping requests */
@@ -806,10 +808,9 @@
 	vm_flags = determine_vm_flags(file, prot, flags, capabilities);
 
 	/* we're going to need to record the mapping if it works */
-	vml = kmalloc(sizeof(struct vm_list_struct), GFP_KERNEL);
+	vml = kzalloc(sizeof(struct vm_list_struct), GFP_KERNEL);
 	if (!vml)
 		goto error_getting_vml;
-	memset(vml, 0, sizeof(*vml));
 
 	down_write(&nommu_vma_sem);
 
@@ -885,11 +886,10 @@
 	}
 
 	/* we're going to need a VMA struct as well */
-	vma = kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
+	vma = kzalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
 	if (!vma)
 		goto error_getting_vma;
 
-	memset(vma, 0, sizeof(*vma));
 	INIT_LIST_HEAD(&vma->anon_vma_node);
 	atomic_set(&vma->vm_usage, 1);
 	if (file)
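The nommu mapping-validation hunk above replaces the old length checks with an overflow-aware sequence. A minimal sketch of the idea, assuming only PAGE_ALIGN() and TASK_SIZE from the surrounding code, with a placeholder function name; it is not the actual nommu code:

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/errno.h>

/*
 * PAGE_ALIGN() of a value in the last partial page below ULONG_MAX wraps
 * around to 0, so align first, then reject both the wrapped and the
 * oversized cases in one test.
 */
static int my_check_len(unsigned long len)
{
	if (!len)
		return -EINVAL;

	len = PAGE_ALIGN(len);		/* may wrap to 0 on overflow */
	if (!len || len > TASK_SIZE)
		return -ENOMEM;

	return 0;
}
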
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 2e3ce3a..223d9cc 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -264,7 +264,7 @@
  * flag though it's unlikely that  we select a process with CAP_SYS_RAW_IO
  * set.
  */
-static void __oom_kill_task(struct task_struct *p, const char *message)
+static void __oom_kill_task(struct task_struct *p, int verbose)
 {
 	if (is_init(p)) {
 		WARN_ON(1);
@@ -278,10 +278,8 @@
 		return;
 	}
 
-	if (message) {
-		printk(KERN_ERR "%s: Killed process %d (%s).\n",
-				message, p->pid, p->comm);
-	}
+	if (verbose)
+		printk(KERN_ERR "Killed process %d (%s)\n", p->pid, p->comm);
 
 	/*
 	 * We give our sacrificial lamb high priority and access to
@@ -294,7 +292,7 @@
 	force_sig(SIGKILL, p);
 }
 
-static int oom_kill_task(struct task_struct *p, const char *message)
+static int oom_kill_task(struct task_struct *p)
 {
 	struct mm_struct *mm;
 	struct task_struct *g, *q;
@@ -313,15 +311,25 @@
 	if (mm == NULL)
 		return 1;
 
-	__oom_kill_task(p, message);
+	/*
+	 * Don't kill the process if any threads are set to OOM_DISABLE
+	 */
+	do_each_thread(g, q) {
+		if (q->mm == mm && q->oomkilladj == OOM_DISABLE)
+			return 1;
+	} while_each_thread(g, q);
+
+	__oom_kill_task(p, 1);
+
 	/*
 	 * kill all processes that share the ->mm (i.e. all threads),
-	 * but are in a different thread group
+	 * but are in a different thread group. Don't let them have access
+	 * to memory reserves though, otherwise we might deplete all memory.
 	 */
-	do_each_thread(g, q)
+	do_each_thread(g, q) {
 		if (q->mm == mm && q->tgid != p->tgid)
-			__oom_kill_task(q, message);
-	while_each_thread(g, q);
+			force_sig(SIGKILL, q);
+	} while_each_thread(g, q);
 
 	return 0;
 }
@@ -337,21 +345,22 @@
 	 * its children or threads, just set TIF_MEMDIE so it can die quickly
 	 */
 	if (p->flags & PF_EXITING) {
-		__oom_kill_task(p, NULL);
+		__oom_kill_task(p, 0);
 		return 0;
 	}
 
-	printk(KERN_ERR "Out of Memory: Kill process %d (%s) score %li"
-			" and children.\n", p->pid, p->comm, points);
+	printk(KERN_ERR "%s: kill process %d (%s) score %li or a child\n",
+					message, p->pid, p->comm, points);
+
 	/* Try to kill a child first */
 	list_for_each(tsk, &p->children) {
 		c = list_entry(tsk, struct task_struct, sibling);
 		if (c->mm == p->mm)
 			continue;
-		if (!oom_kill_task(c, message))
+		if (!oom_kill_task(c))
 			return 0;
 	}
-	return oom_kill_task(p, message);
+	return oom_kill_task(p);
 }
 
 static BLOCKING_NOTIFIER_HEAD(oom_notify_list);
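The new OOM_DISABLE check above walks every thread in the system with do_each_thread()/while_each_thread() to see whether any of them shares the victim's mm. A generic sketch of that scan pattern with a placeholder function name; the caller is assumed to hold tasklist_lock (or otherwise keep the task list stable):

#include <linux/sched.h>

/* Count how many threads in the system share a given mm. */
static int my_count_mm_users(struct mm_struct *mm)
{
	struct task_struct *g, *q;
	int n = 0;

	do_each_thread(g, q) {
		if (q->mm == mm)
			n++;
	} while_each_thread(g, q);

	return n;
}
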
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index aa6fcc7..cace22b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -83,14 +83,7 @@
 
 EXPORT_SYMBOL(totalram_pages);
 
-/*
- * Used by page_zone() to look up the address of the struct zone whose
- * id is encoded in the upper bits of page->flags
- */
-struct zone *zone_table[1 << ZONETABLE_SHIFT] __read_mostly;
-EXPORT_SYMBOL(zone_table);
-
-static char *zone_names[MAX_NR_ZONES] = {
+static char * const zone_names[MAX_NR_ZONES] = {
 	 "DMA",
 #ifdef CONFIG_ZONE_DMA32
 	 "DMA32",
@@ -237,7 +230,7 @@
 	int i;
 	int nr_pages = 1 << order;
 
-	page[1].lru.next = (void *)free_compound_page;	/* set dtor */
+	set_compound_page_dtor(page, free_compound_page);
 	page[1].lru.prev = (void *)order;
 	for (i = 0; i < nr_pages; i++) {
 		struct page *p = page + i;
@@ -486,7 +479,7 @@
 	spin_lock(&zone->lock);
 	zone->all_unreclaimable = 0;
 	zone->pages_scanned = 0;
-	__free_one_page(page, zone ,order);
+	__free_one_page(page, zone, order);
 	spin_unlock(&zone->lock);
 }
 
@@ -605,6 +598,8 @@
 			1 << PG_checked | 1 << PG_mappedtodisk);
 	set_page_private(page, 0);
 	set_page_refcounted(page);
+
+	arch_alloc_page(page, order);
 	kernel_map_pages(page, 1 << order, 1);
 
 	if (gfp_flags & __GFP_ZERO)
@@ -690,9 +685,15 @@
 
 			pcp = &pset->pcp[i];
 			if (pcp->count) {
+				int to_drain;
+
 				local_irq_save(flags);
-				free_pages_bulk(zone, pcp->count, &pcp->list, 0);
-				pcp->count = 0;
+				if (pcp->count >= pcp->batch)
+					to_drain = pcp->batch;
+				else
+					to_drain = pcp->count;
+				free_pages_bulk(zone, to_drain, &pcp->list, 0);
+				pcp->count -= to_drain;
 				local_irq_restore(flags);
 			}
 		}
@@ -700,7 +701,6 @@
 }
 #endif
 
-#if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU)
 static void __drain_pages(unsigned int cpu)
 {
 	unsigned long flags;
@@ -722,7 +722,6 @@
 		}
 	}
 }
-#endif /* CONFIG_PM || CONFIG_HOTPLUG_CPU */
 
 #ifdef CONFIG_PM
 
@@ -925,31 +924,160 @@
 	return 1;
 }
 
+#ifdef CONFIG_NUMA
 /*
- * get_page_from_freeliest goes through the zonelist trying to allocate
+ * zlc_setup - Setup for "zonelist cache".  Uses cached zone data to
+ * skip over zones that are not allowed by the cpuset, or that have
+ * been recently (in last second) found to be nearly full.  See further
+ * comments in mmzone.h.  Reduces cache footprint of zonelist scans
+ * that have to skip over a lot of full or unallowed zones.
+ *
+ * If the zonelist cache is present in the passed in zonelist, then
+ * returns a pointer to the allowed node mask (either the current
+ * task's mems_allowed, or node_online_map).
+ *
+ * If the zonelist cache is not available for this zonelist, does
+ * nothing and returns NULL.
+ *
+ * If the fullzones BITMAP in the zonelist cache is stale (more than
+ * a second since last zap'd) then we zap it out (clear its bits.)
+ *
+ * We hold off even calling zlc_setup, until after we've checked the
+ * first zone in the zonelist, on the theory that most allocations will
+ * be satisfied from that first zone, so best to examine that zone as
+ * quickly as we can.
+ */
+static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
+{
+	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
+	nodemask_t *allowednodes;	/* zonelist_cache approximation */
+
+	zlc = zonelist->zlcache_ptr;
+	if (!zlc)
+		return NULL;
+
+	if (jiffies - zlc->last_full_zap > 1 * HZ) {
+		bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
+		zlc->last_full_zap = jiffies;
+	}
+
+	allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
+					&cpuset_current_mems_allowed :
+					&node_online_map;
+	return allowednodes;
+}
+
+/*
+ * Given 'z' scanning a zonelist, run a couple of quick checks to see
+ * if it is worth looking at further for free memory:
+ *  1) Check that the zone isn't thought to be full (doesn't have its
+ *     bit set in the zonelist_cache fullzones BITMAP).
+ *  2) Check that the zone's node (obtained from the zonelist_cache
+ *     z_to_n[] mapping) is allowed in the passed-in allowednodes mask.
+ * Return true (non-zero) if zone is worth looking at further, or
+ * else return false (zero) if it is not.
+ *
+ * This check -ignores- the distinction between various watermarks,
+ * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ...  If a zone is
+ * found to be full for any variation of these watermarks, it will
+ * be considered full for up to one second by all requests, unless
+ * we are so low on memory on all allowed nodes that we are forced
+ * into the second scan of the zonelist.
+ *
+ * In the second scan we ignore this zonelist cache and exactly
+ * apply the watermarks to all zones, even if it is slower to do so.
+ * We are low on memory in the second scan, and should leave no stone
+ * unturned looking for a free page.
+ */
+static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z,
+						nodemask_t *allowednodes)
+{
+	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
+	int i;				/* index of *z in zonelist zones */
+	int n;				/* node that zone *z is on */
+
+	zlc = zonelist->zlcache_ptr;
+	if (!zlc)
+		return 1;
+
+	i = z - zonelist->zones;
+	n = zlc->z_to_n[i];
+
+	/* This zone is worth trying if it is allowed but not full */
+	return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
+}
+
+/*
+ * Given 'z' scanning a zonelist, set the corresponding bit in
+ * zlc->fullzones, so that subsequent attempts to allocate a page
+ * from that zone don't waste time re-examining it.
+ */
+static void zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z)
+{
+	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
+	int i;				/* index of *z in zonelist zones */
+
+	zlc = zonelist->zlcache_ptr;
+	if (!zlc)
+		return;
+
+	i = z - zonelist->zones;
+
+	set_bit(i, zlc->fullzones);
+}
+
+#else	/* CONFIG_NUMA */
+
+static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
+{
+	return NULL;
+}
+
+static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z,
+				nodemask_t *allowednodes)
+{
+	return 1;
+}
+
+static void zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z)
+{
+}
+#endif	/* CONFIG_NUMA */
+
+/*
+ * get_page_from_freelist goes through the zonelist trying to allocate
  * a page.
  */
 static struct page *
 get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
 		struct zonelist *zonelist, int alloc_flags)
 {
-	struct zone **z = zonelist->zones;
+	struct zone **z;
 	struct page *page = NULL;
-	int classzone_idx = zone_idx(*z);
+	int classzone_idx = zone_idx(zonelist->zones[0]);
 	struct zone *zone;
+	nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
+	int zlc_active = 0;		/* set if using zonelist_cache */
+	int did_zlc_setup = 0;		/* just call zlc_setup() one time */
 
+zonelist_scan:
 	/*
-	 * Go through the zonelist once, looking for a zone with enough free.
+	 * Scan zonelist, looking for a zone with enough free.
 	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
 	 */
+	z = zonelist->zones;
+
 	do {
+		if (NUMA_BUILD && zlc_active &&
+			!zlc_zone_worth_trying(zonelist, z, allowednodes))
+				continue;
 		zone = *z;
 		if (unlikely(NUMA_BUILD && (gfp_mask & __GFP_THISNODE) &&
 			zone->zone_pgdat != zonelist->zones[0]->zone_pgdat))
 				break;
 		if ((alloc_flags & ALLOC_CPUSET) &&
-				!cpuset_zone_allowed(zone, gfp_mask))
-			continue;
+			!cpuset_zone_allowed(zone, gfp_mask))
+				goto try_next_zone;
 
 		if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
 			unsigned long mark;
@@ -959,18 +1087,34 @@
 				mark = zone->pages_low;
 			else
 				mark = zone->pages_high;
-			if (!zone_watermark_ok(zone , order, mark,
-				    classzone_idx, alloc_flags))
+			if (!zone_watermark_ok(zone, order, mark,
+				    classzone_idx, alloc_flags)) {
 				if (!zone_reclaim_mode ||
 				    !zone_reclaim(zone, gfp_mask, order))
-					continue;
+					goto this_zone_full;
+			}
 		}
 
 		page = buffered_rmqueue(zonelist, zone, order, gfp_mask);
-		if (page) {
+		if (page)
 			break;
+this_zone_full:
+		if (NUMA_BUILD)
+			zlc_mark_zone_full(zonelist, z);
+try_next_zone:
+		if (NUMA_BUILD && !did_zlc_setup) {
+			/* we do zlc_setup after the first zone is tried */
+			allowednodes = zlc_setup(zonelist, alloc_flags);
+			zlc_active = 1;
+			did_zlc_setup = 1;
 		}
 	} while (*(++z) != NULL);
+
+	if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
+		/* Disable zlc cache for second zonelist scan */
+		zlc_active = 0;
+		goto zonelist_scan;
+	}
 	return page;
 }
 
@@ -1005,9 +1149,19 @@
 	if (page)
 		goto got_pg;
 
-	do {
+	/*
+	 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
+	 * __GFP_NOWARN set) should not cause reclaim since the subsystem
+	 * (e.g. slab) using GFP_THISNODE may choose to trigger reclaim
+	 * using a larger set of nodes after it has established that the
+	 * allowed per-node queues are empty and that nodes are
+	 * overallocated.
+	 */
+	if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
+		goto nopage;
+
+	for (z = zonelist->zones; *z; z++)
 		wakeup_kswapd(*z, order);
-	} while (*(++z));
 
 	/*
 	 * OK, we're below the kswapd watermark and have kicked background
@@ -1041,6 +1195,7 @@
 
 	/* This allocation should allow future memory freeing. */
 
+rebalance:
 	if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
 			&& !in_interrupt()) {
 		if (!(gfp_mask & __GFP_NOMEMALLOC)) {
@@ -1062,7 +1217,6 @@
 	if (!wait)
 		goto nopage;
 
-rebalance:
 	cond_resched();
 
 	/* We now go into synchronous reclaim */
@@ -1262,7 +1416,7 @@
 static inline void show_node(struct zone *zone)
 {
 	if (NUMA_BUILD)
-		printk("Node %ld ", zone_to_nid(zone));
+		printk("Node %d ", zone_to_nid(zone));
 }
 
 void si_meminfo(struct sysinfo *val)
@@ -1542,6 +1696,24 @@
 	}
 }
 
+/* Construct the zonelist performance cache - see further mmzone.h */
+static void __meminit build_zonelist_cache(pg_data_t *pgdat)
+{
+	int i;
+
+	for (i = 0; i < MAX_NR_ZONES; i++) {
+		struct zonelist *zonelist;
+		struct zonelist_cache *zlc;
+		struct zone **z;
+
+		zonelist = pgdat->node_zonelists + i;
+		zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
+		bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
+		for (z = zonelist->zones; *z; z++)
+			zlc->z_to_n[z - zonelist->zones] = zone_to_nid(*z);
+	}
+}
+
 #else	/* CONFIG_NUMA */
 
 static void __meminit build_zonelists(pg_data_t *pgdat)
@@ -1579,14 +1751,26 @@
 	}
 }
 
+/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
+static void __meminit build_zonelist_cache(pg_data_t *pgdat)
+{
+	int i;
+
+	for (i = 0; i < MAX_NR_ZONES; i++)
+		pgdat->node_zonelists[i].zlcache_ptr = NULL;
+}
+
 #endif	/* CONFIG_NUMA */
 
 /* return values int ....just for stop_machine_run() */
 static int __meminit __build_all_zonelists(void *dummy)
 {
 	int nid;
-	for_each_online_node(nid)
+
+	for_each_online_node(nid) {
 		build_zonelists(NODE_DATA(nid));
+		build_zonelist_cache(NODE_DATA(nid));
+	}
 	return 0;
 }
 
@@ -1715,20 +1899,6 @@
 	}
 }
 
-#define ZONETABLE_INDEX(x, zone_nr)	((x << ZONES_SHIFT) | zone_nr)
-void zonetable_add(struct zone *zone, int nid, enum zone_type zid,
-		unsigned long pfn, unsigned long size)
-{
-	unsigned long snum = pfn_to_section_nr(pfn);
-	unsigned long end = pfn_to_section_nr(pfn + size);
-
-	if (FLAGS_HAS_NODE)
-		zone_table[ZONETABLE_INDEX(nid, zid)] = zone;
-	else
-		for (; snum <= end; snum++)
-			zone_table[ZONETABLE_INDEX(snum, zid)] = zone;
-}
-
 #ifndef __HAVE_ARCH_MEMMAP_INIT
 #define memmap_init(size, nid, zone, start_pfn) \
 	memmap_init_zone((size), (nid), (zone), (start_pfn))
@@ -1881,16 +2051,16 @@
 	int ret = NOTIFY_OK;
 
 	switch (action) {
-		case CPU_UP_PREPARE:
-			if (process_zones(cpu))
-				ret = NOTIFY_BAD;
-			break;
-		case CPU_UP_CANCELED:
-		case CPU_DEAD:
-			free_zone_pagesets(cpu);
-			break;
-		default:
-			break;
+	case CPU_UP_PREPARE:
+		if (process_zones(cpu))
+			ret = NOTIFY_BAD;
+		break;
+	case CPU_UP_CANCELED:
+	case CPU_DEAD:
+		free_zone_pagesets(cpu);
+		break;
+	default:
+		break;
 	}
 	return ret;
 }
@@ -2421,7 +2591,6 @@
 		if (!size)
 			continue;
 
-		zonetable_add(zone, nid, j, zone_start_pfn, size);
 		ret = init_currently_empty_zone(zone, zone_start_pfn, size);
 		BUG_ON(ret);
 		zone_start_pfn += size;
@@ -2736,7 +2905,6 @@
 			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int page_alloc_cpu_notify(struct notifier_block *self,
 				 unsigned long action, void *hcpu)
 {
@@ -2751,7 +2919,6 @@
 	}
 	return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */
 
 void __init page_alloc_init(void)
 {
@@ -3055,7 +3222,7 @@
 	/* allow the kernel cmdline to have a say */
 	if (!numentries) {
 		/* round applicable memory size up to nearest megabyte */
-		numentries = (flags & HASH_HIGHMEM) ? nr_all_pages : nr_kernel_pages;
+		numentries = nr_kernel_pages;
 		numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
 		numentries >>= 20 - PAGE_SHIFT;
 		numentries <<= 20 - PAGE_SHIFT;
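The zonelist cache consulted by get_page_from_freelist() above is only referenced here through zlcache_ptr, fullzones, z_to_n[] and last_full_zap; its definition lives in include/linux/mmzone.h. A rough sketch of its shape, inferred from those uses and possibly differing in detail from the real structure:

#include <linux/types.h>
#include <linux/bitops.h>

/*
 * Per-zonelist cache: which zones were recently found full, which node
 * each zone in the zonelist belongs to, and when the "full" bits were
 * last cleared (they are zapped roughly once per second above).
 */
struct my_zonelist_cache {
	unsigned short z_to_n[MAX_ZONES_PER_ZONELIST];	   /* zone -> node */
	DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST); /* zone full?   */
	unsigned long last_full_zap;			   /* jiffies      */
};
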
diff --git a/mm/page_io.c b/mm/page_io.c
index d4840ec..dbffec0 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -147,48 +147,3 @@
 out:
 	return ret;
 }
-
-#ifdef CONFIG_SOFTWARE_SUSPEND
-/*
- * A scruffy utility function to read or write an arbitrary swap page
- * and wait on the I/O.  The caller must have a ref on the page.
- *
- * We use end_swap_bio_read() even for writes, because it happens to do what
- * we want.
- */
-int rw_swap_page_sync(int rw, swp_entry_t entry, struct page *page,
-			struct bio **bio_chain)
-{
-	struct bio *bio;
-	int ret = 0;
-	int bio_rw;
-
-	lock_page(page);
-
-	bio = get_swap_bio(GFP_KERNEL, entry.val, page, end_swap_bio_read);
-	if (bio == NULL) {
-		unlock_page(page);
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	bio_rw = rw;
-	if (!bio_chain)
-		bio_rw |= (1 << BIO_RW_SYNC);
-	if (bio_chain)
-		bio_get(bio);
-	submit_bio(bio_rw, bio);
-	if (bio_chain == NULL) {
-		wait_on_page_locked(page);
-
-		if (!PageUptodate(page) || PageError(page))
-			ret = -EIO;
-	}
-	if (bio_chain) {
-		bio->bi_private = *bio_chain;
-		*bio_chain = bio;
-	}
-out:
-	return ret;
-}
-#endif
diff --git a/mm/pdflush.c b/mm/pdflush.c
index b02102f..8ce0900 100644
--- a/mm/pdflush.c
+++ b/mm/pdflush.c
@@ -21,6 +21,7 @@
 #include <linux/writeback.h>	// Prototypes pdflush_operation()
 #include <linux/kthread.h>
 #include <linux/cpuset.h>
+#include <linux/freezer.h>
 
 
 /*
diff --git a/mm/readahead.c b/mm/readahead.c
index 23cb61a..a386f2b 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -148,13 +148,7 @@
 		if (!pagevec_add(&lru_pvec, page))
 			__pagevec_lru_add(&lru_pvec);
 		if (ret) {
-			while (!list_empty(pages)) {
-				struct page *victim;
-
-				victim = list_to_page(pages);
-				list_del(&victim->lru);
-				page_cache_release(victim);
-			}
+			put_pages_list(pages);
 			break;
 		}
 	}
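The readahead hunk above hands any leftover pages to put_pages_list() instead of open-coding the release loop. A sketch of what that helper amounts to, modelled on the loop it replaces; the real implementation lives in mm/swap.c and details such as traversal order may differ:

#include <linux/mm.h>
#include <linux/list.h>
#include <linux/pagemap.h>

/* Release every page on a private list. */
static void my_put_pages_list(struct list_head *pages)
{
	while (!list_empty(pages)) {
		struct page *victim;

		victim = list_entry(pages->prev, struct page, lru);
		list_del(&victim->lru);
		page_cache_release(victim);
	}
}
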
diff --git a/mm/shmem.c b/mm/shmem.c
index 4959535..c820b4f 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -177,7 +177,7 @@
 
 static struct super_operations shmem_ops;
 static const struct address_space_operations shmem_aops;
-static struct file_operations shmem_file_operations;
+static const struct file_operations shmem_file_operations;
 static struct inode_operations shmem_inode_operations;
 static struct inode_operations shmem_dir_inode_operations;
 static struct inode_operations shmem_special_inode_operations;
@@ -1943,7 +1943,7 @@
 	return security_inode_setsecurity(inode, name, value, size, flags);
 }
 
-struct xattr_handler shmem_xattr_security_handler = {
+static struct xattr_handler shmem_xattr_security_handler = {
 	.prefix = XATTR_SECURITY_PREFIX,
 	.list   = shmem_xattr_security_list,
 	.get    = shmem_xattr_security_get,
@@ -2263,7 +2263,7 @@
 static struct inode *shmem_alloc_inode(struct super_block *sb)
 {
 	struct shmem_inode_info *p;
-	p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, SLAB_KERNEL);
+	p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
 	if (!p)
 		return NULL;
 	return &p->vfs_inode;
@@ -2319,7 +2319,7 @@
 	.migratepage	= migrate_page,
 };
 
-static struct file_operations shmem_file_operations = {
+static const struct file_operations shmem_file_operations = {
 	.mmap		= shmem_mmap,
 #ifdef CONFIG_TMPFS
 	.llseek		= generic_file_llseek,
diff --git a/mm/slab.c b/mm/slab.c
index 3c4a7e3..068cb45 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -103,12 +103,12 @@
 #include	<linux/module.h>
 #include	<linux/rcupdate.h>
 #include	<linux/string.h>
+#include	<linux/uaccess.h>
 #include	<linux/nodemask.h>
 #include	<linux/mempolicy.h>
 #include	<linux/mutex.h>
 #include	<linux/rtmutex.h>
 
-#include	<asm/uaccess.h>
 #include	<asm/cacheflush.h>
 #include	<asm/tlbflush.h>
 #include	<asm/page.h>
@@ -313,7 +313,7 @@
 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
 			int node);
 static int enable_cpucache(struct kmem_cache *cachep);
-static void cache_reap(void *unused);
+static void cache_reap(struct work_struct *unused);
 
 /*
  * This function must be completely optimized away if a constant is passed to
@@ -730,7 +730,10 @@
 }
 #endif
 
-/* Guard access to the cache-chain. */
+/*
+ * 1. Guard access to the cache-chain.
+ * 2. Protect sanity of cpu_online_map against cpu hotplug events
+ */
 static DEFINE_MUTEX(cache_chain_mutex);
 static struct list_head cache_chain;
 
@@ -753,7 +756,7 @@
 	return g_cpucache_up == FULL;
 }
 
-static DEFINE_PER_CPU(struct work_struct, reap_work);
+static DEFINE_PER_CPU(struct delayed_work, reap_work);
 
 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 {
@@ -866,6 +869,22 @@
 	dump_stack();
 }
 
+/*
+ * By default on NUMA we use alien caches to stage the freeing of
+ * objects allocated from other nodes. This causes massive memory
+ * inefficiencies when using fake NUMA setup to split memory into a
+ * large number of small nodes, so it can be disabled on the command
+ * line.
+ */
+
+static int use_alien_caches __read_mostly = 1;
+static int __init noaliencache_setup(char *s)
+{
+	use_alien_caches = 0;
+	return 1;
+}
+__setup("noaliencache", noaliencache_setup);
+
 #ifdef CONFIG_NUMA
 /*
  * Special reaping functions for NUMA systems called from cache_reap().
@@ -916,16 +935,16 @@
  */
 static void __devinit start_cpu_timer(int cpu)
 {
-	struct work_struct *reap_work = &per_cpu(reap_work, cpu);
+	struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
 
 	/*
 	 * When this gets called from do_initcalls via cpucache_init(),
 	 * init_workqueues() has already run, so keventd will be setup
 	 * at that time.
 	 */
-	if (keventd_up() && reap_work->func == NULL) {
+	if (keventd_up() && reap_work->work.func == NULL) {
 		init_reap_node(cpu);
-		INIT_WORK(reap_work, cache_reap, NULL);
+		INIT_DELAYED_WORK(reap_work, cache_reap);
 		schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu);
 	}
 }
@@ -996,7 +1015,7 @@
 	return NULL;
 }
 
-static inline void *__cache_alloc_node(struct kmem_cache *cachep,
+static inline void *____cache_alloc_node(struct kmem_cache *cachep,
 		 gfp_t flags, int nodeid)
 {
 	return NULL;
@@ -1004,7 +1023,7 @@
 
 #else	/* CONFIG_NUMA */
 
-static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int);
+static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
 static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
 
 static struct array_cache **alloc_alien_cache(int node, int limit)
@@ -1114,7 +1133,7 @@
 	 * Make sure we are not freeing a object from another node to the array
 	 * cache on this cpu.
 	 */
-	if (likely(slabp->nodeid == node))
+	if (likely(slabp->nodeid == node) || unlikely(!use_alien_caches))
 		return 0;
 
 	l3 = cachep->nodelists[node];
@@ -1192,7 +1211,7 @@
 		list_for_each_entry(cachep, &cache_chain, next) {
 			struct array_cache *nc;
 			struct array_cache *shared;
-			struct array_cache **alien;
+			struct array_cache **alien = NULL;
 
 			nc = alloc_arraycache(node, cachep->limit,
 						cachep->batchcount);
@@ -1204,9 +1223,11 @@
 			if (!shared)
 				goto bad;
 
-			alien = alloc_alien_cache(node, cachep->limit);
-			if (!alien)
-				goto bad;
+			if (use_alien_caches) {
+				alien = alloc_alien_cache(node, cachep->limit);
+				if (!alien)
+					goto bad;
+			}
 			cachep->array[cpu] = nc;
 			l3 = cachep->nodelists[node];
 			BUG_ON(!l3);
@@ -1230,12 +1251,18 @@
 			kfree(shared);
 			free_alien_cache(alien);
 		}
-		mutex_unlock(&cache_chain_mutex);
 		break;
 	case CPU_ONLINE:
+		mutex_unlock(&cache_chain_mutex);
 		start_cpu_timer(cpu);
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
+	case CPU_DOWN_PREPARE:
+		mutex_lock(&cache_chain_mutex);
+		break;
+	case CPU_DOWN_FAILED:
+		mutex_unlock(&cache_chain_mutex);
+		break;
 	case CPU_DEAD:
 		/*
 		 * Even if all the cpus of a node are down, we don't free the
@@ -1246,8 +1273,8 @@
 		 * gets destroyed at kmem_cache_destroy().
 		 */
 		/* fall thru */
+#endif
 	case CPU_UP_CANCELED:
-		mutex_lock(&cache_chain_mutex);
 		list_for_each_entry(cachep, &cache_chain, next) {
 			struct array_cache *nc;
 			struct array_cache *shared;
@@ -1308,11 +1335,9 @@
 		}
 		mutex_unlock(&cache_chain_mutex);
 		break;
-#endif
 	}
 	return NOTIFY_OK;
 bad:
-	mutex_unlock(&cache_chain_mutex);
 	return NOTIFY_BAD;
 }
 
@@ -1580,12 +1605,7 @@
 	flags |= __GFP_COMP;
 #endif
 
-	/*
-	 * Under NUMA we want memory on the indicated node. We will handle
-	 * the needed fallback ourselves since we want to serve from our
-	 * per node object lists first for other nodes.
-	 */
-	flags |= cachep->gfpflags | GFP_THISNODE;
+	flags |= cachep->gfpflags;
 
 	page = alloc_pages_node(nodeid, flags, cachep->gfporder);
 	if (!page)
@@ -2098,15 +2118,12 @@
 	}
 
 	/*
-	 * Prevent CPUs from coming and going.
-	 * lock_cpu_hotplug() nests outside cache_chain_mutex
+	 * We use cache_chain_mutex to ensure a consistent view of
+	 * cpu_online_map as well.  Please see cpuup_callback
 	 */
-	lock_cpu_hotplug();
-
 	mutex_lock(&cache_chain_mutex);
 
 	list_for_each_entry(pc, &cache_chain, next) {
-		mm_segment_t old_fs = get_fs();
 		char tmp;
 		int res;
 
@@ -2115,9 +2132,7 @@
 		 * destroy its slab cache and no-one else reuses the vmalloc
 		 * area of the module.  Print a warning.
 		 */
-		set_fs(KERNEL_DS);
-		res = __get_user(tmp, pc->name);
-		set_fs(old_fs);
+		res = probe_kernel_address(pc->name, tmp);
 		if (res) {
 			printk("SLAB: cache with size %d has lost its name\n",
 			       pc->buffer_size);
@@ -2197,25 +2212,24 @@
 	if (flags & SLAB_RED_ZONE || flags & SLAB_STORE_USER)
 		ralign = BYTES_PER_WORD;
 
-	/* 2) arch mandated alignment: disables debug if necessary */
+	/* 2) arch mandated alignment */
 	if (ralign < ARCH_SLAB_MINALIGN) {
 		ralign = ARCH_SLAB_MINALIGN;
-		if (ralign > BYTES_PER_WORD)
-			flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
 	}
-	/* 3) caller mandated alignment: disables debug if necessary */
+	/* 3) caller mandated alignment */
 	if (ralign < align) {
 		ralign = align;
-		if (ralign > BYTES_PER_WORD)
-			flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
 	}
+	/* disable debug if necessary */
+	if (ralign > BYTES_PER_WORD)
+		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
 	/*
 	 * 4) Store it.
 	 */
 	align = ralign;
 
 	/* Get cache's description obj. */
-	cachep = kmem_cache_zalloc(&cache_cache, SLAB_KERNEL);
+	cachep = kmem_cache_zalloc(&cache_cache, GFP_KERNEL);
 	if (!cachep)
 		goto oops;
 
@@ -2326,7 +2340,6 @@
 		panic("kmem_cache_create(): failed to create slab `%s'\n",
 		      name);
 	mutex_unlock(&cache_chain_mutex);
-	unlock_cpu_hotplug();
 	return cachep;
 }
 EXPORT_SYMBOL(kmem_cache_create);
@@ -2444,6 +2457,7 @@
 	return nr_freed;
 }
 
+/* Called with cache_chain_mutex held to protect against cpu hotplug */
 static int __cache_shrink(struct kmem_cache *cachep)
 {
 	int ret = 0, i = 0;
@@ -2474,9 +2488,13 @@
  */
 int kmem_cache_shrink(struct kmem_cache *cachep)
 {
+	int ret;
 	BUG_ON(!cachep || in_interrupt());
 
-	return __cache_shrink(cachep);
+	mutex_lock(&cache_chain_mutex);
+	ret = __cache_shrink(cachep);
+	mutex_unlock(&cache_chain_mutex);
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_shrink);
 
@@ -2500,23 +2518,16 @@
 {
 	BUG_ON(!cachep || in_interrupt());
 
-	/* Don't let CPUs to come and go */
-	lock_cpu_hotplug();
-
 	/* Find the cache in the chain of caches. */
 	mutex_lock(&cache_chain_mutex);
 	/*
 	 * the chain is never empty, cache_cache is never destroyed
 	 */
 	list_del(&cachep->next);
-	mutex_unlock(&cache_chain_mutex);
-
 	if (__cache_shrink(cachep)) {
 		slab_error(cachep, "Can't free all objects");
-		mutex_lock(&cache_chain_mutex);
 		list_add(&cachep->next, &cache_chain);
 		mutex_unlock(&cache_chain_mutex);
-		unlock_cpu_hotplug();
 		return;
 	}
 
@@ -2524,7 +2535,7 @@
 		synchronize_rcu();
 
 	__kmem_cache_destroy(cachep);
-	unlock_cpu_hotplug();
+	mutex_unlock(&cache_chain_mutex);
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
 
@@ -2548,7 +2559,7 @@
 	if (OFF_SLAB(cachep)) {
 		/* Slab management obj is off-slab. */
 		slabp = kmem_cache_alloc_node(cachep->slabp_cache,
-					      local_flags, nodeid);
+					      local_flags & ~GFP_THISNODE, nodeid);
 		if (!slabp)
 			return NULL;
 	} else {
@@ -2618,7 +2629,7 @@
 
 static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
 {
-	if (flags & SLAB_DMA)
+	if (flags & GFP_DMA)
 		BUG_ON(!(cachep->gfpflags & GFP_DMA));
 	else
 		BUG_ON(cachep->gfpflags & GFP_DMA);
@@ -2689,10 +2700,10 @@
  * Grow (by 1) the number of slabs within a cache.  This is called by
  * kmem_cache_alloc() when there are no active objs left in a cache.
  */
-static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
+static int cache_grow(struct kmem_cache *cachep,
+		gfp_t flags, int nodeid, void *objp)
 {
 	struct slab *slabp;
-	void *objp;
 	size_t offset;
 	gfp_t local_flags;
 	unsigned long ctor_flags;
@@ -2702,12 +2713,12 @@
 	 * Be lazy and only check for valid flags here,  keeping it out of the
 	 * critical path in kmem_cache_alloc().
 	 */
-	BUG_ON(flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW));
-	if (flags & SLAB_NO_GROW)
+	BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK | __GFP_NO_GROW));
+	if (flags & __GFP_NO_GROW)
 		return 0;
 
 	ctor_flags = SLAB_CTOR_CONSTRUCTOR;
-	local_flags = (flags & SLAB_LEVEL_MASK);
+	local_flags = (flags & GFP_LEVEL_MASK);
 	if (!(local_flags & __GFP_WAIT))
 		/*
 		 * Not allowed to sleep.  Need to tell a constructor about
@@ -2744,12 +2755,14 @@
 	 * Get mem for the objs.  Attempt to allocate a physical page from
 	 * 'nodeid'.
 	 */
-	objp = kmem_getpages(cachep, flags, nodeid);
+	if (!objp)
+		objp = kmem_getpages(cachep, flags, nodeid);
 	if (!objp)
 		goto failed;
 
 	/* Get slab management. */
-	slabp = alloc_slabmgmt(cachep, objp, offset, local_flags, nodeid);
+	slabp = alloc_slabmgmt(cachep, objp, offset,
+			local_flags & ~GFP_THISNODE, nodeid);
 	if (!slabp)
 		goto opps1;
 
@@ -2987,7 +3000,7 @@
 
 	if (unlikely(!ac->avail)) {
 		int x;
-		x = cache_grow(cachep, flags, node);
+		x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL);
 
 		/* cache_grow can reenable interrupts, then ac could change. */
 		ac = cpu_cache_get(cachep);
@@ -3063,6 +3076,12 @@
 
 		cachep->ctor(objp, cachep, ctor_flags);
 	}
+#if ARCH_SLAB_MINALIGN
+	if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) {
+		printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
+		       objp, ARCH_SLAB_MINALIGN);
+	}
+#endif
 	return objp;
 }
 #else
@@ -3105,10 +3124,10 @@
 		objp = ____cache_alloc(cachep, flags);
 	/*
 	 * We may just have run out of memory on the local node.
-	 * __cache_alloc_node() knows how to locate memory on other nodes
+	 * ____cache_alloc_node() knows how to locate memory on other nodes
 	 */
  	if (NUMA_BUILD && !objp)
- 		objp = __cache_alloc_node(cachep, flags, numa_node_id());
+ 		objp = ____cache_alloc_node(cachep, flags, numa_node_id());
 	local_irq_restore(save_flags);
 	objp = cache_alloc_debugcheck_after(cachep, flags, objp,
 					    caller);
@@ -3135,15 +3154,17 @@
 	else if (current->mempolicy)
 		nid_alloc = slab_node(current->mempolicy);
 	if (nid_alloc != nid_here)
-		return __cache_alloc_node(cachep, flags, nid_alloc);
+		return ____cache_alloc_node(cachep, flags, nid_alloc);
 	return NULL;
 }
 
 /*
  * Fallback function if there was no memory available and no objects on a
- * certain node and we are allowed to fall back. We mimick the behavior of
- * the page allocator. We fall back according to a zonelist determined by
- * the policy layer while obeying cpuset constraints.
+ * certain node and fallback is permitted. First we scan all the
+ * allowed nodelists for available objects. If that fails then we
+ * perform an allocation without specifying a node. This allows the page
+ * allocator to do its reclaim / fallback magic. We then insert the
+ * slab into the proper nodelist and then allocate from it.
  */
 void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
 {
@@ -3151,15 +3172,51 @@
 					->node_zonelists[gfp_zone(flags)];
 	struct zone **z;
 	void *obj = NULL;
+	int nid;
 
+retry:
+	/*
+	 * Look through allowed nodes for objects available
+	 * from existing per node queues.
+	 */
 	for (z = zonelist->zones; *z && !obj; z++) {
-		int nid = zone_to_nid(*z);
+		nid = zone_to_nid(*z);
 
-		if (zone_idx(*z) <= ZONE_NORMAL &&
-				cpuset_zone_allowed(*z, flags) &&
-				cache->nodelists[nid])
-			obj = __cache_alloc_node(cache,
-					flags | __GFP_THISNODE, nid);
+		if (cpuset_zone_allowed(*z, flags) &&
+			cache->nodelists[nid] &&
+			cache->nodelists[nid]->free_objects)
+				obj = ____cache_alloc_node(cache,
+					flags | GFP_THISNODE, nid);
+	}
+
+	if (!obj) {
+		/*
+		 * This allocation will be performed within the constraints
+		 * of the current cpuset / memory policy requirements.
+		 * We may trigger various forms of reclaim on the allowed
+		 * set and go into memory reserves if necessary.
+		 */
+		obj = kmem_getpages(cache, flags, -1);
+		if (obj) {
+			/*
+			 * Insert into the appropriate per node queues
+			 */
+			nid = page_to_nid(virt_to_page(obj));
+			if (cache_grow(cache, flags, nid, obj)) {
+				obj = ____cache_alloc_node(cache,
+					flags | GFP_THISNODE, nid);
+				if (!obj)
+					/*
+					 * Another processor may allocate the
+					 * objects in the slab since we are
+					 * not holding any locks.
+					 */
+					goto retry;
+			} else {
+				kmem_freepages(cache, obj);
+				obj = NULL;
+			}
+		}
 	}
 	return obj;
 }
@@ -3167,7 +3224,7 @@
 /*
  * A interface to enable slab creation on nodeid
  */
-static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
+static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
 				int nodeid)
 {
 	struct list_head *entry;
@@ -3216,7 +3273,7 @@
 
 must_grow:
 	spin_unlock(&l3->list_lock);
-	x = cache_grow(cachep, flags, nodeid);
+	x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL);
 	if (x)
 		goto retry;
 
@@ -3434,35 +3491,59 @@
  * @flags: See kmalloc().
  * @nodeid: node number of the target node.
  *
- * Identical to kmem_cache_alloc, except that this function is slow
- * and can sleep. And it will allocate memory on the given node, which
- * can improve the performance for cpu bound structures.
- * New and improved: it will now make sure that the object gets
- * put on the correct node list so that there is no false sharing.
+ * Identical to kmem_cache_alloc but it will allocate memory on the given
+ * node, which can improve the performance for cpu bound structures.
+ *
+ * Fallback to other node is possible if __GFP_THISNODE is not set.
  */
-void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
+static __always_inline void *
+__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
+		int nodeid, void *caller)
 {
 	unsigned long save_flags;
-	void *ptr;
+	void *ptr = NULL;
 
 	cache_alloc_debugcheck_before(cachep, flags);
 	local_irq_save(save_flags);
 
-	if (nodeid == -1 || nodeid == numa_node_id() ||
-			!cachep->nodelists[nodeid])
-		ptr = ____cache_alloc(cachep, flags);
-	else
-		ptr = __cache_alloc_node(cachep, flags, nodeid);
-	local_irq_restore(save_flags);
+	if (unlikely(nodeid == -1))
+		nodeid = numa_node_id();
 
-	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr,
-					   __builtin_return_address(0));
+	if (likely(cachep->nodelists[nodeid])) {
+		if (nodeid == numa_node_id()) {
+			/*
+			 * Use the locally cached objects if possible.
+			 * However ____cache_alloc does not allow fallback
+			 * to other nodes. It may fail while we still have
+			 * objects on other nodes available.
+			 */
+			ptr = ____cache_alloc(cachep, flags);
+		}
+		if (!ptr) {
+			/* ____cache_alloc_node can fall back to other nodes */
+			ptr = ____cache_alloc_node(cachep, flags, nodeid);
+		}
+	} else {
+		/* Node not bootstrapped yet */
+		if (!(flags & __GFP_THISNODE))
+			ptr = fallback_alloc(cachep, flags);
+	}
+
+	local_irq_restore(save_flags);
+	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
 
 	return ptr;
 }
+
+void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
+{
+	return __cache_alloc_node(cachep, flags, nodeid,
+			__builtin_return_address(0));
+}
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
-void *__kmalloc_node(size_t size, gfp_t flags, int node)
+static __always_inline void *
+__do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
 {
 	struct kmem_cache *cachep;
 
@@ -3471,8 +3552,29 @@
 		return NULL;
 	return kmem_cache_alloc_node(cachep, flags, node);
 }
+
+#ifdef CONFIG_DEBUG_SLAB
+void *__kmalloc_node(size_t size, gfp_t flags, int node)
+{
+	return __do_kmalloc_node(size, flags, node,
+			__builtin_return_address(0));
+}
 EXPORT_SYMBOL(__kmalloc_node);
-#endif
+
+void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
+		int node, void *caller)
+{
+	return __do_kmalloc_node(size, flags, node, caller);
+}
+EXPORT_SYMBOL(__kmalloc_node_track_caller);
+#else
+void *__kmalloc_node(size_t size, gfp_t flags, int node)
+{
+	return __do_kmalloc_node(size, flags, node, NULL);
+}
+EXPORT_SYMBOL(__kmalloc_node);
+#endif /* CONFIG_DEBUG_SLAB */
+#endif /* CONFIG_NUMA */
 
 /**
  * __do_kmalloc - allocate memory
@@ -3583,13 +3685,15 @@
 	int node;
 	struct kmem_list3 *l3;
 	struct array_cache *new_shared;
-	struct array_cache **new_alien;
+	struct array_cache **new_alien = NULL;
 
 	for_each_online_node(node) {
 
-		new_alien = alloc_alien_cache(node, cachep->limit);
-		if (!new_alien)
-			goto fail;
+		if (use_alien_caches) {
+			new_alien = alloc_alien_cache(node, cachep->limit);
+			if (!new_alien)
+				goto fail;
+		}
 
 		new_shared = alloc_arraycache(node,
 				cachep->shared*cachep->batchcount,
@@ -3815,7 +3919,7 @@
  * If we cannot acquire the cache chain mutex then just give up - we'll try
  * again on the next iteration.
  */
-static void cache_reap(void *unused)
+static void cache_reap(struct work_struct *unused)
 {
 	struct kmem_cache *searchp;
 	struct kmem_list3 *l3;
@@ -4038,7 +4142,7 @@
  * + further values on SMP and with statistics enabled
  */
 
-struct seq_operations slabinfo_op = {
+const struct seq_operations slabinfo_op = {
 	.start = s_start,
 	.next = s_next,
 	.stop = s_stop,
@@ -4236,7 +4340,7 @@
 	return 0;
 }
 
-struct seq_operations slabstats_op = {
+const struct seq_operations slabstats_op = {
 	.start = leaks_start,
 	.next = s_next,
 	.stop = s_stop,
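cache_reap() above is converted to the reworked workqueue API: the per-cpu timer is now a struct delayed_work and the handler receives the work item itself rather than a private void pointer. A minimal sketch of the same pattern outside slab, with placeholder names and an arbitrary 2*HZ period:

#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/jiffies.h>

static DEFINE_PER_CPU(struct delayed_work, my_reap_work);

/* New-style handler: takes the work item, no extra data argument. */
static void my_reap(struct work_struct *unused)
{
	/* ... periodic per-cpu maintenance ... */

	/* re-arm on the cpu we are running on */
	schedule_delayed_work(&__get_cpu_var(my_reap_work), 2 * HZ);
}

static void my_start_reap_timer(int cpu)
{
	struct delayed_work *dw = &per_cpu(my_reap_work, cpu);

	INIT_DELAYED_WORK(dw, my_reap);
	schedule_delayed_work_on(cpu, dw, HZ + 3 * cpu);
}
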
diff --git a/mm/sparse.c b/mm/sparse.c
index b3c82ba..ac26eb0 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -24,6 +24,25 @@
 #endif
 EXPORT_SYMBOL(mem_section);
 
+#ifdef NODE_NOT_IN_PAGE_FLAGS
+/*
+ * If we did not store the node number in the page then we have to
+ * do a lookup in the section_to_node_table in order to find which
+ * node the page belongs to.
+ */
+#if MAX_NUMNODES <= 256
+static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
+#else
+static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
+#endif
+
+int page_to_nid(struct page *page)
+{
+	return section_to_node_table[page_to_section(page)];
+}
+EXPORT_SYMBOL(page_to_nid);
+#endif
+
 #ifdef CONFIG_SPARSEMEM_EXTREME
 static struct mem_section *sparse_index_alloc(int nid)
 {
@@ -49,6 +68,10 @@
 	struct mem_section *section;
 	int ret = 0;
 
+#ifdef NODE_NOT_IN_PAGE_FLAGS
+	section_to_node_table[section_nr] = nid;
+#endif
+
 	if (mem_section[root])
 		return -EEXIST;
 
diff --git a/mm/swap.c b/mm/swap.c
index 2e0e871..2ed7be3 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -57,9 +57,9 @@
 {
 	page = (struct page *)page_private(page);
 	if (put_page_testzero(page)) {
-		void (*dtor)(struct page *page);
+		compound_page_dtor *dtor;
 
-		dtor = (void (*)(struct page *))page[1].lru.next;
+		dtor = get_compound_page_dtor(page);
 		(*dtor)(page);
 	}
 }
@@ -216,7 +216,7 @@
 }
 
 #ifdef CONFIG_NUMA
-static void lru_add_drain_per_cpu(void *dummy)
+static void lru_add_drain_per_cpu(struct work_struct *dummy)
 {
 	lru_add_drain();
 }
@@ -226,7 +226,7 @@
  */
 int lru_add_drain_all(void)
 {
-	return schedule_on_each_cpu(lru_add_drain_per_cpu, NULL);
+	return schedule_on_each_cpu(lru_add_drain_per_cpu);
 }
 
 #else
@@ -514,5 +514,7 @@
 	 * Right now other parts of the system means that we
 	 * _really_ don't want to cluster much more
 	 */
+#ifdef CONFIG_HOTPLUG_CPU
 	hotcpu_notifier(cpu_swap_callback, 0);
+#endif
 }
diff --git a/mm/swapfile.c b/mm/swapfile.c
index a15def6..c543107 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -427,34 +427,48 @@
 
 #ifdef CONFIG_SOFTWARE_SUSPEND
 /*
- * Find the swap type that corresponds to given device (if any)
+ * Find the swap type that corresponds to the given device (if any).
  *
- * This is needed for software suspend and is done in such a way that inode
- * aliasing is allowed.
+ * @offset - number of the PAGE_SIZE-sized block of the device, starting
+ * from 0, in which the swap header is expected to be located.
+ *
+ * This is needed for the suspend to disk (aka swsusp).
  */
-int swap_type_of(dev_t device)
+int swap_type_of(dev_t device, sector_t offset)
 {
+	struct block_device *bdev = NULL;
 	int i;
 
+	if (device)
+		bdev = bdget(device);
+
 	spin_lock(&swap_lock);
 	for (i = 0; i < nr_swapfiles; i++) {
-		struct inode *inode;
+		struct swap_info_struct *sis = swap_info + i;
 
-		if (!(swap_info[i].flags & SWP_WRITEOK))
+		if (!(sis->flags & SWP_WRITEOK))
 			continue;
 
-		if (!device) {
+		if (!bdev) {
 			spin_unlock(&swap_lock);
 			return i;
 		}
-		inode = swap_info[i].swap_file->f_dentry->d_inode;
-		if (S_ISBLK(inode->i_mode) &&
-		    device == MKDEV(imajor(inode), iminor(inode))) {
-			spin_unlock(&swap_lock);
-			return i;
+		if (bdev == sis->bdev) {
+			struct swap_extent *se;
+
+			se = list_entry(sis->extent_list.next,
+					struct swap_extent, list);
+			if (se->start_block == offset) {
+				spin_unlock(&swap_lock);
+				bdput(bdev);
+				return i;
+			}
 		}
 	}
 	spin_unlock(&swap_lock);
+	if (bdev)
+		bdput(bdev);
+
 	return -ENODEV;
 }
 
@@ -931,6 +945,23 @@
 	}
 }
 
+#ifdef CONFIG_SOFTWARE_SUSPEND
+/*
+ * Get the (PAGE_SIZE-sized) block corresponding to the given offset on the
+ * swap device that corresponds to the given index in swap_info (swap type).
+ */
+sector_t swapdev_block(int swap_type, pgoff_t offset)
+{
+	struct swap_info_struct *sis;
+
+	if (swap_type >= nr_swapfiles)
+		return 0;
+
+	sis = swap_info + swap_type;
+	return (sis->flags & SWP_WRITEOK) ? map_swap_page(sis, offset) : 0;
+}
+#endif /* CONFIG_SOFTWARE_SUSPEND */
+
 /*
  * Free all of a swapdev's extent information
  */
@@ -1274,10 +1305,13 @@
 
 	mutex_lock(&swapon_mutex);
 
+	if (!l)
+		return SEQ_START_TOKEN;
+
 	for (i = 0; i < nr_swapfiles; i++, ptr++) {
 		if (!(ptr->flags & SWP_USED) || !ptr->swap_map)
 			continue;
-		if (!l--)
+		if (!--l)
 			return ptr;
 	}
 
@@ -1286,10 +1320,17 @@
 
 static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
 {
-	struct swap_info_struct *ptr = v;
+	struct swap_info_struct *ptr;
 	struct swap_info_struct *endptr = swap_info + nr_swapfiles;
 
-	for (++ptr; ptr < endptr; ptr++) {
+	if (v == SEQ_START_TOKEN)
+		ptr = swap_info;
+	else {
+		ptr = v;
+		ptr++;
+	}
+
+	for (; ptr < endptr; ptr++) {
 		if (!(ptr->flags & SWP_USED) || !ptr->swap_map)
 			continue;
 		++*pos;
@@ -1310,8 +1351,10 @@
 	struct file *file;
 	int len;
 
-	if (v == swap_info)
-		seq_puts(swap, "Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
+	if (ptr == SEQ_START_TOKEN) {
+		seq_puts(swap, "Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
+		return 0;
+	}
 
 	file = ptr->swap_file;
 	len = seq_path(swap, file->f_vfsmnt, file->f_dentry, " \t\n\\");
@@ -1325,7 +1368,7 @@
 	return 0;
 }
 
-static struct seq_operations swaps_op = {
+static const struct seq_operations swaps_op = {
 	.start =	swap_start,
 	.next =		swap_next,
 	.stop =		swap_stop,
@@ -1337,7 +1380,7 @@
 	return seq_open(file, &swaps_op);
 }
 
-static struct file_operations proc_swaps_operations = {
+static const struct file_operations proc_swaps_operations = {
 	.open		= swaps_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -1540,6 +1583,11 @@
 		error = -EINVAL;
 		if (!maxpages)
 			goto bad_swap;
+		if (swapfilesize && maxpages > swapfilesize) {
+			printk(KERN_WARNING
+			       "Swap area shorter than signature indicates\n");
+			goto bad_swap;
+		}
 		if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
 			goto bad_swap;
 		if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
@@ -1567,12 +1615,6 @@
 			goto bad_swap;
 	}
 
-	if (swapfilesize && maxpages > swapfilesize) {
-		printk(KERN_WARNING
-		       "Swap area shorter than signature indicates\n");
-		error = -EINVAL;
-		goto bad_swap;
-	}
 	if (nr_good_pages) {
 		p->swap_map[0] = SWAP_MAP_BAD;
 		p->max = maxpages;
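
The swap_start()/swap_next()/swap_show() hunks above switch /proc/swaps to the SEQ_START_TOKEN convention: position zero maps to a sentinel, so the show routine prints the header exactly once regardless of which swap slots are in use. A self-contained userspace sketch of the same convention over a plain array; the sentinel value, entry data and function names are made up for illustration.

#include <stdio.h>

#define START_TOKEN ((void *)1)	/* sentinel, like the kernel's SEQ_START_TOKEN */

static const char *entries[] = { "swapfile-a", "swapfile-b" };
#define NENTRIES (sizeof(entries) / sizeof(entries[0]))

/* start(): position 0 is the header sentinel, 1..N are real entries. */
static void *demo_start(long pos)
{
	if (pos == 0)
		return START_TOKEN;
	return (pos <= (long)NENTRIES) ? (void *)&entries[pos - 1] : NULL;
}

static void *demo_next(long *pos)
{
	++*pos;
	return demo_start(*pos);
}

static void demo_show(void *v)
{
	if (v == START_TOKEN) {
		printf("Filename\tType\n");
		return;
	}
	printf("%s\tfile\n", *(const char **)v);
}

int main(void)
{
	long pos = 0;

	for (void *v = demo_start(pos); v; v = demo_next(&pos))
		demo_show(v);
	return 0;
}
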
diff --git a/mm/thrash.c b/mm/thrash.c
index f4c560b..9ef9071 100644
--- a/mm/thrash.c
+++ b/mm/thrash.c
@@ -7,100 +7,74 @@
  *
  * Simple token based thrashing protection, using the algorithm
  * described in:  http://www.cs.wm.edu/~sjiang/token.pdf
+ *
+ * Sep 2006, Ashwin Chaugule <ashwin.chaugule@celunite.com>
+ * Improved algorithm for passing the token:
+ * Each task has a priority which is incremented if it contends
+ * for the token within a shorter interval than on its previous attempt.
+ * When a task acquires the token, its priority is boosted to keep
+ * the token from bouncing around too often and to let the task make
+ * some progress in its execution.
  */
+
 #include <linux/jiffies.h>
 #include <linux/mm.h>
 #include <linux/sched.h>
 #include <linux/swap.h>
 
 static DEFINE_SPINLOCK(swap_token_lock);
-static unsigned long swap_token_timeout;
-static unsigned long swap_token_check;
-struct mm_struct * swap_token_mm = &init_mm;
+struct mm_struct *swap_token_mm;
+static unsigned int global_faults;
 
-#define SWAP_TOKEN_CHECK_INTERVAL (HZ * 2)
-#define SWAP_TOKEN_TIMEOUT	(300 * HZ)
-/*
- * Currently disabled; Needs further code to work at HZ * 300.
- */
-unsigned long swap_token_default_timeout = SWAP_TOKEN_TIMEOUT;
-
-/*
- * Take the token away if the process had no page faults
- * in the last interval, or if it has held the token for
- * too long.
- */
-#define SWAP_TOKEN_ENOUGH_RSS 1
-#define SWAP_TOKEN_TIMED_OUT 2
-static int should_release_swap_token(struct mm_struct *mm)
-{
-	int ret = 0;
-	if (!mm->recent_pagein)
-		ret = SWAP_TOKEN_ENOUGH_RSS;
-	else if (time_after(jiffies, swap_token_timeout))
-		ret = SWAP_TOKEN_TIMED_OUT;
-	mm->recent_pagein = 0;
-	return ret;
-}
-
-/*
- * Try to grab the swapout protection token.  We only try to
- * grab it once every TOKEN_CHECK_INTERVAL, both to prevent
- * SMP lock contention and to check that the process that held
- * the token before is no longer thrashing.
- */
 void grab_swap_token(void)
 {
-	struct mm_struct *mm;
-	int reason;
+	int current_interval;
 
-	/* We have the token. Let others know we still need it. */
-	if (has_swap_token(current->mm)) {
-		current->mm->recent_pagein = 1;
-		if (unlikely(!swap_token_default_timeout))
-			disable_swap_token();
+	global_faults++;
+
+	current_interval = global_faults - current->mm->faultstamp;
+
+	if (!spin_trylock(&swap_token_lock))
 		return;
+
+	/* First come first served */
+	if (swap_token_mm == NULL) {
+		current->mm->token_priority = current->mm->token_priority + 2;
+		swap_token_mm = current->mm;
+		goto out;
 	}
 
-	if (time_after(jiffies, swap_token_check)) {
-
-		if (!swap_token_default_timeout) {
-			swap_token_check = jiffies + SWAP_TOKEN_CHECK_INTERVAL;
-			return;
+	if (current->mm != swap_token_mm) {
+		if (current_interval < current->mm->last_interval)
+			current->mm->token_priority++;
+		else {
+			current->mm->token_priority--;
+			if (unlikely(current->mm->token_priority < 0))
+				current->mm->token_priority = 0;
 		}
-
-		/* ... or if we recently held the token. */
-		if (time_before(jiffies, current->mm->swap_token_time))
-			return;
-
-		if (!spin_trylock(&swap_token_lock))
-			return;
-
-		swap_token_check = jiffies + SWAP_TOKEN_CHECK_INTERVAL;
-
-		mm = swap_token_mm;
-		if ((reason = should_release_swap_token(mm))) {
-			unsigned long eligible = jiffies;
-			if (reason == SWAP_TOKEN_TIMED_OUT) {
-				eligible += swap_token_default_timeout;
-			}
-			mm->swap_token_time = eligible;
-			swap_token_timeout = jiffies + swap_token_default_timeout;
+		/* Check if we deserve the token */
+		if (current->mm->token_priority >
+				swap_token_mm->token_priority) {
+			current->mm->token_priority += 2;
 			swap_token_mm = current->mm;
 		}
-		spin_unlock(&swap_token_lock);
+	} else {
+		/* Token holder came in again! */
+		current->mm->token_priority += 2;
 	}
-	return;
+
+out:
+	current->mm->faultstamp = global_faults;
+	current->mm->last_interval = current_interval;
+	spin_unlock(&swap_token_lock);
+	return;
 }
 
 /* Called on process exit. */
 void __put_swap_token(struct mm_struct *mm)
 {
 	spin_lock(&swap_token_lock);
-	if (likely(mm == swap_token_mm)) {
-		mm->swap_token_time = jiffies + SWAP_TOKEN_CHECK_INTERVAL;
-		swap_token_mm = &init_mm;
-		swap_token_check = jiffies;
-	}
+	if (likely(mm == swap_token_mm))
+		swap_token_mm = NULL;
 	spin_unlock(&swap_token_lock);
 }
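
A compressed userspace model of the priority rule laid out in the new mm/thrash.c header comment: a task's priority rises when it faults again within a shorter interval than before, the current holder is boosted whenever it faults, and the token only moves when a contender's priority exceeds the holder's. Locking is omitted and the types and names are simplified stand-ins for the mm_struct fields, not the kernel's.

#include <stdio.h>

struct task {
	const char *name;
	int token_priority;
	unsigned int faultstamp;
	unsigned int last_interval;
};

static struct task *token_holder;
static unsigned int global_faults;

static void grab_token(struct task *t)
{
	unsigned int interval;

	global_faults++;
	interval = global_faults - t->faultstamp;

	if (!token_holder) {			/* first come, first served */
		t->token_priority += 2;
		token_holder = t;
	} else if (t != token_holder) {
		if (interval < t->last_interval)
			t->token_priority++;	/* faulting harder than before */
		else if (t->token_priority > 0)
			t->token_priority--;
		if (t->token_priority > token_holder->token_priority) {
			t->token_priority += 2;	/* deserves the token */
			token_holder = t;
		}
	} else {
		t->token_priority += 2;		/* holder faulted again */
	}

	t->faultstamp = global_faults;
	t->last_interval = interval;
}

int main(void)
{
	struct task a = { "a", 0, 0, 0 }, b = { "b", 0, 0, 0 };

	grab_token(&a);		/* free token: a takes it, priority 2 */
	grab_token(&a);		/* holder faults again: boosted to 4 */
	grab_token(&b);		/* first contention: interval recorded */
	grab_token(&b);		/* shorter interval than before: priority 1 */
	printf("holder=%s a=%d b=%d\n",
	       token_holder->name, a.token_priority, b.token_priority);
	return 0;
}

With this sequence the token stays with a (priority 4) while b only climbs to 1; the token would move once a contender's accumulated priority passed the holder's.
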
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 518540a..093f5fe 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -36,6 +36,7 @@
 #include <linux/rwsem.h>
 #include <linux/delay.h>
 #include <linux/kthread.h>
+#include <linux/freezer.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -1172,11 +1173,12 @@
 			if (!zone_watermark_ok(zone, order, zone->pages_high,
 					       0, 0)) {
 				end_zone = i;
-				goto scan;
+				break;
 			}
 		}
-		goto out;
-scan:
+		if (i < 0)
+			goto out;
+
 		for (i = 0; i <= end_zone; i++) {
 			struct zone *zone = pgdat->node_zones + i;
 
@@ -1259,6 +1261,9 @@
 	}
 	if (!all_zones_ok) {
 		cond_resched();
+
+		try_to_freeze();
+
 		goto loop_again;
 	}
 
@@ -1508,7 +1513,6 @@
 }
 #endif
 
-#ifdef CONFIG_HOTPLUG_CPU
 /* It's optimal to keep kswapds on the same CPUs as their memory, but
    not required for correctness.  So if the last cpu in a node goes
    away, we get changed to run anywhere: as the first one comes back,
@@ -1529,7 +1533,6 @@
 	}
 	return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */
 
 /*
  * This kswapd start function will be called by init and node-hot-add.
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 8614e8f..dc005a0 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -430,7 +430,7 @@
 	return 0;
 }
 
-struct seq_operations fragmentation_op = {
+const struct seq_operations fragmentation_op = {
 	.start	= frag_start,
 	.next	= frag_next,
 	.stop	= frag_stop,
@@ -452,7 +452,7 @@
 #define TEXTS_FOR_ZONES(xx) xx "_dma", TEXT_FOR_DMA32(xx) xx "_normal", \
 					TEXT_FOR_HIGHMEM(xx)
 
-static char *vmstat_text[] = {
+static const char * const vmstat_text[] = {
 	/* Zoned VM counters */
 	"nr_anon_pages",
 	"nr_mapped",
@@ -597,7 +597,7 @@
 	return 0;
 }
 
-struct seq_operations zoneinfo_op = {
+const struct seq_operations zoneinfo_op = {
 	.start	= frag_start, /* iterate over all zones. The same as in
 			       * fragmentation. */
 	.next	= frag_next,
@@ -660,7 +660,7 @@
 	m->private = NULL;
 }
 
-struct seq_operations vmstat_op = {
+const struct seq_operations vmstat_op = {
 	.start	= vmstat_start,
 	.next	= vmstat_next,
 	.stop	= vmstat_stop,
@@ -679,13 +679,13 @@
 		void *hcpu)
 {
 	switch (action) {
-		case CPU_UP_PREPARE:
-		case CPU_UP_CANCELED:
-		case CPU_DEAD:
-			refresh_zone_stat_thresholds();
-			break;
-		default:
-			break;
+	case CPU_UP_PREPARE:
+	case CPU_UP_CANCELED:
+	case CPU_DEAD:
+		refresh_zone_stat_thresholds();
+		break;
+	default:
+		break;
 	}
 	return NOTIFY_OK;
 }
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 5946ec6..3fc0abe 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -1454,7 +1454,7 @@
 
 #define LEC_ARP_REFRESH_INTERVAL (3*HZ)
 
-static void lec_arp_check_expire(void *data);
+static void lec_arp_check_expire(struct work_struct *work);
 static void lec_arp_expire_arp(unsigned long data);
 
 /* 
@@ -1477,7 +1477,7 @@
         INIT_HLIST_HEAD(&priv->lec_no_forward);
         INIT_HLIST_HEAD(&priv->mcast_fwds);
 	spin_lock_init(&priv->lec_arp_lock);
-	INIT_WORK(&priv->lec_arp_work, lec_arp_check_expire, priv);
+	INIT_DELAYED_WORK(&priv->lec_arp_work, lec_arp_check_expire);
 	schedule_delayed_work(&priv->lec_arp_work, LEC_ARP_REFRESH_INTERVAL);
 }
 
@@ -1875,10 +1875,11 @@
  *       to ESI_FORWARD_DIRECT. This causes the flush period to end
  *       regardless of the progress of the flush protocol.
  */
-static void lec_arp_check_expire(void *data)
+static void lec_arp_check_expire(struct work_struct *work)
 {
 	unsigned long flags;
-	struct lec_priv *priv = data;
+	struct lec_priv *priv =
+		container_of(work, struct lec_priv, lec_arp_work.work);
 	struct hlist_node *node, *next;
 	struct lec_arp_table *entry;
 	unsigned long now;
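
The lec.c hunks above show the shape of the workqueue API conversion that repeats throughout this merge: the handler now receives the struct work_struct (or the .work member of a struct delayed_work) and recovers its enclosing object with container_of() instead of being handed a void * cookie. A plain-C demonstration of the container_of idea itself, with hypothetical struct and function names; the kernel macro additionally type-checks the member, which this stripped-down version omits.

#include <stddef.h>
#include <stdio.h>

/* Stripped-down container_of(): member pointer -> enclosing structure. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work { void (*func)(struct work *w); };

struct lec_priv_demo {		/* hypothetical stand-in for struct lec_priv */
	int itf_num;
	struct work arp_work;	/* embedded, like lec_arp_work */
};

static void arp_check_expire_demo(struct work *w)
{
	/* Recover the private data from the embedded work item. */
	struct lec_priv_demo *priv =
		container_of(w, struct lec_priv_demo, arp_work);

	printf("expire check for interface %d\n", priv->itf_num);
}

int main(void)
{
	struct lec_priv_demo priv = { .itf_num = 3 };

	priv.arp_work.func = arp_check_expire_demo;
	priv.arp_work.func(&priv.arp_work);	/* what the workqueue would do */
	return 0;
}

When the handler needs something container_of() cannot reach, the patches add a back-pointer instead, as the ieee80211softmac scaninfo change later in this merge does with its new mac field.
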
diff --git a/net/atm/lec.h b/net/atm/lec.h
index 24cc95f..99136ba 100644
--- a/net/atm/lec.h
+++ b/net/atm/lec.h
@@ -92,7 +92,7 @@
 	spinlock_t lec_arp_lock;
 	struct atm_vcc *mcast_vcc;		/* Default Multicast Send VCC */
 	struct atm_vcc *lecd;
-	struct work_struct lec_arp_work;	/* C10 */
+	struct delayed_work lec_arp_work;	/* C10 */
 	unsigned int maximum_unknown_frame_count;
 						/*
 						 * Within the period of time defined by this variable, the client will send
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 3eeeb7a..d4c9356 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -237,9 +237,9 @@
 	kfree(data);
 }
 
-static void add_conn(void *data)
+static void add_conn(struct work_struct *work)
 {
-	struct hci_conn *conn = data;
+	struct hci_conn *conn = container_of(work, struct hci_conn, work);
 	int i;
 
 	if (device_register(&conn->dev) < 0) {
@@ -272,14 +272,14 @@
 
 	dev_set_drvdata(&conn->dev, conn);
 
-	INIT_WORK(&conn->work, add_conn, (void *) conn);
+	INIT_WORK(&conn->work, add_conn);
 
 	schedule_work(&conn->work);
 }
 
-static void del_conn(void *data)
+static void del_conn(struct work_struct *work)
 {
-	struct hci_conn *conn = data;
+	struct hci_conn *conn = container_of(work, struct hci_conn, work);
 	device_del(&conn->dev);
 }
 
@@ -287,7 +287,7 @@
 {
 	BT_DBG("conn %p", conn);
 
-	INIT_WORK(&conn->work, del_conn, (void *) conn);
+	INIT_WORK(&conn->work, del_conn);
 
 	schedule_work(&conn->work);
 }
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index d9f0486..8ca448d 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -23,7 +23,7 @@
 #include <asm/atomic.h>
 #include "br_private.h"
 
-static kmem_cache_t *br_fdb_cache __read_mostly;
+static struct kmem_cache *br_fdb_cache __read_mostly;
 static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
 		      const unsigned char *addr);
 
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index f753c40..55bb263 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -77,12 +77,16 @@
  * Called from work queue to allow for calling functions that
  * might sleep (such as speed check), and to debounce.
  */
-static void port_carrier_check(void *arg)
+static void port_carrier_check(struct work_struct *work)
 {
-	struct net_device *dev = arg;
 	struct net_bridge_port *p;
+	struct net_device *dev;
 	struct net_bridge *br;
 
+	dev = container_of(work, struct net_bridge_port,
+			   carrier_check.work)->dev;
+	work_release(work);
+
 	rtnl_lock();
 	p = dev->br_port;
 	if (!p)
@@ -276,7 +280,7 @@
 	p->port_no = index;
 	br_init_port(p);
 	p->state = BR_STATE_DISABLED;
-	INIT_WORK(&p->carrier_check, port_carrier_check, dev);
+	INIT_DELAYED_WORK_NAR(&p->carrier_check, port_carrier_check);
 	br_stp_port_timer_init(p);
 
 	kobject_init(&p->kobj);
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 74258d8..3a534e9 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -82,7 +82,7 @@
 	struct timer_list		hold_timer;
 	struct timer_list		message_age_timer;
 	struct kobject			kobj;
-	struct work_struct		carrier_check;
+	struct delayed_work		carrier_check;
 	struct rcu_head			rcu;
 };
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 59d058a..e660cb5 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3340,7 +3340,6 @@
 
 EXPORT_SYMBOL(unregister_netdev);
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int dev_cpu_callback(struct notifier_block *nfb,
 			    unsigned long action,
 			    void *ocpu)
@@ -3384,7 +3383,6 @@
 
 	return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */
 
 #ifdef CONFIG_NET_DMA
 /**
diff --git a/net/core/dst.c b/net/core/dst.c
index 1a5e49d..836ec66 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -125,7 +125,7 @@
 		if (ops->gc())
 			return NULL;
 	}
-	dst = kmem_cache_alloc(ops->kmem_cachep, SLAB_ATOMIC);
+	dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
 	if (!dst)
 		return NULL;
 	memset(dst, 0, ops->entry_size);
diff --git a/net/core/flow.c b/net/core/flow.c
index b16d31a..d137f97 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -44,7 +44,7 @@
 
 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
 
-static kmem_cache_t *flow_cachep __read_mostly;
+static struct kmem_cache *flow_cachep __read_mostly;
 
 static int flow_lwm, flow_hwm;
 
@@ -211,7 +211,7 @@
 		if (flow_count(cpu) > flow_hwm)
 			flow_cache_shrink(cpu);
 
-		fle = kmem_cache_alloc(flow_cachep, SLAB_ATOMIC);
+		fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
 		if (fle) {
 			fle->next = *head;
 			*head = fle;
@@ -340,7 +340,6 @@
 	tasklet_init(tasklet, flow_cache_flush_tasklet, 0);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int flow_cache_cpu(struct notifier_block *nfb,
 			  unsigned long action,
 			  void *hcpu)
@@ -349,7 +348,6 @@
 		__flow_cache_shrink((unsigned long)hcpu, 0);
 	return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */
 
 static int __init flow_cache_init(void)
 {
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 4b36114..549a2ce 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -34,8 +34,8 @@
 static unsigned long linkwatch_flags;
 static unsigned long linkwatch_nextevent;
 
-static void linkwatch_event(void *dummy);
-static DECLARE_WORK(linkwatch_work, linkwatch_event, NULL);
+static void linkwatch_event(struct work_struct *dummy);
+static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event);
 
 static LIST_HEAD(lweventlist);
 static DEFINE_SPINLOCK(lweventlist_lock);
@@ -127,7 +127,7 @@
 }       
 
 
-static void linkwatch_event(void *dummy)
+static void linkwatch_event(struct work_struct *dummy)
 {
 	/* Limit the number of linkwatch events to one
 	 * per second so that a runaway driver does not
@@ -171,10 +171,9 @@
 			unsigned long delay = linkwatch_nextevent - jiffies;
 
 			/* If we wrap around we'll delay it by at most HZ. */
-			if (!delay || delay > HZ)
-				schedule_work(&linkwatch_work);
-			else
-				schedule_delayed_work(&linkwatch_work, delay);
+			if (delay > HZ)
+				delay = 0;
+			schedule_delayed_work(&linkwatch_work, delay);
 		}
 	}
 }
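
The linkwatch hunk above collapses the schedule_work()/schedule_delayed_work() pair into one delayed call: because the delay is computed with unsigned arithmetic, a wrapped or already-elapsed deadline shows up as a huge value, and anything larger than HZ is clamped to an immediate run. A short userspace illustration of that unsigned-subtraction behaviour, with made-up tick values:

#include <stdio.h>

int main(void)
{
	unsigned long hz = 100;			/* pretend HZ */
	unsigned long next = 1000, now = 1005;	/* deadline already passed */
	unsigned long delay = next - now;	/* unsigned: wraps to a huge value */

	if (delay > hz)
		delay = 0;			/* schedule immediately instead */
	printf("delay = %lu ticks\n", delay);	/* prints: delay = 0 ticks */
	return 0;
}
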
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index ba509a4..0ab1987 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -251,7 +251,7 @@
 			goto out_entries;
 	}
 
-	n = kmem_cache_alloc(tbl->kmem_cachep, SLAB_ATOMIC);
+	n = kmem_cache_alloc(tbl->kmem_cachep, GFP_ATOMIC);
 	if (!n)
 		goto out_entries;
 
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 3c58846..b3c559b 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -50,9 +50,10 @@
 static void zap_completion_queue(void);
 static void arp_reply(struct sk_buff *skb);
 
-static void queue_process(void *p)
+static void queue_process(struct work_struct *work)
 {
-	struct netpoll_info *npinfo = p;
+	struct netpoll_info *npinfo =
+		container_of(work, struct netpoll_info, tx_work.work);
 	struct sk_buff *skb;
 
 	while ((skb = skb_dequeue(&npinfo->txq))) {
@@ -72,8 +73,6 @@
 			schedule_delayed_work(&npinfo->tx_work, HZ/10);
 			return;
 		}
-
-		netif_tx_unlock_bh(dev);
 	}
 }
 
@@ -263,7 +262,7 @@
 
 	if (status != NETDEV_TX_OK) {
 		skb_queue_tail(&npinfo->txq, skb);
-		schedule_work(&npinfo->tx_work);
+		schedule_delayed_work(&npinfo->tx_work, 0);
 	}
 }
 
@@ -628,7 +627,7 @@
 		spin_lock_init(&npinfo->rx_lock);
 		skb_queue_head_init(&npinfo->arp_tx);
 		skb_queue_head_init(&npinfo->txq);
-		INIT_WORK(&npinfo->tx_work, queue_process, npinfo);
+		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
 
 		atomic_set(&npinfo->refcnt, 1);
 	} else {
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 8e1c385..de7801d 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -68,8 +68,8 @@
 
 #include "kmap_skb.h"
 
-static kmem_cache_t *skbuff_head_cache __read_mostly;
-static kmem_cache_t *skbuff_fclone_cache __read_mostly;
+static struct kmem_cache *skbuff_head_cache __read_mostly;
+static struct kmem_cache *skbuff_fclone_cache __read_mostly;
 
 /*
  *	Keep out-of-line to prevent kernel bloat.
@@ -132,6 +132,7 @@
  *	@gfp_mask: allocation mask
  *	@fclone: allocate from fclone cache instead of head cache
  *		and allocate a cloned (child) skb
+ *	@node: numa node to allocate memory on
  *
  *	Allocate a new &sk_buff. The returned buffer has no headroom and a
  *	tail room of size bytes. The object has a reference count of one.
@@ -141,9 +142,9 @@
  *	%GFP_ATOMIC.
  */
 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
-			    int fclone)
+			    int fclone, int node)
 {
-	kmem_cache_t *cache;
+	struct kmem_cache *cache;
 	struct skb_shared_info *shinfo;
 	struct sk_buff *skb;
 	u8 *data;
@@ -151,14 +152,14 @@
 	cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;
 
 	/* Get the HEAD */
-	skb = kmem_cache_alloc(cache, gfp_mask & ~__GFP_DMA);
+	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
 	if (!skb)
 		goto out;
 
 	/* Get the DATA. Size must match skb_add_mtu(). */
 	size = SKB_DATA_ALIGN(size);
-	data = kmalloc_track_caller(size + sizeof(struct skb_shared_info),
-			gfp_mask);
+	data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
+			gfp_mask, node);
 	if (!data)
 		goto nodata;
 
@@ -210,7 +211,7 @@
  *	Buffers may only be allocated from interrupts using a @gfp_mask of
  *	%GFP_ATOMIC.
  */
-struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
+struct sk_buff *alloc_skb_from_cache(struct kmem_cache *cp,
 				     unsigned int size,
 				     gfp_t gfp_mask)
 {
@@ -267,9 +268,10 @@
 struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
 		unsigned int length, gfp_t gfp_mask)
 {
+	int node = dev->class_dev.dev ? dev_to_node(dev->class_dev.dev) : -1;
 	struct sk_buff *skb;
 
-	skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
+	skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node);
 	if (likely(skb)) {
 		skb_reserve(skb, NET_SKB_PAD);
 		skb->dev = dev;
diff --git a/net/core/sock.c b/net/core/sock.c
index 419c7d3..0ed5b4f 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -810,24 +810,11 @@
  */
 static void inline sock_lock_init(struct sock *sk)
 {
-	spin_lock_init(&sk->sk_lock.slock);
-	sk->sk_lock.owner = NULL;
-	init_waitqueue_head(&sk->sk_lock.wq);
-	/*
-	 * Make sure we are not reinitializing a held lock:
-	 */
-	debug_check_no_locks_freed((void *)&sk->sk_lock, sizeof(sk->sk_lock));
-
-	/*
-	 * Mark both the sk_lock and the sk_lock.slock as a
-	 * per-address-family lock class:
-	 */
-	lockdep_set_class_and_name(&sk->sk_lock.slock,
-				   af_family_slock_keys + sk->sk_family,
-				   af_family_slock_key_strings[sk->sk_family]);
-	lockdep_init_map(&sk->sk_lock.dep_map,
-			 af_family_key_strings[sk->sk_family],
-			 af_family_keys + sk->sk_family, 0);
+	sock_lock_init_class_and_name(sk,
+			af_family_slock_key_strings[sk->sk_family],
+			af_family_slock_keys + sk->sk_family,
+			af_family_key_strings[sk->sk_family],
+			af_family_keys + sk->sk_family);
 }
 
 /**
@@ -841,7 +828,7 @@
 		      struct proto *prot, int zero_it)
 {
 	struct sock *sk = NULL;
-	kmem_cache_t *slab = prot->slab;
+	struct kmem_cache *slab = prot->slab;
 
 	if (slab != NULL)
 		sk = kmem_cache_alloc(slab, priority);
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c
index bdf1bb7..1f4727d 100644
--- a/net/dccp/ackvec.c
+++ b/net/dccp/ackvec.c
@@ -21,8 +21,8 @@
 
 #include <net/sock.h>
 
-static kmem_cache_t *dccp_ackvec_slab;
-static kmem_cache_t *dccp_ackvec_record_slab;
+static struct kmem_cache *dccp_ackvec_slab;
+static struct kmem_cache *dccp_ackvec_record_slab;
 
 static struct dccp_ackvec_record *dccp_ackvec_record_new(void)
 {
diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c
index ff05e59..d8cf92f 100644
--- a/net/dccp/ccid.c
+++ b/net/dccp/ccid.c
@@ -55,9 +55,9 @@
 #define ccids_read_unlock() do { } while(0)
 #endif
 
-static kmem_cache_t *ccid_kmem_cache_create(int obj_size, const char *fmt,...)
+static struct kmem_cache *ccid_kmem_cache_create(int obj_size, const char *fmt,...)
 {
-	kmem_cache_t *slab;
+	struct kmem_cache *slab;
 	char slab_name_fmt[32], *slab_name;
 	va_list args;
 
@@ -75,7 +75,7 @@
 	return slab;
 }
 
-static void ccid_kmem_cache_destroy(kmem_cache_t *slab)
+static void ccid_kmem_cache_destroy(struct kmem_cache *slab)
 {
 	if (slab != NULL) {
 		const char *name = kmem_cache_name(slab);
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
index c7c2951..bcc2d12 100644
--- a/net/dccp/ccid.h
+++ b/net/dccp/ccid.h
@@ -27,9 +27,9 @@
 	unsigned char	ccid_id;
 	const char	*ccid_name;
 	struct module	*ccid_owner;
-	kmem_cache_t	*ccid_hc_rx_slab;
+	struct kmem_cache	*ccid_hc_rx_slab;
 	__u32		ccid_hc_rx_obj_size;
-	kmem_cache_t	*ccid_hc_tx_slab;
+	struct kmem_cache	*ccid_hc_tx_slab;
 	__u32		ccid_hc_tx_obj_size;
 	int		(*ccid_hc_rx_init)(struct ccid *ccid, struct sock *sk);
 	int		(*ccid_hc_tx_init)(struct ccid *ccid, struct sock *sk);
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index cf8c07b..66a27b9 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -295,7 +295,7 @@
 	new_packet = dccp_tx_hist_head(&hctx->ccid3hctx_hist);
 	if (new_packet == NULL || new_packet->dccphtx_sent) {
 		new_packet = dccp_tx_hist_entry_new(ccid3_tx_hist,
-						    SLAB_ATOMIC);
+						    GFP_ATOMIC);
 
 		if (unlikely(new_packet == NULL)) {
 			DCCP_WARN("%s, sk=%p, not enough mem to add to history,"
@@ -889,7 +889,7 @@
 		/* new loss event detected */
 		/* calculate last interval length */
 		seq_temp = dccp_delta_seqno(head->dccplih_seqno, seq_loss);
-		entry = dccp_li_hist_entry_new(ccid3_li_hist, SLAB_ATOMIC);
+		entry = dccp_li_hist_entry_new(ccid3_li_hist, GFP_ATOMIC);
 
 		if (entry == NULL) {
 			DCCP_BUG("out of memory - can not allocate entry");
@@ -1011,7 +1011,7 @@
 	}
 
 	packet = dccp_rx_hist_entry_new(ccid3_rx_hist, sk, opt_recv->dccpor_ndp,
-					skb, SLAB_ATOMIC);
+					skb, GFP_ATOMIC);
 	if (unlikely(packet == NULL)) {
 		DCCP_WARN("%s, sk=%p, Not enough mem to add rx packet "
 			  "to history, consider it lost!\n", dccp_role(sk), sk);
diff --git a/net/dccp/ccids/lib/loss_interval.c b/net/dccp/ccids/lib/loss_interval.c
index 48b9b93..0a0baef 100644
--- a/net/dccp/ccids/lib/loss_interval.c
+++ b/net/dccp/ccids/lib/loss_interval.c
@@ -125,7 +125,7 @@
 	int i;
 
 	for (i = 0; i < DCCP_LI_HIST_IVAL_F_LENGTH; i++) {
-		entry = dccp_li_hist_entry_new(hist, SLAB_ATOMIC);
+		entry = dccp_li_hist_entry_new(hist, GFP_ATOMIC);
 		if (entry == NULL) {
 			dccp_li_hist_purge(hist, list);
 			DCCP_BUG("loss interval list entry is NULL");
diff --git a/net/dccp/ccids/lib/loss_interval.h b/net/dccp/ccids/lib/loss_interval.h
index 0ae85f0..eb25701 100644
--- a/net/dccp/ccids/lib/loss_interval.h
+++ b/net/dccp/ccids/lib/loss_interval.h
@@ -20,7 +20,7 @@
 #define DCCP_LI_HIST_IVAL_F_LENGTH  8
 
 struct dccp_li_hist {
-	kmem_cache_t *dccplih_slab;
+	struct kmem_cache *dccplih_slab;
 };
 
 extern struct dccp_li_hist *dccp_li_hist_new(const char *name);
diff --git a/net/dccp/ccids/lib/packet_history.h b/net/dccp/ccids/lib/packet_history.h
index 067cf1c..9a8bcf2 100644
--- a/net/dccp/ccids/lib/packet_history.h
+++ b/net/dccp/ccids/lib/packet_history.h
@@ -68,14 +68,14 @@
 };
 
 struct dccp_tx_hist {
-	kmem_cache_t *dccptxh_slab;
+	struct kmem_cache *dccptxh_slab;
 };
 
 extern struct dccp_tx_hist *dccp_tx_hist_new(const char *name);
 extern void dccp_tx_hist_delete(struct dccp_tx_hist *hist);
 
 struct dccp_rx_hist {
-	kmem_cache_t *dccprxh_slab;
+	struct kmem_cache *dccprxh_slab;
 };
 
 extern struct dccp_rx_hist *dccp_rx_hist_new(const char *name);
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 7b52f2a..4c9e267 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -32,8 +32,7 @@
 	.tw_timer	= TIMER_INITIALIZER(inet_twdr_hangman, 0,
 					    (unsigned long)&dccp_death_row),
 	.twkill_work	= __WORK_INITIALIZER(dccp_death_row.twkill_work,
-					     inet_twdr_twkill_work,
-					     &dccp_death_row),
+					     inet_twdr_twkill_work),
 /* Short-time timewait calendar */
 
 	.twcal_hand	= -1,
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index bdbc3f4..13b2421 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -79,7 +79,7 @@
 static struct hlist_head dn_fib_table_hash[DN_FIB_TABLE_HASHSZ];
 static DEFINE_RWLOCK(dn_fib_tables_lock);
 
-static kmem_cache_t *dn_hash_kmem __read_mostly;
+static struct kmem_cache *dn_hash_kmem __read_mostly;
 static int dn_fib_hash_zombies;
 
 static inline dn_fib_idx_t dn_hash(dn_fib_key_t key, struct dn_zone *dz)
@@ -590,7 +590,7 @@
 
 replace:
 	err = -ENOBUFS;
-	new_f = kmem_cache_alloc(dn_hash_kmem, SLAB_KERNEL);
+	new_f = kmem_cache_alloc(dn_hash_kmem, GFP_KERNEL);
 	if (new_f == NULL)
 		goto out;
 
diff --git a/net/ieee80211/softmac/ieee80211softmac_assoc.c b/net/ieee80211/softmac/ieee80211softmac_assoc.c
index cf51c87..08386c1 100644
--- a/net/ieee80211/softmac/ieee80211softmac_assoc.c
+++ b/net/ieee80211/softmac/ieee80211softmac_assoc.c
@@ -58,9 +58,11 @@
 }
 
 void
-ieee80211softmac_assoc_timeout(void *d)
+ieee80211softmac_assoc_timeout(struct work_struct *work)
 {
-	struct ieee80211softmac_device *mac = (struct ieee80211softmac_device *)d;
+	struct ieee80211softmac_device *mac =
+		container_of(work, struct ieee80211softmac_device,
+			     associnfo.timeout.work);
 	struct ieee80211softmac_network *n;
 
 	mutex_lock(&mac->associnfo.mutex);
@@ -186,9 +188,11 @@
 
 /* This function is called to handle userspace requests (asynchronously) */
 void
-ieee80211softmac_assoc_work(void *d)
+ieee80211softmac_assoc_work(struct work_struct *work)
 {
-	struct ieee80211softmac_device *mac = (struct ieee80211softmac_device *)d;
+	struct ieee80211softmac_device *mac =
+		container_of(work, struct ieee80211softmac_device,
+			     associnfo.work.work);
 	struct ieee80211softmac_network *found = NULL;
 	struct ieee80211_network *net = NULL, *best = NULL;
 	int bssvalid;
@@ -412,7 +416,7 @@
 				network->authenticated = 0;
 				/* we don't want to do this more than once ... */
 				network->auth_desynced_once = 1;
-				schedule_work(&mac->associnfo.work);
+				schedule_delayed_work(&mac->associnfo.work, 0);
 				break;
 			}
 		default:
@@ -446,7 +450,7 @@
 	ieee80211softmac_disassoc(mac);
 
 	/* try to reassociate */
-	schedule_work(&mac->associnfo.work);
+	schedule_delayed_work(&mac->associnfo.work, 0);
 
 	return 0;
 }
@@ -466,7 +470,7 @@
 		dprintkl(KERN_INFO PFX "reassoc request from unknown network\n");
 		return 0;
 	}
-	schedule_work(&mac->associnfo.work);
+	schedule_delayed_work(&mac->associnfo.work, 0);
 
 	return 0;
 }
diff --git a/net/ieee80211/softmac/ieee80211softmac_auth.c b/net/ieee80211/softmac/ieee80211softmac_auth.c
index 0612015..6012705 100644
--- a/net/ieee80211/softmac/ieee80211softmac_auth.c
+++ b/net/ieee80211/softmac/ieee80211softmac_auth.c
@@ -26,7 +26,7 @@
 
 #include "ieee80211softmac_priv.h"
 
-static void ieee80211softmac_auth_queue(void *data);
+static void ieee80211softmac_auth_queue(struct work_struct *work);
 
 /* Queues an auth request to the desired AP */
 int
@@ -54,14 +54,14 @@
 	auth->mac = mac;
 	auth->retry = IEEE80211SOFTMAC_AUTH_RETRY_LIMIT;
 	auth->state = IEEE80211SOFTMAC_AUTH_OPEN_REQUEST;
-	INIT_WORK(&auth->work, &ieee80211softmac_auth_queue, (void *)auth);
+	INIT_DELAYED_WORK(&auth->work, ieee80211softmac_auth_queue);
 	
 	/* Lock (for list) */
 	spin_lock_irqsave(&mac->lock, flags);
 
 	/* add to list */
 	list_add_tail(&auth->list, &mac->auth_queue);
-	schedule_work(&auth->work);
+	schedule_delayed_work(&auth->work, 0);
 	spin_unlock_irqrestore(&mac->lock, flags);
 	
 	return 0;
@@ -70,14 +70,15 @@
 
 /* Sends an auth request to the desired AP and handles timeouts */
 static void
-ieee80211softmac_auth_queue(void *data)
+ieee80211softmac_auth_queue(struct work_struct *work)
 {
 	struct ieee80211softmac_device *mac;
 	struct ieee80211softmac_auth_queue_item *auth;
 	struct ieee80211softmac_network *net;
 	unsigned long flags;
 
-	auth = (struct ieee80211softmac_auth_queue_item *)data;
+	auth = container_of(work, struct ieee80211softmac_auth_queue_item,
+			    work.work);
 	net = auth->net;
 	mac = auth->mac;
 
@@ -118,9 +119,11 @@
 
 /* Sends a response to an auth challenge (for shared key auth). */
 static void
-ieee80211softmac_auth_challenge_response(void *_aq)
+ieee80211softmac_auth_challenge_response(struct work_struct *work)
 {
-	struct ieee80211softmac_auth_queue_item *aq = _aq;
+	struct ieee80211softmac_auth_queue_item *aq =
+		container_of(work, struct ieee80211softmac_auth_queue_item,
+			     work.work);
 
 	/* Send our response */
 	ieee80211softmac_send_mgt_frame(aq->mac, aq->net, IEEE80211_STYPE_AUTH, aq->state);
@@ -234,8 +237,8 @@
 			 * we have obviously already sent the initial auth
 			 * request. */
 			cancel_delayed_work(&aq->work);
-			INIT_WORK(&aq->work, &ieee80211softmac_auth_challenge_response, (void *)aq);
-			schedule_work(&aq->work);
+			INIT_DELAYED_WORK(&aq->work, &ieee80211softmac_auth_challenge_response);
+			schedule_delayed_work(&aq->work, 0);
 			spin_unlock_irqrestore(&mac->lock, flags);
 			return 0;
 		case IEEE80211SOFTMAC_AUTH_SHARED_PASS:
@@ -398,6 +401,6 @@
 	ieee80211softmac_deauth_from_net(mac, net);
 
 	/* let's try to re-associate */
-	schedule_work(&mac->associnfo.work);
+	schedule_delayed_work(&mac->associnfo.work, 0);
 	return 0;
 }
diff --git a/net/ieee80211/softmac/ieee80211softmac_event.c b/net/ieee80211/softmac/ieee80211softmac_event.c
index f34fa2e..b901565 100644
--- a/net/ieee80211/softmac/ieee80211softmac_event.c
+++ b/net/ieee80211/softmac/ieee80211softmac_event.c
@@ -73,10 +73,12 @@
 
 
 static void
-ieee80211softmac_notify_callback(void *d)
+ieee80211softmac_notify_callback(struct work_struct *work)
 {
-	struct ieee80211softmac_event event = *(struct ieee80211softmac_event*) d;
-	kfree(d);
+	struct ieee80211softmac_event *pevent =
+		container_of(work, struct ieee80211softmac_event, work.work);
+	struct ieee80211softmac_event event = *pevent;
+	kfree(pevent);
 	
 	event.fun(event.mac->dev, event.event_type, event.context);
 }
@@ -99,7 +101,7 @@
 		return -ENOMEM;
 	
 	eventptr->event_type = event;
-	INIT_WORK(&eventptr->work, ieee80211softmac_notify_callback, eventptr);
+	INIT_DELAYED_WORK(&eventptr->work, ieee80211softmac_notify_callback);
 	eventptr->fun = fun;
 	eventptr->context = context;
 	eventptr->mac = mac;
@@ -170,7 +172,7 @@
 				/* User may have subscribed to ANY event, so
 				 * we tell them which event triggered it. */
 				eventptr->event_type = event;
-				schedule_work(&eventptr->work);
+				schedule_delayed_work(&eventptr->work, 0);
 			}
 		}
 }
diff --git a/net/ieee80211/softmac/ieee80211softmac_module.c b/net/ieee80211/softmac/ieee80211softmac_module.c
index 33aff4f..256207b 100644
--- a/net/ieee80211/softmac/ieee80211softmac_module.c
+++ b/net/ieee80211/softmac/ieee80211softmac_module.c
@@ -58,8 +58,8 @@
 	INIT_LIST_HEAD(&softmac->events);
 
 	mutex_init(&softmac->associnfo.mutex);
-	INIT_WORK(&softmac->associnfo.work, ieee80211softmac_assoc_work, softmac);
-	INIT_WORK(&softmac->associnfo.timeout, ieee80211softmac_assoc_timeout, softmac);
+	INIT_DELAYED_WORK(&softmac->associnfo.work, ieee80211softmac_assoc_work);
+	INIT_DELAYED_WORK(&softmac->associnfo.timeout, ieee80211softmac_assoc_timeout);
 	softmac->start_scan = ieee80211softmac_start_scan_implementation;
 	softmac->wait_for_scan = ieee80211softmac_wait_for_scan_implementation;
 	softmac->stop_scan = ieee80211softmac_stop_scan_implementation;
diff --git a/net/ieee80211/softmac/ieee80211softmac_priv.h b/net/ieee80211/softmac/ieee80211softmac_priv.h
index 0642e09..c0dbe07 100644
--- a/net/ieee80211/softmac/ieee80211softmac_priv.h
+++ b/net/ieee80211/softmac/ieee80211softmac_priv.h
@@ -78,7 +78,7 @@
 /* private definitions and prototypes */
 
 /*** prototypes from _scan.c */
-void ieee80211softmac_scan(void *sm);
+void ieee80211softmac_scan(struct work_struct *work);
 /* for internal use if scanning is needed */
 int ieee80211softmac_start_scan(struct ieee80211softmac_device *mac);
 void ieee80211softmac_stop_scan(struct ieee80211softmac_device *mac);
@@ -149,7 +149,7 @@
 int ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *deauth);
 
 /*** prototypes from _assoc.c */
-void ieee80211softmac_assoc_work(void *d);
+void ieee80211softmac_assoc_work(struct work_struct *work);
 int ieee80211softmac_handle_assoc_response(struct net_device * dev,
 					   struct ieee80211_assoc_response * resp,
 					   struct ieee80211_network * network);
@@ -157,7 +157,7 @@
 				     struct ieee80211_disassoc * disassoc);
 int ieee80211softmac_handle_reassoc_req(struct net_device * dev,
 				        struct ieee80211_reassoc_request * reassoc);
-void ieee80211softmac_assoc_timeout(void *d);
+void ieee80211softmac_assoc_timeout(struct work_struct *work);
 void ieee80211softmac_send_disassoc_req(struct ieee80211softmac_device *mac, u16 reason);
 void ieee80211softmac_disassoc(struct ieee80211softmac_device *mac);
 
@@ -207,7 +207,7 @@
 	struct ieee80211softmac_device	*mac;	/* SoftMAC device */
 	u8 retry;				/* Retry limit */
 	u8 state;				/* Auth State */
-	struct work_struct		work;	/* Work queue */
+	struct delayed_work		work;	/* Work queue */
 };
 
 /* scanning information */
@@ -219,7 +219,8 @@
 	   stop:1;
 	u8 skip_flags;
 	struct completion finished;
-	struct work_struct softmac_scan;
+	struct delayed_work softmac_scan;
+	struct ieee80211softmac_device *mac;
 };
 
 /* private event struct */
@@ -227,7 +228,7 @@
 	struct list_head list;
 	int event_type;
 	void *event_context;
-	struct work_struct work;
+	struct delayed_work work;
 	notify_function_ptr fun;
 	void *context;
 	struct ieee80211softmac_device *mac;
diff --git a/net/ieee80211/softmac/ieee80211softmac_scan.c b/net/ieee80211/softmac/ieee80211softmac_scan.c
index 5507fea..0c85d6c 100644
--- a/net/ieee80211/softmac/ieee80211softmac_scan.c
+++ b/net/ieee80211/softmac/ieee80211softmac_scan.c
@@ -90,12 +90,14 @@
 
 
 /* internal scanning implementation follows */
-void ieee80211softmac_scan(void *d)
+void ieee80211softmac_scan(struct work_struct *work)
 {
 	int invalid_channel;
 	u8 current_channel_idx;
-	struct ieee80211softmac_device *sm = (struct ieee80211softmac_device *)d;
-	struct ieee80211softmac_scaninfo *si = sm->scaninfo;
+	struct ieee80211softmac_scaninfo *si =
+		container_of(work, struct ieee80211softmac_scaninfo,
+			     softmac_scan.work);
+	struct ieee80211softmac_device *sm = si->mac;
 	unsigned long flags;
 
 	while (!(si->stop) && (si->current_channel_idx < si->number_channels)) {
@@ -146,7 +148,8 @@
 	struct ieee80211softmac_scaninfo *info = kmalloc(sizeof(struct ieee80211softmac_scaninfo), GFP_ATOMIC);
 	if (unlikely(!info))
 		return NULL;
-	INIT_WORK(&info->softmac_scan, ieee80211softmac_scan, mac);
+	INIT_DELAYED_WORK(&info->softmac_scan, ieee80211softmac_scan);
+	info->mac = mac;
 	init_completion(&info->finished);
 	return info;
 }
@@ -187,7 +190,7 @@
 	sm->scaninfo->started = 1;
 	sm->scaninfo->stop = 0;
 	INIT_COMPLETION(sm->scaninfo->finished);
-	schedule_work(&sm->scaninfo->softmac_scan);
+	schedule_delayed_work(&sm->scaninfo->softmac_scan, 0);
 	spin_unlock_irqrestore(&sm->lock, flags);
 	return 0;
 }
diff --git a/net/ieee80211/softmac/ieee80211softmac_wx.c b/net/ieee80211/softmac/ieee80211softmac_wx.c
index 23068a8..2ffaebd 100644
--- a/net/ieee80211/softmac/ieee80211softmac_wx.c
+++ b/net/ieee80211/softmac/ieee80211softmac_wx.c
@@ -122,7 +122,7 @@
 
 	sm->associnfo.associating = 1;
 	/* queue lower level code to do work (if necessary) */
-	schedule_work(&sm->associnfo.work);
+	schedule_delayed_work(&sm->associnfo.work, 0);
 out:
 	mutex_unlock(&sm->associnfo.mutex);
 
@@ -356,7 +356,7 @@
 		/* force reassociation */
 		mac->associnfo.bssvalid = 0;
 		if (mac->associnfo.associated)
-			schedule_work(&mac->associnfo.work);
+			schedule_delayed_work(&mac->associnfo.work, 0);
 	} else if (is_zero_ether_addr(data->ap_addr.sa_data)) {
 		/* the bssid we have is no longer fixed */
 		mac->associnfo.bssfixed = 0;
@@ -373,7 +373,7 @@
 		/* tell the other code that this bssid should be used no matter what */
 		mac->associnfo.bssfixed = 1;
 		/* queue associate if new bssid or (old one again and not associated) */
-		schedule_work(&mac->associnfo.work);
+		schedule_delayed_work(&mac->associnfo.work, 0);
         }
 
  out:
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
index 107bb6c..648f47c 100644
--- a/net/ipv4/fib_hash.c
+++ b/net/ipv4/fib_hash.c
@@ -45,8 +45,8 @@
 
 #include "fib_lookup.h"
 
-static kmem_cache_t *fn_hash_kmem __read_mostly;
-static kmem_cache_t *fn_alias_kmem __read_mostly;
+static struct kmem_cache *fn_hash_kmem __read_mostly;
+static struct kmem_cache *fn_alias_kmem __read_mostly;
 
 struct fib_node {
 	struct hlist_node	fn_hash;
@@ -485,13 +485,13 @@
 		goto out;
 
 	err = -ENOBUFS;
-	new_fa = kmem_cache_alloc(fn_alias_kmem, SLAB_KERNEL);
+	new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
 	if (new_fa == NULL)
 		goto out;
 
 	new_f = NULL;
 	if (!f) {
-		new_f = kmem_cache_alloc(fn_hash_kmem, SLAB_KERNEL);
+		new_f = kmem_cache_alloc(fn_hash_kmem, GFP_KERNEL);
 		if (new_f == NULL)
 			goto out_free_new_fa;
 
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index d17990e..cfb249c 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -172,7 +172,7 @@
 static struct tnode *halve(struct trie *t, struct tnode *tn);
 static void tnode_free(struct tnode *tn);
 
-static kmem_cache_t *fn_alias_kmem __read_mostly;
+static struct kmem_cache *fn_alias_kmem __read_mostly;
 static struct trie *trie_local = NULL, *trie_main = NULL;
 
 
@@ -1187,7 +1187,7 @@
 			u8 state;
 
 			err = -ENOBUFS;
-			new_fa = kmem_cache_alloc(fn_alias_kmem, SLAB_KERNEL);
+			new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
 			if (new_fa == NULL)
 				goto out;
 
@@ -1232,7 +1232,7 @@
 		goto out;
 
 	err = -ENOBUFS;
-	new_fa = kmem_cache_alloc(fn_alias_kmem, SLAB_KERNEL);
+	new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
 	if (new_fa == NULL)
 		goto out;
 
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 244c4f4..8c79c8a 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -27,11 +27,11 @@
  * Allocate and initialize a new local port bind bucket.
  * The bindhash mutex for snum's hash chain must be held here.
  */
-struct inet_bind_bucket *inet_bind_bucket_create(kmem_cache_t *cachep,
+struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
 						 struct inet_bind_hashbucket *head,
 						 const unsigned short snum)
 {
-	struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, SLAB_ATOMIC);
+	struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);
 
 	if (tb != NULL) {
 		tb->port      = snum;
@@ -45,7 +45,7 @@
 /*
  * Caller must hold hashbucket lock for this tb with local BH disabled
  */
-void inet_bind_bucket_destroy(kmem_cache_t *cachep, struct inet_bind_bucket *tb)
+void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb)
 {
 	if (hlist_empty(&tb->owners)) {
 		__hlist_del(&tb->node);
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index cdd8053..e28330a 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -91,7 +91,7 @@
 {
 	struct inet_timewait_sock *tw =
 		kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
-				 SLAB_ATOMIC);
+				 GFP_ATOMIC);
 	if (tw != NULL) {
 		const struct inet_sock *inet = inet_sk(sk);
 
@@ -197,9 +197,10 @@
 
 extern void twkill_slots_invalid(void);
 
-void inet_twdr_twkill_work(void *data)
+void inet_twdr_twkill_work(struct work_struct *work)
 {
-	struct inet_timewait_death_row *twdr = data;
+	struct inet_timewait_death_row *twdr =
+		container_of(work, struct inet_timewait_death_row, twkill_work);
 	int i;
 
 	if ((INET_TWDR_TWKILL_SLOTS - 1) > (sizeof(twdr->thread_slots) * 8))
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index f072f38..711eb6d 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -73,7 +73,7 @@
 /* Exported for inet_getid inline function.  */
 DEFINE_SPINLOCK(inet_peer_idlock);
 
-static kmem_cache_t *peer_cachep __read_mostly;
+static struct kmem_cache *peer_cachep __read_mostly;
 
 #define node_height(x) x->avl_height
 static struct inet_peer peer_fake_node = {
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index efcf45e..ecb5422 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -105,7 +105,7 @@
    In this case data path is free of exclusive locks at all.
  */
 
-static kmem_cache_t *mrt_cachep __read_mostly;
+static struct kmem_cache *mrt_cachep __read_mostly;
 
 static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local);
 static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert);
diff --git a/net/ipv4/ipvs/ip_vs_conn.c b/net/ipv4/ipvs/ip_vs_conn.c
index 8832eb5..8086787 100644
--- a/net/ipv4/ipvs/ip_vs_conn.c
+++ b/net/ipv4/ipvs/ip_vs_conn.c
@@ -44,7 +44,7 @@
 static struct list_head *ip_vs_conn_tab;
 
 /*  SLAB cache for IPVS connections */
-static kmem_cache_t *ip_vs_conn_cachep __read_mostly;
+static struct kmem_cache *ip_vs_conn_cachep __read_mostly;
 
 /*  counter for current IPVS connections */
 static atomic_t ip_vs_conn_count = ATOMIC_INIT(0);
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index f261616..9b93338 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -221,10 +221,10 @@
  *	Timer for checking the defense
  */
 #define DEFENSE_TIMER_PERIOD	1*HZ
-static void defense_work_handler(void *data);
-static DECLARE_WORK(defense_work, defense_work_handler, NULL);
+static void defense_work_handler(struct work_struct *work);
+static DECLARE_DELAYED_WORK(defense_work, defense_work_handler);
 
-static void defense_work_handler(void *data)
+static void defense_work_handler(struct work_struct *work)
 {
 	update_defense_level();
 	if (atomic_read(&ip_vs_dropentry))
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
index f4b0e68..8556a4f 100644
--- a/net/ipv4/netfilter/ip_conntrack_core.c
+++ b/net/ipv4/netfilter/ip_conntrack_core.c
@@ -65,8 +65,8 @@
 unsigned int ip_conntrack_htable_size __read_mostly = 0;
 int ip_conntrack_max __read_mostly;
 struct list_head *ip_conntrack_hash __read_mostly;
-static kmem_cache_t *ip_conntrack_cachep __read_mostly;
-static kmem_cache_t *ip_conntrack_expect_cachep __read_mostly;
+static struct kmem_cache *ip_conntrack_cachep __read_mostly;
+static struct kmem_cache *ip_conntrack_expect_cachep __read_mostly;
 struct ip_conntrack ip_conntrack_untracked;
 unsigned int ip_ct_log_invalid __read_mostly;
 static LIST_HEAD(unconfirmed);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 6dddf59..4a3889d 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -45,8 +45,7 @@
 	.tw_timer	= TIMER_INITIALIZER(inet_twdr_hangman, 0,
 					    (unsigned long)&tcp_death_row),
 	.twkill_work	= __WORK_INITIALIZER(tcp_death_row.twkill_work,
-					     inet_twdr_twkill_work,
-					     &tcp_death_row),
+					     inet_twdr_twkill_work),
 /* Short-time timewait calendar */
 
 	.twcal_hand	= -1,
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 87c8f54..e5cd83b 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -720,10 +720,8 @@
 {
 	if (ptr == NULL)
 		return;
-	if (ptr[0])
-		free_percpu(ptr[0]);
-	if (ptr[1])
-		free_percpu(ptr[1]);
+	free_percpu(ptr[0]);
+	free_percpu(ptr[1]);
 	ptr[0] = ptr[1] = NULL;
 }
 
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index bf52611..96d8310 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -50,7 +50,7 @@
 
 struct rt6_statistics	rt6_stats;
 
-static kmem_cache_t * fib6_node_kmem __read_mostly;
+static struct kmem_cache * fib6_node_kmem __read_mostly;
 
 enum fib_walk_state_t
 {
@@ -150,7 +150,7 @@
 {
 	struct fib6_node *fn;
 
-	if ((fn = kmem_cache_alloc(fib6_node_kmem, SLAB_ATOMIC)) != NULL)
+	if ((fn = kmem_cache_alloc(fib6_node_kmem, GFP_ATOMIC)) != NULL)
 		memset(fn, 0, sizeof(struct fib6_node));
 
 	return fn;
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 01a5c52..12e426b 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -50,7 +50,7 @@
 #define XFRM6_TUNNEL_SPI_MIN	1
 #define XFRM6_TUNNEL_SPI_MAX	0xffffffff
 
-static kmem_cache_t *xfrm6_tunnel_spi_kmem __read_mostly;
+static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly;
 
 #define XFRM6_TUNNEL_SPI_BYADDR_HSIZE 256
 #define XFRM6_TUNNEL_SPI_BYSPI_HSIZE 256
@@ -180,7 +180,7 @@
 	spi = 0;
 	goto out;
 alloc_spi:
-	x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, SLAB_ATOMIC);
+	x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, GFP_ATOMIC);
 	if (!x6spi)
 		goto out;
 
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index d50a020..262bda8 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -61,7 +61,7 @@
 static void ircomm_tty_send_xchar(struct tty_struct *tty, char ch);
 static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout);
 static void ircomm_tty_hangup(struct tty_struct *tty);
-static void ircomm_tty_do_softint(void *private_);
+static void ircomm_tty_do_softint(struct work_struct *work);
 static void ircomm_tty_shutdown(struct ircomm_tty_cb *self);
 static void ircomm_tty_stop(struct tty_struct *tty);
 
@@ -389,7 +389,7 @@
 		self->flow = FLOW_STOP;
 
 		self->line = line;
-		INIT_WORK(&self->tqueue, ircomm_tty_do_softint, self);
+		INIT_WORK(&self->tqueue, ircomm_tty_do_softint);
 		self->max_header_size = IRCOMM_TTY_HDR_UNINITIALISED;
 		self->max_data_size = IRCOMM_TTY_DATA_UNINITIALISED;
 		self->close_delay = 5*HZ/10;
@@ -594,15 +594,16 @@
 }
 
 /*
- * Function ircomm_tty_do_softint (private_)
+ * Function ircomm_tty_do_softint (work)
  *
  *    We use this routine to give the write wakeup to the user at a
  *    safe time (as fast as possible after the write has completed). This
  *    can be compared to the Tx interrupt.
  */
-static void ircomm_tty_do_softint(void *private_)
+static void ircomm_tty_do_softint(struct work_struct *work)
 {
-	struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) private_;
+	struct ircomm_tty_cb *self =
+		container_of(work, struct ircomm_tty_cb, tqueue);
 	struct tty_struct *tty;
 	unsigned long flags;
 	struct sk_buff *skb, *ctrl_skb;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index eaa0f8a..a9638ff 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -108,7 +108,7 @@
 	size_t size;
 
 	/* slab cache pointer */
-	kmem_cache_t *cachep;
+	struct kmem_cache *cachep;
 
 	/* allocated slab cache + modules which uses this slab cache */
 	int use;
@@ -147,7 +147,7 @@
 {
 	int ret = 0;
 	char *cache_name;
-	kmem_cache_t *cachep;
+	struct kmem_cache *cachep;
 
 	DEBUGP("nf_conntrack_register_cache: features=0x%x, name=%s, size=%d\n",
 	       features, name, size);
@@ -226,7 +226,7 @@
 /* FIXME: In the current, only nf_conntrack_cleanup() can call this function. */
 void nf_conntrack_unregister_cache(u_int32_t features)
 {
-	kmem_cache_t *cachep;
+	struct kmem_cache *cachep;
 	char *name;
 
 	/*
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 588d379..c20f901 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -29,7 +29,7 @@
 LIST_HEAD(nf_conntrack_expect_list);
 EXPORT_SYMBOL_GPL(nf_conntrack_expect_list);
 
-kmem_cache_t *nf_conntrack_expect_cachep __read_mostly;
+struct kmem_cache *nf_conntrack_expect_cachep __read_mostly;
 static unsigned int nf_conntrack_expect_next_id;
 
 /* nf_conntrack_expect helper functions */
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index a98de0b..a5a6e19 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -92,7 +92,7 @@
 static DEFINE_SPINLOCK(hashlimit_lock);	/* protects htables list */
 static DEFINE_MUTEX(hlimit_mutex);	/* additional checkentry protection */
 static HLIST_HEAD(hashlimit_htables);
-static kmem_cache_t *hashlimit_cachep __read_mostly;
+static struct kmem_cache *hashlimit_cachep __read_mostly;
 
 static inline int dst_cmp(const struct dsthash_ent *ent, struct dsthash_dst *b)
 {
diff --git a/net/rxrpc/krxiod.c b/net/rxrpc/krxiod.c
index dada34a..49effd9 100644
--- a/net/rxrpc/krxiod.c
+++ b/net/rxrpc/krxiod.c
@@ -13,6 +13,7 @@
 #include <linux/completion.h>
 #include <linux/spinlock.h>
 #include <linux/init.h>
+#include <linux/freezer.h>
 #include <rxrpc/krxiod.h>
 #include <rxrpc/transport.h>
 #include <rxrpc/peer.h>
diff --git a/net/rxrpc/krxsecd.c b/net/rxrpc/krxsecd.c
index cea4eb5..3ab0f77 100644
--- a/net/rxrpc/krxsecd.c
+++ b/net/rxrpc/krxsecd.c
@@ -27,6 +27,7 @@
 #include <rxrpc/call.h>
 #include <linux/udp.h>
 #include <linux/ip.h>
+#include <linux/freezer.h>
 #include <net/sock.h>
 #include "internal.h"
 
diff --git a/net/rxrpc/krxtimod.c b/net/rxrpc/krxtimod.c
index 3e74669..9a9b613 100644
--- a/net/rxrpc/krxtimod.c
+++ b/net/rxrpc/krxtimod.c
@@ -13,6 +13,7 @@
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/completion.h>
+#include <linux/freezer.h>
 #include <rxrpc/rxrpc.h>
 #include <rxrpc/krxtimod.h>
 #include <asm/errno.h>
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 39471d3..ad0057d 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -61,7 +61,7 @@
 #include <net/sctp/sm.h>
 
 /* Forward declarations for internal functions. */
-static void sctp_assoc_bh_rcv(struct sctp_association *asoc);
+static void sctp_assoc_bh_rcv(struct work_struct *work);
 
 
 /* 1st Level Abstractions. */
@@ -269,9 +269,7 @@
 
 	/* Create an input queue.  */
 	sctp_inq_init(&asoc->base.inqueue);
-	sctp_inq_set_th_handler(&asoc->base.inqueue,
-				    (void (*)(void *))sctp_assoc_bh_rcv,
-				    asoc);
+	sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);
 
 	/* Create an output queue.  */
 	sctp_outq_init(asoc, &asoc->outqueue);
@@ -946,8 +944,11 @@
 }
 
 /* Do delayed input processing.  This is scheduled by sctp_rcv(). */
-static void sctp_assoc_bh_rcv(struct sctp_association *asoc)
+static void sctp_assoc_bh_rcv(struct work_struct *work)
 {
+	struct sctp_association *asoc =
+		container_of(work, struct sctp_association,
+			     base.inqueue.immediate);
 	struct sctp_endpoint *ep;
 	struct sctp_chunk *chunk;
 	struct sock *sk;
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 33a42e9..1297569 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -61,7 +61,7 @@
 #include <net/sctp/sm.h>
 
 /* Forward declarations for internal helpers. */
-static void sctp_endpoint_bh_rcv(struct sctp_endpoint *ep);
+static void sctp_endpoint_bh_rcv(struct work_struct *work);
 
 /*
  * Initialize the base fields of the endpoint structure.
@@ -89,8 +89,7 @@
 	sctp_inq_init(&ep->base.inqueue);
 
 	/* Set its top-half handler */
-	sctp_inq_set_th_handler(&ep->base.inqueue,
-				(void (*)(void *))sctp_endpoint_bh_rcv, ep);
+	sctp_inq_set_th_handler(&ep->base.inqueue, sctp_endpoint_bh_rcv);
 
 	/* Initialize the bind addr area */
 	sctp_bind_addr_init(&ep->base.bind_addr, 0);
@@ -318,8 +317,11 @@
 /* Do delayed input processing.  This is scheduled by sctp_rcv().
  * This may be called on BH or task time.
  */
-static void sctp_endpoint_bh_rcv(struct sctp_endpoint *ep)
+static void sctp_endpoint_bh_rcv(struct work_struct *work)
 {
+	struct sctp_endpoint *ep =
+		container_of(work, struct sctp_endpoint,
+			     base.inqueue.immediate);
 	struct sctp_association *asoc;
 	struct sock *sk;
 	struct sctp_transport *transport;
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index cf6deed..71b0746 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -54,7 +54,7 @@
 	queue->in_progress = NULL;
 
 	/* Create a task for delivering data.  */
-	INIT_WORK(&queue->immediate, NULL, NULL);
+	INIT_WORK(&queue->immediate, NULL);
 
 	queue->malloced = 0;
 }
@@ -97,7 +97,7 @@
 	 * on the BH related data structures.
 	 */
 	list_add_tail(&chunk->list, &q->in_chunk_list);
-	q->immediate.func(q->immediate.data);
+	q->immediate.func(&q->immediate);
 }
 
 /* Extract a chunk from an SCTP inqueue.
@@ -205,9 +205,8 @@
  * The intent is that this routine will pull stuff out of the
  * inqueue and process it.
  */
-void sctp_inq_set_th_handler(struct sctp_inq *q,
-				 void (*callback)(void *), void *arg)
+void sctp_inq_set_th_handler(struct sctp_inq *q, work_func_t callback)
 {
-	INIT_WORK(&q->immediate, callback, arg);
+	INIT_WORK(&q->immediate, callback);
 }
 
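The SCTP hunks above follow the new workqueue calling convention used throughout this merge: a work handler receives the struct work_struct itself rather than a void * cookie, recovers its owning object with container_of() on the embedded work item, and INIT_WORK() drops its third argument. A minimal sketch of the pattern, with illustrative names that are not taken from this patch:

    #include <linux/workqueue.h>

    struct my_inqueue {
            struct work_struct immediate;   /* embedded work item */
            int pending;
    };

    /* new-style handler: its only argument is the work item itself */
    static void my_inqueue_worker(struct work_struct *work)
    {
            struct my_inqueue *q =
                    container_of(work, struct my_inqueue, immediate);

            q->pending = 0;
    }

    static void my_inqueue_init(struct my_inqueue *q)
    {
            /* two-argument INIT_WORK(); the old "data" argument is gone */
            INIT_WORK(&q->immediate, my_inqueue_worker);
    }
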
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 11f3b54..f2ba861 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -79,8 +79,8 @@
 static struct sctp_af *sctp_af_v4_specific;
 static struct sctp_af *sctp_af_v6_specific;
 
-kmem_cache_t *sctp_chunk_cachep __read_mostly;
-kmem_cache_t *sctp_bucket_cachep __read_mostly;
+struct kmem_cache *sctp_chunk_cachep __read_mostly;
+struct kmem_cache *sctp_bucket_cachep __read_mostly;
 
 /* Return the address of the control sock. */
 struct sock *sctp_get_ctl_sock(void)
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 04954e5..30927d3 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -65,7 +65,7 @@
 #include <net/sctp/sctp.h>
 #include <net/sctp/sm.h>
 
-extern kmem_cache_t *sctp_chunk_cachep;
+extern struct kmem_cache *sctp_chunk_cachep;
 
 SCTP_STATIC
 struct sctp_chunk *sctp_make_chunk(const struct sctp_association *asoc,
@@ -979,7 +979,7 @@
 {
 	struct sctp_chunk *retval;
 
-	retval = kmem_cache_alloc(sctp_chunk_cachep, SLAB_ATOMIC);
+	retval = kmem_cache_alloc(sctp_chunk_cachep, GFP_ATOMIC);
 
 	if (!retval)
 		goto nodata;
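Two mechanical renames recur through the rest of this merge: the kmem_cache_t typedef becomes struct kmem_cache, and kmem_cache_alloc() takes ordinary GFP flags (GFP_KERNEL, GFP_ATOMIC) instead of the old SLAB_* aliases. A hedged sketch of the resulting idiom; the cache and object names are illustrative only:

    #include <linux/slab.h>

    struct my_chunk {
            int length;
    };

    static struct kmem_cache *my_chunk_cachep __read_mostly;

    /* callers pass plain GFP flags; SLAB_ATOMIC/SLAB_KERNEL are gone */
    static struct my_chunk *my_chunk_alloc(gfp_t gfp)
    {
            return kmem_cache_alloc(my_chunk_cachep, gfp);
    }
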
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 02b2714..1e8132b 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -107,7 +107,7 @@
 			      struct sctp_association *, sctp_socket_type_t);
 static char *sctp_hmac_alg = SCTP_COOKIE_HMAC_ALG;
 
-extern kmem_cache_t *sctp_bucket_cachep;
+extern struct kmem_cache *sctp_bucket_cachep;
 
 /* Get the sndbuf space available at the time on the association.  */
 static inline int sctp_wspace(struct sctp_association *asoc)
@@ -4989,7 +4989,7 @@
 {
 	struct sctp_bind_bucket *pp;
 
-	pp = kmem_cache_alloc(sctp_bucket_cachep, SLAB_ATOMIC);
+	pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC);
 	SCTP_DBG_OBJCNT_INC(bind_bucket);
 	if (pp) {
 		pp->port = snum;
diff --git a/net/socket.c b/net/socket.c
index e8db547..29ea1de 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -230,13 +230,13 @@
 
 #define SOCKFS_MAGIC 0x534F434B
 
-static kmem_cache_t *sock_inode_cachep __read_mostly;
+static struct kmem_cache *sock_inode_cachep __read_mostly;
 
 static struct inode *sock_alloc_inode(struct super_block *sb)
 {
 	struct socket_alloc *ei;
 
-	ei = kmem_cache_alloc(sock_inode_cachep, SLAB_KERNEL);
+	ei = kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	init_waitqueue_head(&ei->socket.wait);
@@ -257,7 +257,7 @@
 			container_of(inode, struct socket_alloc, vfs_inode));
 }
 
-static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
+static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
 {
 	struct socket_alloc *ei = (struct socket_alloc *)foo;
 
@@ -305,7 +305,14 @@
 
 static int sockfs_delete_dentry(struct dentry *dentry)
 {
-	return 1;
+	/*
+	 * At creation time, we pretended this dentry was hashed
+	 * (by clearing the DCACHE_UNHASHED bit in d_flags).
+	 * At delete time, we restore the truth: not hashed
+	 * (so that dput() can proceed correctly).
+	 */
+	dentry->d_flags |= DCACHE_UNHASHED;
+	return 0;
 }
 static struct dentry_operations sockfs_dentry_operations = {
 	.d_delete = sockfs_delete_dentry,
@@ -353,14 +360,20 @@
 
 	this.len = sprintf(name, "[%lu]", SOCK_INODE(sock)->i_ino);
 	this.name = name;
-	this.hash = SOCK_INODE(sock)->i_ino;
+	this.hash = 0;
 
 	file->f_dentry = d_alloc(sock_mnt->mnt_sb->s_root, &this);
 	if (unlikely(!file->f_dentry))
 		return -ENOMEM;
 
 	file->f_dentry->d_op = &sockfs_dentry_operations;
-	d_add(file->f_dentry, SOCK_INODE(sock));
+	/*
+	 * We don't want to push this dentry into the global dentry hash table.
+	 * We pretend the dentry is already hashed, by unsetting DCACHE_UNHASHED.
+	 * This permits a working /proc/$pid/fd/XXX on sockets.
+	 */
+	file->f_dentry->d_flags &= ~DCACHE_UNHASHED;
+	d_instantiate(file->f_dentry, SOCK_INODE(sock));
 	file->f_vfsmnt = mntget(sock_mnt);
 	file->f_mapping = file->f_dentry->d_inode->i_mapping;
 
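The two socket.c hunks above work as a pair: the socket's dentry is instantiated with d_instantiate() instead of d_add(), so it never enters the global dentry hash, but DCACHE_UNHASHED is cleared so it still looks hashed and /proc/$pid/fd/N can resolve it; the d_delete operation then sets the flag again so dput() can free the dentry normally. A condensed sketch of that lifecycle, assuming a dentry_operations with the delete hook below is installed on the dentry (names are illustrative):

    #include <linux/dcache.h>
    #include <linux/fs.h>

    /* attach side: a private, pseudo-hashed dentry for an anonymous inode */
    static struct dentry *pseudo_attach(struct dentry *root, struct qstr *name,
                                        struct inode *inode)
    {
            struct dentry *dentry = d_alloc(root, name);

            if (!dentry)
                    return NULL;
            dentry->d_flags &= ~DCACHE_UNHASHED;    /* pretend it is hashed */
            d_instantiate(dentry, inode);           /* no d_add(): skip the hash */
            return dentry;
    }

    /* d_delete side: restore the truth so dput() drops the dentry */
    static int pseudo_delete_dentry(struct dentry *dentry)
    {
            dentry->d_flags |= DCACHE_UNHASHED;
            return 0;
    }
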
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 00cb388..d96fd46 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -284,8 +284,8 @@
 static struct file_operations content_file_operations;
 static struct file_operations cache_flush_operations;
 
-static void do_cache_clean(void *data);
-static DECLARE_WORK(cache_cleaner, do_cache_clean, NULL);
+static void do_cache_clean(struct work_struct *work);
+static DECLARE_DELAYED_WORK(cache_cleaner, do_cache_clean);
 
 void cache_register(struct cache_detail *cd)
 {
@@ -337,7 +337,7 @@
 	spin_unlock(&cache_list_lock);
 
 	/* start the cleaning process */
-	schedule_work(&cache_cleaner);
+	schedule_delayed_work(&cache_cleaner, 0);
 }
 
 int cache_unregister(struct cache_detail *cd)
@@ -461,7 +461,7 @@
 /*
  * We want to regularly clean the cache, so we need to schedule some work ...
  */
-static void do_cache_clean(void *data)
+static void do_cache_clean(struct work_struct *work)
 {
 	int delay = 5;
 	if (cache_clean() == -1)
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 9a0b41a..19703aa 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -33,7 +33,7 @@
 static struct file_system_type rpc_pipe_fs_type;
 
 
-static kmem_cache_t *rpc_inode_cachep __read_mostly;
+static struct kmem_cache *rpc_inode_cachep __read_mostly;
 
 #define RPC_UPCALL_TIMEOUT (30*HZ)
 
@@ -54,10 +54,11 @@
 }
 
 static void
-rpc_timeout_upcall_queue(void *data)
+rpc_timeout_upcall_queue(struct work_struct *work)
 {
 	LIST_HEAD(free_list);
-	struct rpc_inode *rpci = (struct rpc_inode *)data;
+	struct rpc_inode *rpci =
+		container_of(work, struct rpc_inode, queue_timeout.work);
 	struct inode *inode = &rpci->vfs_inode;
 	void (*destroy_msg)(struct rpc_pipe_msg *);
 
@@ -142,7 +143,7 @@
 rpc_alloc_inode(struct super_block *sb)
 {
 	struct rpc_inode *rpci;
-	rpci = (struct rpc_inode *)kmem_cache_alloc(rpc_inode_cachep, SLAB_KERNEL);
+	rpci = (struct rpc_inode *)kmem_cache_alloc(rpc_inode_cachep, GFP_KERNEL);
 	if (!rpci)
 		return NULL;
 	return &rpci->vfs_inode;
@@ -823,7 +824,7 @@
 };
 
 static void
-init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct rpc_inode *rpci = (struct rpc_inode *) foo;
 
@@ -837,7 +838,8 @@
 		INIT_LIST_HEAD(&rpci->pipe);
 		rpci->pipelen = 0;
 		init_waitqueue_head(&rpci->waitq);
-		INIT_WORK(&rpci->queue_timeout, rpc_timeout_upcall_queue, rpci);
+		INIT_DELAYED_WORK(&rpci->queue_timeout,
+				    rpc_timeout_upcall_queue);
 		rpci->ops = NULL;
 	}
 }
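Where a handler used to reschedule itself with schedule_work(), the conversions above switch the object to a struct delayed_work, whose embedded work_struct lives in its .work member; the handler therefore uses container_of(work, ..., member.work), and an immediate run is requested as schedule_delayed_work(..., 0). A minimal sketch with illustrative names:

    #include <linux/workqueue.h>

    struct my_pipe {
            struct delayed_work queue_timeout;      /* was a plain work_struct */
            int upcalls;
    };

    static void my_timeout_worker(struct work_struct *work)
    {
            /* the work_struct is the .work member of the delayed_work */
            struct my_pipe *pipe =
                    container_of(work, struct my_pipe, queue_timeout.work);

            pipe->upcalls = 0;
    }

    static void my_pipe_init(struct my_pipe *pipe)
    {
            INIT_DELAYED_WORK(&pipe->queue_timeout, my_timeout_worker);
            schedule_delayed_work(&pipe->queue_timeout, 0); /* run "now" */
    }
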
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index a1ab4ee..225e651 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -34,14 +34,14 @@
 #define RPC_BUFFER_MAXSIZE	(2048)
 #define RPC_BUFFER_POOLSIZE	(8)
 #define RPC_TASK_POOLSIZE	(8)
-static kmem_cache_t	*rpc_task_slabp __read_mostly;
-static kmem_cache_t	*rpc_buffer_slabp __read_mostly;
+static struct kmem_cache	*rpc_task_slabp __read_mostly;
+static struct kmem_cache	*rpc_buffer_slabp __read_mostly;
 static mempool_t	*rpc_task_mempool __read_mostly;
 static mempool_t	*rpc_buffer_mempool __read_mostly;
 
 static void			__rpc_default_timer(struct rpc_task *task);
 static void			rpciod_killall(void);
-static void			rpc_async_schedule(void *);
+static void			rpc_async_schedule(struct work_struct *);
 
 /*
  * RPC tasks sit here while waiting for conditions to improve.
@@ -305,7 +305,7 @@
 	if (RPC_IS_ASYNC(task)) {
 		int status;
 
-		INIT_WORK(&task->u.tk_work, rpc_async_schedule, (void *)task);
+		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
 		status = queue_work(task->tk_workqueue, &task->u.tk_work);
 		if (status < 0) {
 			printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
@@ -695,9 +695,9 @@
 	return __rpc_execute(task);
 }
 
-static void rpc_async_schedule(void *arg)
+static void rpc_async_schedule(struct work_struct *work)
 {
-	__rpc_execute((struct rpc_task *)arg);
+	__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
 }
 
 /**
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c
index ee9bb15..c7bb5f7 100644
--- a/net/sunrpc/svcauth.c
+++ b/net/sunrpc/svcauth.c
@@ -119,7 +119,8 @@
 #define	DN_HASHMASK	(DN_HASHMAX-1)
 
 static struct hlist_head	auth_domain_table[DN_HASHMAX];
-static spinlock_t	auth_domain_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t	auth_domain_lock =
+	__SPIN_LOCK_UNLOCKED(auth_domain_lock);
 
 void auth_domain_put(struct auth_domain *dom)
 {
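Statically allocated locks also change spelling here: the bare SPIN_LOCK_UNLOCKED initializer gives every such lock the same lockdep class, so it is replaced by __SPIN_LOCK_UNLOCKED(name) (or the DEFINE_SPINLOCK() shorthand), which names the lock for lockdep. A tiny sketch, with illustrative lock names:

    #include <linux/spinlock.h>

    /* old form, one shared class: static spinlock_t my_lock = SPIN_LOCK_UNLOCKED; */
    static spinlock_t my_table_lock = __SPIN_LOCK_UNLOCKED(my_table_lock);

    /* equivalent shorthand for new code */
    static DEFINE_SPINLOCK(my_other_lock);
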
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 64ca1f6..99f54fb 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -32,6 +32,7 @@
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
 #include <linux/file.h>
+#include <linux/freezer.h>
 #include <net/sock.h>
 #include <net/checksum.h>
 #include <net/ip.h>
@@ -84,6 +85,35 @@
  */
 static int svc_conn_age_period = 6*60;
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static struct lock_class_key svc_key[2];
+static struct lock_class_key svc_slock_key[2];
+
+static inline void svc_reclassify_socket(struct socket *sock)
+{
+	struct sock *sk = sock->sk;
+	BUG_ON(sk->sk_lock.owner != NULL);
+	switch (sk->sk_family) {
+	case AF_INET:
+		sock_lock_init_class_and_name(sk, "slock-AF_INET-NFSD",
+		    &svc_slock_key[0], "sk_lock-AF_INET-NFSD", &svc_key[0]);
+		break;
+
+	case AF_INET6:
+		sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFSD",
+		    &svc_slock_key[1], "sk_lock-AF_INET6-NFSD", &svc_key[1]);
+		break;
+
+	default:
+		BUG();
+	}
+}
+#else
+static inline void svc_reclassify_socket(struct socket *sock)
+{
+}
+#endif
+
 /*
  * Queue up an idle server thread.  Must have pool->sp_lock held.
  * Note: this is really a stack rather than a queue, so that we only
@@ -1556,6 +1586,8 @@
 	if ((error = sock_create_kern(PF_INET, type, protocol, &sock)) < 0)
 		return error;
 
+	svc_reclassify_socket(sock);
+
 	if (type == SOCK_STREAM)
 		sock->sk->sk_reuse = 1; /* allow address reuse */
 	error = kernel_bind(sock, (struct sockaddr *) sin,
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 8085747..4f9a5d9 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -479,9 +479,10 @@
 	return status;
 }
 
-static void xprt_autoclose(void *args)
+static void xprt_autoclose(struct work_struct *work)
 {
-	struct rpc_xprt *xprt = (struct rpc_xprt *)args;
+	struct rpc_xprt *xprt =
+		container_of(work, struct rpc_xprt, task_cleanup);
 
 	xprt_disconnect(xprt);
 	xprt->ops->close(xprt);
@@ -932,7 +933,7 @@
 
 	INIT_LIST_HEAD(&xprt->free);
 	INIT_LIST_HEAD(&xprt->recv);
-	INIT_WORK(&xprt->task_cleanup, xprt_autoclose, xprt);
+	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
 	init_timer(&xprt->timer);
 	xprt->timer.function = xprt_init_autodisconnect;
 	xprt->timer.data = (unsigned long) xprt;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 757fc91..2fc4a31 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1058,15 +1058,45 @@
 	return err;
 }
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static struct lock_class_key xs_key[2];
+static struct lock_class_key xs_slock_key[2];
+
+static inline void xs_reclassify_socket(struct socket *sock)
+{
+	struct sock *sk = sock->sk;
+	BUG_ON(sk->sk_lock.owner != NULL);
+	switch (sk->sk_family) {
+	case AF_INET:
+		sock_lock_init_class_and_name(sk, "slock-AF_INET-NFS",
+			&xs_slock_key[0], "sk_lock-AF_INET-NFS", &xs_key[0]);
+		break;
+
+	case AF_INET6:
+		sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFS",
+			&xs_slock_key[1], "sk_lock-AF_INET6-NFS", &xs_key[1]);
+		break;
+
+	default:
+		BUG();
+	}
+}
+#else
+static inline void xs_reclassify_socket(struct socket *sock)
+{
+}
+#endif
+
 /**
  * xs_udp_connect_worker - set up a UDP socket
- * @args: RPC transport to connect
+ * @work: RPC transport to connect
  *
  * Invoked by a work queue tasklet.
  */
-static void xs_udp_connect_worker(void *args)
+static void xs_udp_connect_worker(struct work_struct *work)
 {
-	struct rpc_xprt *xprt = (struct rpc_xprt *) args;
+	struct rpc_xprt *xprt =
+		container_of(work, struct rpc_xprt, connect_worker.work);
 	struct socket *sock = xprt->sock;
 	int err, status = -EIO;
 
@@ -1080,6 +1110,7 @@
 		dprintk("RPC:      can't create UDP transport socket (%d).\n", -err);
 		goto out;
 	}
+	xs_reclassify_socket(sock);
 
 	if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) {
 		sock_release(sock);
@@ -1144,13 +1175,14 @@
 
 /**
  * xs_tcp_connect_worker - connect a TCP socket to a remote endpoint
- * @args: RPC transport to connect
+ * @work: RPC transport to connect
  *
  * Invoked by a work queue tasklet.
  */
-static void xs_tcp_connect_worker(void *args)
+static void xs_tcp_connect_worker(struct work_struct *work)
 {
-	struct rpc_xprt *xprt = (struct rpc_xprt *)args;
+	struct rpc_xprt *xprt =
+		container_of(work, struct rpc_xprt, connect_worker.work);
 	struct socket *sock = xprt->sock;
 	int err, status = -EIO;
 
@@ -1163,6 +1195,7 @@
 			dprintk("RPC:      can't create TCP transport socket (%d).\n", -err);
 			goto out;
 		}
+		xs_reclassify_socket(sock);
 
 		if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) {
 			sock_release(sock);
@@ -1262,7 +1295,7 @@
 			xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
 	} else {
 		dprintk("RPC:      xs_connect scheduled xprt %p\n", xprt);
-		schedule_work(&xprt->connect_worker);
+		schedule_delayed_work(&xprt->connect_worker, 0);
 
 		/* flush_scheduled_work can sleep... */
 		if (!RPC_IS_ASYNC(task))
@@ -1375,7 +1408,7 @@
 	/* XXX: header size can vary due to auth type, IPv6, etc. */
 	xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
 
-	INIT_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt);
+	INIT_DELAYED_WORK(&xprt->connect_worker, xs_udp_connect_worker);
 	xprt->bind_timeout = XS_BIND_TO;
 	xprt->connect_timeout = XS_UDP_CONN_TO;
 	xprt->reestablish_timeout = XS_UDP_REEST_TO;
@@ -1420,7 +1453,7 @@
 	xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
 	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
 
-	INIT_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt);
+	INIT_DELAYED_WORK(&xprt->connect_worker, xs_tcp_connect_worker);
 	xprt->bind_timeout = XS_BIND_TO;
 	xprt->connect_timeout = XS_TCP_CONN_TO;
 	xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
diff --git a/net/tipc/handler.c b/net/tipc/handler.c
index ae6ddf0..eb80778 100644
--- a/net/tipc/handler.c
+++ b/net/tipc/handler.c
@@ -42,7 +42,7 @@
 	unsigned long data;
 };
 
-static kmem_cache_t *tipc_queue_item_cache;
+static struct kmem_cache *tipc_queue_item_cache;
 static struct list_head signal_queue_head;
 static DEFINE_SPINLOCK(qitem_lock);
 static int handler_enabled = 0;
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index e8198a2..414f890 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -12,7 +12,7 @@
 #include <net/ip.h>
 #include <net/xfrm.h>
 
-static kmem_cache_t *secpath_cachep __read_mostly;
+static struct kmem_cache *secpath_cachep __read_mostly;
 
 void __secpath_destroy(struct sec_path *sp)
 {
@@ -27,7 +27,7 @@
 {
 	struct sec_path *sp;
 
-	sp = kmem_cache_alloc(secpath_cachep, SLAB_ATOMIC);
+	sp = kmem_cache_alloc(secpath_cachep, GFP_ATOMIC);
 	if (!sp)
 		return NULL;
 
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 64d3938..3f3f563 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -39,7 +39,7 @@
 static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
 static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];
 
-static kmem_cache_t *xfrm_dst_cache __read_mostly;
+static struct kmem_cache *xfrm_dst_cache __read_mostly;
 
 static struct work_struct xfrm_policy_gc_work;
 static HLIST_HEAD(xfrm_policy_gc_list);
@@ -392,7 +392,7 @@
 	xfrm_pol_put(policy);
 }
 
-static void xfrm_policy_gc_task(void *data)
+static void xfrm_policy_gc_task(struct work_struct *work)
 {
 	struct xfrm_policy *policy;
 	struct hlist_node *entry, *tmp;
@@ -580,7 +580,7 @@
 
 static DEFINE_MUTEX(hash_resize_mutex);
 
-static void xfrm_hash_resize(void *__unused)
+static void xfrm_hash_resize(struct work_struct *__unused)
 {
 	int dir, total;
 
@@ -597,7 +597,7 @@
 	mutex_unlock(&hash_resize_mutex);
 }
 
-static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize, NULL);
+static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);
 
 /* Generate new index... KAME seems to generate them ordered by cost
  * of an absolute unpredictability of ordering of rules. This will not pass. */
@@ -2116,7 +2116,7 @@
 			panic("XFRM: failed to allocate bydst hash\n");
 	}
 
-	INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task, NULL);
+	INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task);
 	register_netdevice_notifier(&xfrm_dev_notifier);
 }
 
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 864962b..da54a64 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -115,7 +115,7 @@
 
 static DEFINE_MUTEX(hash_resize_mutex);
 
-static void xfrm_hash_resize(void *__unused)
+static void xfrm_hash_resize(struct work_struct *__unused)
 {
 	struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
 	unsigned long nsize, osize;
@@ -168,7 +168,7 @@
 	mutex_unlock(&hash_resize_mutex);
 }
 
-static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize, NULL);
+static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);
 
 DECLARE_WAIT_QUEUE_HEAD(km_waitq);
 EXPORT_SYMBOL(km_waitq);
@@ -207,7 +207,7 @@
 	kfree(x);
 }
 
-static void xfrm_state_gc_task(void *data)
+static void xfrm_state_gc_task(struct work_struct *data)
 {
 	struct xfrm_state *x;
 	struct hlist_node *entry, *tmp;
@@ -1568,6 +1568,6 @@
 		panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
 	xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);
 
-	INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task, NULL);
+	INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task);
 }
 
diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc
index 338bdea..f5628c5 100644
--- a/scripts/kconfig/qconf.cc
+++ b/scripts/kconfig/qconf.cc
@@ -798,7 +798,7 @@
 			QAction *action;
 
 			headerPopup = new QPopupMenu(this);
-			action = new QAction("Show Name", 0, this);
+			action = new QAction(NULL, "Show Name", 0, this);
 			  action->setToggleAction(TRUE);
 			  connect(action, SIGNAL(toggled(bool)),
 				  parent(), SLOT(setShowName(bool)));
@@ -806,7 +806,7 @@
 				  action, SLOT(setOn(bool)));
 			  action->setOn(showName);
 			  action->addTo(headerPopup);
-			action = new QAction("Show Range", 0, this);
+			action = new QAction(NULL, "Show Range", 0, this);
 			  action->setToggleAction(TRUE);
 			  connect(action, SIGNAL(toggled(bool)),
 				  parent(), SLOT(setShowRange(bool)));
@@ -814,7 +814,7 @@
 				  action, SLOT(setOn(bool)));
 			  action->setOn(showRange);
 			  action->addTo(headerPopup);
-			action = new QAction("Show Data", 0, this);
+			action = new QAction(NULL, "Show Data", 0, this);
 			  action->setToggleAction(TRUE);
 			  connect(action, SIGNAL(toggled(bool)),
 				  parent(), SLOT(setShowData(bool)));
@@ -1161,7 +1161,7 @@
 QPopupMenu* ConfigInfoView::createPopupMenu(const QPoint& pos)
 {
 	QPopupMenu* popup = Parent::createPopupMenu(pos);
-	QAction* action = new QAction("Show Debug Info", 0, popup);
+	QAction* action = new QAction(NULL,"Show Debug Info", 0, popup);
 	  action->setToggleAction(TRUE);
 	  connect(action, SIGNAL(toggled(bool)), SLOT(setShowDebug(bool)));
 	  connect(this, SIGNAL(showDebugChanged(bool)), action, SLOT(setOn(bool)));
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index 187f5de..df3b272 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -1430,7 +1430,7 @@
 	    # corresponding data structures "correctly". Catch it later in
 	    # output_* subs.
 	    push_parameter($arg, "", $file);
-	} elsif ($arg =~ m/\(/) {
+	} elsif ($arg =~ m/\(.*\*/) {
 	    # pointer-to-function
 	    $arg =~ tr/#/,/;
 	    $arg =~ m/[^\(]+\(\*([^\)]+)\)/;
diff --git a/scripts/ver_linux b/scripts/ver_linux
index 84999f6..72876df 100755
--- a/scripts/ver_linux
+++ b/scripts/ver_linux
@@ -48,6 +48,8 @@
 xfs_db -V 2>&1 | grep version | awk \
 'NR==1{print "xfsprogs              ", $3}'
 
+pccardctl -V 2>&1| grep pcmciautils | awk '{print "pcmciautils           ", $2}'
+
 cardmgr -V 2>&1| grep version | awk \
 'NR==1{print "pcmcia-cs             ", $3}'
 
@@ -87,10 +89,16 @@
 loadkeys -V 2>&1 | awk \
 '(NR==1 && ($2 ~ /console-tools/)) {print "Console-tools         ", $3}'
 
+oprofiled --version 2>&1 | awk \
+'(NR==1 && ($2 == "oprofile")) {print "oprofile              ", $3}'
+
 expr --v 2>&1 | awk 'NR==1{print "Sh-utils              ", $NF}'
 
 udevinfo -V 2>&1 | grep version | awk '{print "udev                  ", $3}'
 
+iwconfig --version 2>&1 | awk \
+'(NR==1 && ($3 == "version")) {print "wireless-tools        ",$4}'
+
 if [ -e /proc/modules ]; then
     X=`cat /proc/modules | sed -e "s/ .*$//"`
     echo "Modules Loaded         "$X
diff --git a/security/keys/key.c b/security/keys/key.c
index 80de8c3..ac9326c 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -20,7 +20,7 @@
 #include <linux/err.h>
 #include "internal.h"
 
-static kmem_cache_t	*key_jar;
+static struct kmem_cache	*key_jar;
 struct rb_root		key_serial_tree; /* tree of keys indexed by serial */
 DEFINE_SPINLOCK(key_serial_lock);
 
@@ -30,8 +30,8 @@
 static LIST_HEAD(key_types_list);
 static DECLARE_RWSEM(key_types_sem);
 
-static void key_cleanup(void *data);
-static DECLARE_WORK(key_cleanup_task, key_cleanup, NULL);
+static void key_cleanup(struct work_struct *work);
+static DECLARE_WORK(key_cleanup_task, key_cleanup);
 
 /* we serialise key instantiation and link */
 DECLARE_RWSEM(key_construction_sem);
@@ -285,16 +285,14 @@
 	}
 
 	/* allocate and initialise the key and its description */
-	key = kmem_cache_alloc(key_jar, SLAB_KERNEL);
+	key = kmem_cache_alloc(key_jar, GFP_KERNEL);
 	if (!key)
 		goto no_memory_2;
 
 	if (desc) {
-		key->description = kmalloc(desclen, GFP_KERNEL);
+		key->description = kmemdup(desc, desclen, GFP_KERNEL);
 		if (!key->description)
 			goto no_memory_3;
-
-		memcpy(key->description, desc, desclen);
 	}
 
 	atomic_set(&key->usage, 1);
@@ -552,7 +550,7 @@
  * do cleaning up in process context so that we don't have to disable
  * interrupts all over the place
  */
-static void key_cleanup(void *data)
+static void key_cleanup(struct work_struct *work)
 {
 	struct rb_node *_n;
 	struct key *key;
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index e8d02ac..ad45ce7 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -706,12 +706,10 @@
 				BUG_ON(size > PAGE_SIZE);
 
 				ret = -ENOMEM;
-				nklist = kmalloc(size, GFP_KERNEL);
+				nklist = kmemdup(klist, size, GFP_KERNEL);
 				if (!nklist)
 					goto error2;
 
-				memcpy(nklist, klist, size);
-
 				/* replace matched key */
 				atomic_inc(&key->usage);
 				nklist->keys[loop] = key;
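The key and keyring hunks replace the kmalloc()-then-memcpy() pair with kmemdup(), which allocates and copies in one call and returns NULL on failure. A short sketch; the helper name is illustrative:

    #include <linux/slab.h>
    #include <linux/string.h>

    /* duplicate a caller-supplied description into kernel memory */
    static char *dup_description(const char *desc, size_t len, gfp_t gfp)
    {
            return kmemdup(desc, len, gfp); /* NULL if the allocation fails */
    }
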
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 32150cf..b6f8680 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -27,7 +27,7 @@
 struct key_user root_key_user = {
 	.usage		= ATOMIC_INIT(3),
 	.consq		= LIST_HEAD_INIT(root_key_user.consq),
-	.lock		= SPIN_LOCK_UNLOCKED,
+	.lock		= __SPIN_LOCK_UNLOCKED(root_key_user.lock),
 	.nkeys		= ATOMIC_INIT(2),
 	.nikeys		= ATOMIC_INIT(2),
 	.uid		= 0,
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index e73ac1a..e7c0b5e 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -124,7 +124,7 @@
 
 static struct avc_cache avc_cache;
 static struct avc_callback_node *avc_callbacks;
-static kmem_cache_t *avc_node_cachep;
+static struct kmem_cache *avc_node_cachep;
 
 static inline int avc_hash(u32 ssid, u32 tsid, u16 tclass)
 {
@@ -332,7 +332,7 @@
 {
 	struct avc_node *node;
 
-	node = kmem_cache_alloc(avc_node_cachep, SLAB_ATOMIC);
+	node = kmem_cache_alloc(avc_node_cachep, GFP_ATOMIC);
 	if (!node)
 		goto out;
 
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 78f98fe..44e9cd4 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -124,7 +124,7 @@
 static LIST_HEAD(superblock_security_head);
 static DEFINE_SPINLOCK(sb_security_lock);
 
-static kmem_cache_t *sel_inode_cache;
+static struct kmem_cache *sel_inode_cache;
 
 /* Return security context for a given sid or just the context 
    length if the buffer is null or length is 0 */
@@ -181,7 +181,7 @@
 	struct task_security_struct *tsec = current->security;
 	struct inode_security_struct *isec;
 
-	isec = kmem_cache_alloc(sel_inode_cache, SLAB_KERNEL);
+	isec = kmem_cache_alloc(sel_inode_cache, GFP_KERNEL);
 	if (!isec)
 		return -ENOMEM;
 
diff --git a/security/selinux/ss/avtab.c b/security/selinux/ss/avtab.c
index d049c7a..ebb993c 100644
--- a/security/selinux/ss/avtab.c
+++ b/security/selinux/ss/avtab.c
@@ -28,7 +28,7 @@
  (keyp->source_type << 9)) & \
  AVTAB_HASH_MASK)
 
-static kmem_cache_t *avtab_node_cachep;
+static struct kmem_cache *avtab_node_cachep;
 
 static struct avtab_node*
 avtab_insert_node(struct avtab *h, int hvalue,
@@ -36,7 +36,7 @@
 		  struct avtab_key *key, struct avtab_datum *datum)
 {
 	struct avtab_node * newnode;
-	newnode = kmem_cache_alloc(avtab_node_cachep, SLAB_KERNEL);
+	newnode = kmem_cache_alloc(avtab_node_cachep, GFP_KERNEL);
 	if (newnode == NULL)
 		return NULL;
 	memset(newnode, 0, sizeof(struct avtab_node));
diff --git a/sound/aoa/aoa-gpio.h b/sound/aoa/aoa-gpio.h
index 3a61f31..ee64f5d 100644
--- a/sound/aoa/aoa-gpio.h
+++ b/sound/aoa/aoa-gpio.h
@@ -59,10 +59,10 @@
 };
 
 struct gpio_notification {
+	struct delayed_work work;
 	notify_func_t notify;
 	void *data;
 	void *gpio_private;
-	struct work_struct work;
 	struct mutex mutex;
 };
 
diff --git a/sound/aoa/core/snd-aoa-gpio-feature.c b/sound/aoa/core/snd-aoa-gpio-feature.c
index 40eb47e..2b03bc7 100644
--- a/sound/aoa/core/snd-aoa-gpio-feature.c
+++ b/sound/aoa/core/snd-aoa-gpio-feature.c
@@ -195,9 +195,10 @@
 	ftr_gpio_set_lineout(rt, (s>>2)&1);
 }
 
-static void ftr_handle_notify(void *data)
+static void ftr_handle_notify(struct work_struct *work)
 {
-	struct gpio_notification *notif = data;
+	struct gpio_notification *notif =
+		container_of(work, struct gpio_notification, work.work);
 
 	mutex_lock(&notif->mutex);
 	if (notif->notify)
@@ -253,12 +254,9 @@
 
 	ftr_gpio_all_amps_off(rt);
 	rt->implementation_private = 0;
-	INIT_WORK(&rt->headphone_notify.work, ftr_handle_notify,
-		  &rt->headphone_notify);
-	INIT_WORK(&rt->line_in_notify.work, ftr_handle_notify,
-		  &rt->line_in_notify);
-	INIT_WORK(&rt->line_out_notify.work, ftr_handle_notify,
-		  &rt->line_out_notify);
+	INIT_DELAYED_WORK(&rt->headphone_notify.work, ftr_handle_notify);
+	INIT_DELAYED_WORK(&rt->line_in_notify.work, ftr_handle_notify);
+	INIT_DELAYED_WORK(&rt->line_out_notify.work, ftr_handle_notify);
 	mutex_init(&rt->headphone_notify.mutex);
 	mutex_init(&rt->line_in_notify.mutex);
 	mutex_init(&rt->line_out_notify.mutex);
@@ -287,7 +285,7 @@
 {
 	struct gpio_notification *notif = data;
 
-	schedule_work(&notif->work);
+	schedule_delayed_work(&notif->work, 0);
 
 	return IRQ_HANDLED;
 }
diff --git a/sound/aoa/core/snd-aoa-gpio-pmf.c b/sound/aoa/core/snd-aoa-gpio-pmf.c
index 2836c32..5ca2220 100644
--- a/sound/aoa/core/snd-aoa-gpio-pmf.c
+++ b/sound/aoa/core/snd-aoa-gpio-pmf.c
@@ -69,9 +69,10 @@
 	pmf_gpio_set_lineout(rt, (s>>2)&1);
 }
 
-static void pmf_handle_notify(void *data)
+static void pmf_handle_notify(struct work_struct *work)
 {
-	struct gpio_notification *notif = data;
+	struct gpio_notification *notif =
+		container_of(work, struct gpio_notification, work.work);
 
 	mutex_lock(&notif->mutex);
 	if (notif->notify)
@@ -83,12 +84,9 @@
 {
 	pmf_gpio_all_amps_off(rt);
 	rt->implementation_private = 0;
-	INIT_WORK(&rt->headphone_notify.work, pmf_handle_notify,
-		  &rt->headphone_notify);
-	INIT_WORK(&rt->line_in_notify.work, pmf_handle_notify,
-		  &rt->line_in_notify);
-	INIT_WORK(&rt->line_out_notify.work, pmf_handle_notify,
-		  &rt->line_out_notify);
+	INIT_DELAYED_WORK(&rt->headphone_notify.work, pmf_handle_notify);
+	INIT_DELAYED_WORK(&rt->line_in_notify.work, pmf_handle_notify);
+	INIT_DELAYED_WORK(&rt->line_out_notify.work, pmf_handle_notify);
 	mutex_init(&rt->headphone_notify.mutex);
 	mutex_init(&rt->line_in_notify.mutex);
 	mutex_init(&rt->line_out_notify.mutex);
@@ -129,7 +127,7 @@
 {
 	struct gpio_notification *notif = data;
 
-	schedule_work(&notif->work);
+	schedule_delayed_work(&notif->work, 0);
 }
 
 static int pmf_set_notify(struct gpio_runtime *rt,
diff --git a/sound/arm/sa11xx-uda1341.c b/sound/arm/sa11xx-uda1341.c
index c79a9af..c7e1b26 100644
--- a/sound/arm/sa11xx-uda1341.c
+++ b/sound/arm/sa11xx-uda1341.c
@@ -125,7 +125,7 @@
 #else
 	dma_regs_t *dma_regs;	/* points to our DMA registers */
 #endif
-	int active:1;		/* we are using this stream for transfer now */
+	unsigned int active:1;	/* we are using this stream for transfer now */
 	int period;		/* current transfer period */
 	int periods;		/* current count of periods registered in the DMA engine */
 	int tx_spin;		/* are we recording - flag used to do DMA trans. for sync */
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 66e24b5..6ea67b1 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -3027,7 +3027,7 @@
 	struct page * page;
 	
 	if (substream == NULL)
-		return NOPAGE_OOM;
+		return NOPAGE_SIGBUS;
 	runtime = substream->runtime;
 	page = virt_to_page(runtime->status);
 	get_page(page);
@@ -3070,7 +3070,7 @@
 	struct page * page;
 	
 	if (substream == NULL)
-		return NOPAGE_OOM;
+		return NOPAGE_SIGBUS;
 	runtime = substream->runtime;
 	page = virt_to_page(runtime->control);
 	get_page(page);
@@ -3131,18 +3131,18 @@
 	size_t dma_bytes;
 	
 	if (substream == NULL)
-		return NOPAGE_OOM;
+		return NOPAGE_SIGBUS;
 	runtime = substream->runtime;
 	offset = area->vm_pgoff << PAGE_SHIFT;
 	offset += address - area->vm_start;
-	snd_assert((offset % PAGE_SIZE) == 0, return NOPAGE_OOM);
+	snd_assert((offset % PAGE_SIZE) == 0, return NOPAGE_SIGBUS);
 	dma_bytes = PAGE_ALIGN(runtime->dma_bytes);
 	if (offset > dma_bytes - PAGE_SIZE)
 		return NOPAGE_SIGBUS;
 	if (substream->ops->page) {
 		page = substream->ops->page(substream, offset);
 		if (! page)
-			return NOPAGE_OOM;
+			return NOPAGE_OOM; /* XXX: is this really due to OOM? */
 	} else {
 		vaddr = runtime->dma_area + offset;
 		page = virt_to_page(vaddr);
diff --git a/sound/i2c/other/ak4114.c b/sound/i2c/other/ak4114.c
index 12ffffc..d2f2c50 100644
--- a/sound/i2c/other/ak4114.c
+++ b/sound/i2c/other/ak4114.c
@@ -35,7 +35,7 @@
 
 #define AK4114_ADDR			0x00 /* fixed address */
 
-static void ak4114_stats(void *);
+static void ak4114_stats(struct work_struct *work);
 
 static void reg_write(struct ak4114 *ak4114, unsigned char reg, unsigned char val)
 {
@@ -158,7 +158,7 @@
 	reg_write(chip, AK4114_REG_PWRDN, old | AK4114_RST | AK4114_PWN);
 	/* bring up statistics / event queueing */
 	chip->init = 0;
-	INIT_WORK(&chip->work, ak4114_stats, chip);
+	INIT_DELAYED_WORK(&chip->work, ak4114_stats);
 	queue_delayed_work(chip->workqueue, &chip->work, HZ / 10);
 }
 
@@ -561,9 +561,9 @@
 	return res;
 }
 
-static void ak4114_stats(void *data)
+static void ak4114_stats(struct work_struct *work)
 {
-	struct ak4114 *chip = (struct ak4114 *)data;
+	struct ak4114 *chip = container_of(work, struct ak4114, work.work);
 
 	if (chip->init)
 		return;
diff --git a/sound/oss/Kconfig b/sound/oss/Kconfig
index cc2b9ab..a0588c2 100644
--- a/sound/oss/Kconfig
+++ b/sound/oss/Kconfig
@@ -5,20 +5,6 @@
 #
 # Prompt user for primary drivers.
 
-config OSS_OBSOLETE_DRIVER
-	bool "Obsolete OSS drivers"
-	depends on SOUND_PRIME
-	help
-	  This option enables support for obsolete OSS drivers that
-	  are scheduled for removal in the near future since there
-	  are ALSA drivers for the same hardware.
-
-	  Please contact Adrian Bunk <bunk@stusta.de> if you had to
-	  say Y here because your soundcard is not properly supported
-	  by ALSA.
-
-	  If unsure, say N.
-
 config SOUND_BT878
 	tristate "BT878 audio dma"
 	depends on SOUND_PRIME && PCI
@@ -35,40 +21,6 @@
 	  To compile this driver as a module, choose M here: the module will
 	  be called btaudio.
 
-config SOUND_EMU10K1
-	tristate "Creative SBLive! (EMU10K1)"
-	depends on SOUND_PRIME && PCI && OSS_OBSOLETE_DRIVER
-	---help---
-	  Say Y or M if you have a PCI sound card using the EMU10K1 chipset,
-	  such as the Creative SBLive!, SB PCI512 or Emu-APS.
-
-	  For more information on this driver and the degree of support for
-	  the different card models please check:
-
-		<http://sourceforge.net/projects/emu10k1/>
-
-	  It is now possible to load dsp microcode patches into the EMU10K1
-	  chip.  These patches are used to implement real time sound
-	  processing effects which include for example: signal routing,
-	  bass/treble control, AC3 passthrough, ...
-	  Userspace tools to create new patches and load/unload them can be
-	  found in the emu-tools package at the above URL.
-
-config MIDI_EMU10K1
-	bool "Creative SBLive! MIDI (EXPERIMENTAL)"
-	depends on SOUND_EMU10K1 && EXPERIMENTAL && ISA_DMA_API
-	help
-	  Say Y if you want to be able to use the OSS /dev/sequencer
-	  interface.  This code is still experimental.
-
-config SOUND_FUSION
-	tristate "Crystal SoundFusion (CS4280/461x)"
-	depends on SOUND_PRIME && PCI && OSS_OBSOLETE_DRIVER
-	help
-	  This module drives the Crystal SoundFusion devices (CS4280/46xx
-	  series) when wired as native sound drivers with AC97 codecs.  If
-	  this driver does not work try the CS4232 driver.
-
 config SOUND_BCM_CS4297A
 	tristate "Crystal Sound CS4297a (for Swarm)"
 	depends on SOUND_PRIME && SIBYTE_SWARM
@@ -448,47 +400,6 @@
 
 	  Say Y unless you have 16MB or more RAM or a PCI sound card.
 
-config SOUND_AD1816
-	tristate "AD1816(A) based cards (EXPERIMENTAL)"
-	depends on EXPERIMENTAL && SOUND_OSS && OSS_OBSOLETE_DRIVER
-	help
-	  Say M here if you have a sound card based on the Analog Devices
-	  AD1816(A) chip.
-
-	  If you compile the driver into the kernel, you have to add
-	  "ad1816=<io>,<irq>,<dma>,<dma2>" to the kernel command line.
-
-config SOUND_AD1889
-	tristate "AD1889 based cards (AD1819 codec) (EXPERIMENTAL)"
-	depends on EXPERIMENTAL && SOUND_OSS && PCI && OSS_OBSOLETE_DRIVER
-	help
-	  Say M here if you have a sound card based on the Analog Devices
-	  AD1889 chip.
-
-config SOUND_ADLIB
-	tristate "Adlib Cards"
-	depends on SOUND_OSS && OSS_OBSOLETE_DRIVER
-	help
-	  Includes ASB 64 4D. Information on programming AdLib cards is
-	  available at <http://www.itsnet.com/home/ldragon/Specs/adlib.html>.
-
-config SOUND_ACI_MIXER
-	tristate "ACI mixer (miroSOUND PCM1-pro/PCM12/PCM20)"
-	depends on SOUND_OSS && OSS_OBSOLETE_DRIVER
-	---help---
-	  ACI (Audio Command Interface) is a protocol used to communicate with
-	  the microcontroller on some sound cards produced by miro and
-	  Cardinal Technologies.  The main function of the ACI is to control
-	  the mixer and to get a product identification.
-
-	  This VoxWare ACI driver currently supports the ACI functions on the
-	  miroSOUND PCM1-pro, PCM12 and PCM20 radio. On the PCM20 radio, ACI
-	  also controls the radio tuner. This is supported in the video4linux
-	  miropcm20 driver (say M or Y here and go back to "Multimedia
-	  devices" -> "Radio Adapters").
-
-	  This driver is also available as a module and will be called aci.
-
 config SOUND_CS4232
 	tristate "Crystal CS4232 based (PnP) cards"
 	depends on SOUND_OSS
@@ -594,18 +505,6 @@
 	  If you compile the driver into the kernel, you have to add
 	  "mpu401=<io>,<irq>" to the kernel command line.
 
-config SOUND_NM256
-	tristate "NM256AV/NM256ZX audio support"
-	depends on SOUND_OSS && OSS_OBSOLETE_DRIVER
-	help
-	  Say M here to include audio support for the NeoMagic 256AV/256ZX
-	  chipsets. These are the audio chipsets found in the Sony
-	  Z505S/SX/DX, some Sony F-series, and the Dell Latitude CPi and CPt
-	  laptops. It includes support for an AC97-compatible mixer and an
-	  apparently proprietary sound engine.
-
-	  See <file:Documentation/sound/oss/NM256> for further information.
-
 config SOUND_PAS
 	tristate "ProAudioSpectrum 16 support"
 	depends on SOUND_OSS
@@ -714,20 +613,6 @@
 
 	  If unsure, say Y.
 
-config SOUND_OPL3SA2
-	tristate "Yamaha OPL3-SA2 and SA3 based PnP cards"
-	depends on SOUND_OSS && OSS_OBSOLETE_DRIVER
-	help
-	  Say Y or M if you have a card based on one of these Yamaha sound
-	  chipsets or the "SAx", which is actually a SA3. Read
-	  <file:Documentation/sound/oss/OPL3-SA2> for more information on
-	  configuring these cards.
-
-	  If you compile the driver into the kernel and do not also
-	  configure in the optional ISA PnP support, you will have to add
-	  "opl3sa2=<io>,<irq>,<dma>,<dma2>,<mssio>,<mpuio>" to the kernel
-	  command line.
-
 config SOUND_UART6850
 	tristate "6850 UART support"
 	depends on SOUND_OSS
diff --git a/sound/oss/btaudio.c b/sound/oss/btaudio.c
index 6ad3841..ad7210a 100644
--- a/sound/oss/btaudio.c
+++ b/sound/oss/btaudio.c
@@ -1020,6 +1020,7 @@
  fail2:
         free_irq(bta->irq,bta);	
  fail1:
+	iounmap(bta->mmio);
 	kfree(bta);
  fail0:
 	release_mem_region(pci_resource_start(pci_dev,0),
@@ -1051,6 +1052,7 @@
         free_irq(bta->irq,bta);
 	release_mem_region(pci_resource_start(pci_dev,0),
 			   pci_resource_len(pci_dev,0));
+	iounmap(bta->mmio);
 
 	/* remove from linked list */
 	if (bta == btaudios) {
diff --git a/sound/oss/emu10k1/audio.c b/sound/oss/emu10k1/audio.c
index 86dd239..49f902f 100644
--- a/sound/oss/emu10k1/audio.c
+++ b/sound/oss/emu10k1/audio.c
@@ -111,9 +111,15 @@
 
 		if ((bytestocopy >= wiinst->buffer.fragment_size)
 		    || (bytestocopy >= count)) {
+			int rc;
+
 			bytestocopy = min_t(u32, bytestocopy, count);
 
-			emu10k1_wavein_xferdata(wiinst, (u8 __user *)buffer, &bytestocopy);
+			rc = emu10k1_wavein_xferdata(wiinst,
+						     (u8 __user *)buffer,
+						     &bytestocopy);
+			if (rc)
+				return rc;
 
 			count -= bytestocopy;
 			buffer += bytestocopy;
diff --git a/sound/oss/emu10k1/cardwi.c b/sound/oss/emu10k1/cardwi.c
index 8bbf44b..060d1be 100644
--- a/sound/oss/emu10k1/cardwi.c
+++ b/sound/oss/emu10k1/cardwi.c
@@ -304,11 +304,12 @@
 	}
 }
 
-static void copy_block(u8 __user *dst, u8 * src, u32 str, u32 len, u8 cov)
+static int copy_block(u8 __user *dst, u8 * src, u32 str, u32 len, u8 cov)
 {
-	if (cov == 1)
-		__copy_to_user(dst, src + str, len);
-	else {
+	if (cov == 1) {
+		if (__copy_to_user(dst, src + str, len))
+			return -EFAULT;
+	} else {
 		u8 byte;
 		u32 i;
 
@@ -316,22 +317,26 @@
 
 		for (i = 0; i < len; i++) {
 			byte = src[2 * i] ^ 0x80;
-			__copy_to_user(dst + i, &byte, 1);
+			if (__copy_to_user(dst + i, &byte, 1))
+				return -EFAULT;
 		}
 	}
+
+	return 0;
 }
 
-void emu10k1_wavein_xferdata(struct wiinst *wiinst, u8 __user *data, u32 * size)
+int emu10k1_wavein_xferdata(struct wiinst *wiinst, u8 __user *data, u32 * size)
 {
 	struct wavein_buffer *buffer = &wiinst->buffer;
 	u32 sizetocopy, sizetocopy_now, start;
 	unsigned long flags;
+	int ret;
 
 	sizetocopy = min_t(u32, buffer->size, *size);
 	*size = sizetocopy;
 
 	if (!sizetocopy)
-		return;
+		return 0;
 
 	spin_lock_irqsave(&wiinst->lock, flags);
 	start = buffer->pos;
@@ -345,11 +350,17 @@
 	if (sizetocopy > sizetocopy_now) {
 		sizetocopy -= sizetocopy_now;
 
-		copy_block(data, buffer->addr, start, sizetocopy_now, buffer->cov);
-		copy_block(data + sizetocopy_now, buffer->addr, 0, sizetocopy, buffer->cov);
+		ret = copy_block(data, buffer->addr, start, sizetocopy_now,
+				 buffer->cov);
+		if (ret == 0)
+			ret = copy_block(data + sizetocopy_now, buffer->addr, 0,
+					 sizetocopy, buffer->cov);
 	} else {
-		copy_block(data, buffer->addr, start, sizetocopy, buffer->cov);
+		ret = copy_block(data, buffer->addr, start, sizetocopy,
+				 buffer->cov);
 	}
+
+	return ret;
 }
 
 void emu10k1_wavein_update(struct emu10k1_card *card, struct wiinst *wiinst)
diff --git a/sound/oss/emu10k1/cardwi.h b/sound/oss/emu10k1/cardwi.h
index 15cfb9b..e82029b 100644
--- a/sound/oss/emu10k1/cardwi.h
+++ b/sound/oss/emu10k1/cardwi.h
@@ -83,7 +83,7 @@
 void emu10k1_wavein_start(struct emu10k1_wavedevice *);
 void emu10k1_wavein_stop(struct emu10k1_wavedevice *);
 void emu10k1_wavein_getxfersize(struct wiinst *, u32 *);
-void emu10k1_wavein_xferdata(struct wiinst *, u8 __user *, u32 *);
+int emu10k1_wavein_xferdata(struct wiinst *, u8 __user *, u32 *);
 int emu10k1_wavein_setformat(struct emu10k1_wavedevice *, struct wave_format *);
 void emu10k1_wavein_update(struct emu10k1_card *, struct wiinst *);
 
diff --git a/sound/oss/emu10k1/passthrough.c b/sound/oss/emu10k1/passthrough.c
index 4e3baca..6d21d43 100644
--- a/sound/oss/emu10k1/passthrough.c
+++ b/sound/oss/emu10k1/passthrough.c
@@ -162,12 +162,15 @@
 
 		DPD(3, "prepend size %d, prepending %d bytes\n", pt->prepend_size, needed);
 		if (count < needed) {
-			copy_from_user(pt->buf + pt->prepend_size, buffer, count);
+			if (copy_from_user(pt->buf + pt->prepend_size,
+					   buffer, count))
+				return -EFAULT;
 			pt->prepend_size += count;
 			DPD(3, "prepend size now %d\n", pt->prepend_size);
 			return count;
 		}
-		copy_from_user(pt->buf + pt->prepend_size, buffer, needed);
+		if (copy_from_user(pt->buf + pt->prepend_size, buffer, needed))
+			return -EFAULT;
 		r = pt_putblock(wave_dev, (u16 *) pt->buf, nonblock);
 		if (r)
 			return r;
@@ -178,7 +181,8 @@
 	blocks_copied = 0;
 	while (blocks > 0) {
 		u16 __user *bufptr = (u16 __user *) buffer + (bytes_copied/2);
-		copy_from_user(pt->buf, bufptr, PT_BLOCKSIZE);
+		if (copy_from_user(pt->buf, bufptr, PT_BLOCKSIZE))
+			return -EFAULT;
 		r = pt_putblock(wave_dev, (u16 *)pt->buf, nonblock);
 		if (r) {
 			if (bytes_copied)
@@ -193,7 +197,8 @@
 	i = count - bytes_copied;
 	if (i) {
 		pt->prepend_size = i;
-		copy_from_user(pt->buf, buffer + bytes_copied, i);
+		if (copy_from_user(pt->buf, buffer + bytes_copied, i))
+			return -EFAULT;
 		bytes_copied += i;
 		DPD(3, "filling prepend buffer with %d bytes", i);
 	}
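The emu10k1 changes above add the missing checks on copy_from_user()/__copy_to_user(), which return the number of bytes that could not be copied; any nonzero result must be turned into -EFAULT rather than ignored. A minimal sketch of the idiom (the helper is illustrative, not from the patch):

    #include <linux/errno.h>
    #include <linux/uaccess.h>

    static int fetch_from_user(void *dst, const void __user *src, size_t len)
    {
            if (copy_from_user(dst, src, len))
                    return -EFAULT;         /* partial copy: fault the caller */
            return 0;
    }
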
diff --git a/sound/oss/via82cxxx_audio.c b/sound/oss/via82cxxx_audio.c
index 17837d4..c96cc8c 100644
--- a/sound/oss/via82cxxx_audio.c
+++ b/sound/oss/via82cxxx_audio.c
@@ -2120,8 +2120,8 @@
 		return NOPAGE_SIGBUS; /* Disallow mremap */
 	}
         if (!card) {
-		DPRINTK ("EXIT, returning NOPAGE_OOM\n");
-		return NOPAGE_OOM;	/* Nothing allocated */
+		DPRINTK ("EXIT, returning NOPAGE_SIGBUS\n");
+		return NOPAGE_SIGBUS;	/* Nothing allocated */
 	}
 
 	pgoff = vma->vm_pgoff + ((address - vma->vm_start) >> PAGE_SHIFT);
diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
index 6577b23..7abcb10 100644
--- a/sound/pci/ac97/ac97_codec.c
+++ b/sound/pci/ac97/ac97_codec.c
@@ -1927,9 +1927,10 @@
 static struct snd_ac97_build_ops null_build_ops;
 
 #ifdef CONFIG_SND_AC97_POWER_SAVE
-static void do_update_power(void *data)
+static void do_update_power(struct work_struct *work)
 {
-	update_power_regs(data);
+	update_power_regs(
+		container_of(work, struct snd_ac97, power_work.work));
 }
 #endif
 
@@ -1989,7 +1990,7 @@
 	mutex_init(&ac97->page_mutex);
 #ifdef CONFIG_SND_AC97_POWER_SAVE
 	ac97->power_workq = create_workqueue("ac97");
-	INIT_WORK(&ac97->power_work, do_update_power, ac97);
+	INIT_DELAYED_WORK(&ac97->power_work, do_update_power);
 #endif
 
 #ifdef CONFIG_PCI
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 9c3d7ac..71482c1 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -272,10 +272,11 @@
 /*
  * process queued unsolicited events
  */
-static void process_unsol_events(void *data)
+static void process_unsol_events(struct work_struct *work)
 {
-	struct hda_bus *bus = data;
-	struct hda_bus_unsolicited *unsol = bus->unsol;
+	struct hda_bus_unsolicited *unsol =
+		container_of(work, struct hda_bus_unsolicited, work);
+	struct hda_bus *bus = unsol->bus;
 	struct hda_codec *codec;
 	unsigned int rp, caddr, res;
 
@@ -314,7 +315,8 @@
 		kfree(unsol);
 		return -ENOMEM;
 	}
-	INIT_WORK(&unsol->work, process_unsol_events, bus);
+	INIT_WORK(&unsol->work, process_unsol_events);
+	unsol->bus = bus;
 	bus->unsol = unsol;
 	return 0;
 }
diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
index f9416c3..9ca1baf 100644
--- a/sound/pci/hda/hda_local.h
+++ b/sound/pci/hda/hda_local.h
@@ -206,6 +206,7 @@
 	/* workqueue */
 	struct workqueue_struct *workq;
 	struct work_struct work;
+	struct hda_bus *bus;
 };
 
 /*
diff --git a/sound/ppc/tumbler.c b/sound/ppc/tumbler.c
index 2fbe1d1..8f074c7 100644
--- a/sound/ppc/tumbler.c
+++ b/sound/ppc/tumbler.c
@@ -942,10 +942,11 @@
 }
 
 static struct work_struct device_change;
+static struct snd_pmac *device_change_chip;
 
-static void device_change_handler(void *self)
+static void device_change_handler(struct work_struct *work)
 {
-	struct snd_pmac *chip = self;
+	struct snd_pmac *chip = device_change_chip;
 	struct pmac_tumbler *mix;
 	int headphone, lineout;
 
@@ -1417,7 +1418,8 @@
 	chip->resume = tumbler_resume;
 #endif
 
-	INIT_WORK(&device_change, device_change_handler, (void *)chip);
+	INIT_WORK(&device_change, device_change_handler);
+	device_change_chip = chip;
 
 #ifdef PMAC_SUPPORT_AUTOMUTE
 	if ((mix->headphone_irq >=0 || mix->lineout_irq >= 0)
diff --git a/sound/usb/usx2y/usX2Yhwdep.c b/sound/usb/usx2y/usX2Yhwdep.c
index 4b52d18..b76b3dd 100644
--- a/sound/usb/usx2y/usX2Yhwdep.c
+++ b/sound/usb/usx2y/usX2Yhwdep.c
@@ -48,7 +48,7 @@
 	
 	offset = area->vm_pgoff << PAGE_SHIFT;
 	offset += address - area->vm_start;
-	snd_assert((offset % PAGE_SIZE) == 0, return NOPAGE_OOM);
+	snd_assert((offset % PAGE_SIZE) == 0, return NOPAGE_SIGBUS);
 	vaddr = (char*)((struct usX2Ydev *)area->vm_private_data)->us428ctls_sharedmem + offset;
 	page = virt_to_page(vaddr);
 	get_page(page);