Merge with /pub/scm/linux/kernel/git/torvalds/linux-2.6.git
diff --git a/Documentation/i2c/busses/i2c-viapro b/Documentation/i2c/busses/i2c-viapro
index 9363b8b..1677566 100644
--- a/Documentation/i2c/busses/i2c-viapro
+++ b/Documentation/i2c/busses/i2c-viapro
@@ -7,12 +7,10 @@
   * VIA Technologies, Inc. VT82C686A/B
     Datasheet: Sometimes available at the VIA website
 
-  * VIA Technologies, Inc. VT8231, VT8233, VT8233A, VT8235, VT8237
-    Datasheet: available on request from Via
+  * VIA Technologies, Inc. VT8231, VT8233, VT8233A, VT8235, VT8237R
+    Datasheet: available on request from VIA
 
 Authors:
-	Frodo Looijaard <frodol@dds.nl>,
-	Philip Edelbrock <phil@netroedge.com>,
 	Kyösti Mälkki <kmalkki@cc.hut.fi>,
 	Mark D. Studebaker <mdsxyz123@yahoo.com>,
 	Jean Delvare <khali@linux-fr.org>
diff --git a/Documentation/i2c/writing-clients b/Documentation/i2c/writing-clients
index cff7b65..d19993c 100644
--- a/Documentation/i2c/writing-clients
+++ b/Documentation/i2c/writing-clients
@@ -412,7 +412,7 @@
         release_region(address,FOO_EXTENT);
     /* SENSORS ONLY END */
     ERROR1:
-      kfree(new_client);
+      kfree(data);
     ERROR0:
       return err;
   }
@@ -443,7 +443,7 @@
       release_region(client->addr,LM78_EXTENT);
     /* HYBRID SENSORS CHIP ONLY END */
 
-    kfree(data);
+    kfree(i2c_get_clientdata(client));
     return 0;
   }
 
diff --git a/Documentation/networking/dccp.txt b/Documentation/networking/dccp.txt
new file mode 100644
index 0000000..c45daab
--- /dev/null
+++ b/Documentation/networking/dccp.txt
@@ -0,0 +1,56 @@
+DCCP protocol
+=============
+
+Last updated: 10 November 2005
+
+Contents
+========
+
+- Introduction
+- Missing features
+- Socket options
+- Notes
+
+Introduction
+============
+
+Datagram Congestion Control Protocol (DCCP) is an unreliable,
+connection-based protocol designed to address issues present in UDP and
+TCP, particularly for real-time and multimedia traffic.
+
+It has a base protocol and pluggable congestion control IDs (CCIDs).
+
+It is currently at draft RFC status, and the protocol's homepage is at:
+	http://www.icir.org/kohler/dcp/
+
+Missing features
+================
+
+The DCCP implementation does not currently have all the features that are in
+the draft RFC.
+
+In particular, the following are missing:
+- CCID2 support
+- feature negotiation
+
+When testing against other implementations, it appears that elapsed
+time options are not encoded in compliance with the specification.
+
+Socket options
+==============
+
+DCCP_SOCKOPT_PACKET_SIZE is used by CCID3 to set the default packet
+size for its calculations.
+
+DCCP_SOCKOPT_SERVICE sets the service code. This is compulsory as per
+the specification; if you do not set it, you will get EPROTO.
+
+Notes
+=====
+
+SELinux does not yet have support for DCCP. You will need to turn it off or
+else you will get EACCES.
+
+DCCP does not traverse NAT successfully at present. This is because the
+checksum covers the pseudo-header, as it does for TCP and UDP. It should
+be relatively trivial to add Linux NAT support for DCCP.
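The socket-option notes above are easier to follow with a concrete call
sequence. What follows is a minimal, hypothetical userspace sketch, not
part of this patch; the fallback numeric values for SOCK_DCCP,
IPPROTO_DCCP, SOL_DCCP and DCCP_SOCKOPT_SERVICE are assumptions matching
mainline headers, so prefer the definitions exported by your kernel's
<linux/dccp.h> where available.

	#include <stdint.h>
	#include <arpa/inet.h>
	#include <sys/socket.h>

	#ifndef SOCK_DCCP
	#define SOCK_DCCP		6	/* assumed mainline value */
	#endif
	#ifndef IPPROTO_DCCP
	#define IPPROTO_DCCP		33	/* assumed mainline value */
	#endif
	#ifndef SOL_DCCP
	#define SOL_DCCP		269	/* assumed mainline value */
	#endif
	#ifndef DCCP_SOCKOPT_SERVICE
	#define DCCP_SOCKOPT_SERVICE	2	/* assumed mainline value */
	#endif

	/* Open a DCCP socket and set the compulsory service code before
	 * connect(); without DCCP_SOCKOPT_SERVICE the kernel returns
	 * EPROTO.  The service code 42 is purely illustrative.
	 */
	static int dccp_client_socket(void)
	{
		uint32_t service = htonl(42);
		int fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);

		if (fd < 0)
			return -1;
		if (setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE,
			       &service, sizeof(service)) < 0)
			return -1;
		return fd;
	}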
diff --git a/MAINTAINERS b/MAINTAINERS
index 5541f99..0b03a88 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -707,7 +707,7 @@
 P:	Arnaldo Carvalho de Melo
 M:	acme@mandriva.com
 L:	dccp@vger.kernel.org
-W:	http://www.wlug.org.nz/DCCP
+W:	http://linux-net.osdl.org/index.php/DCCP
 S:	Maintained
 
 DECnet NETWORK LAYER
diff --git a/Makefile b/Makefile
index ea96da1..6d1f727 100644
--- a/Makefile
+++ b/Makefile
@@ -347,7 +347,7 @@
 # Needed to be compatible with the O= option
 LINUXINCLUDE    := -Iinclude \
                    $(if $(KBUILD_SRC),-Iinclude2 -I$(srctree)/include) \
-		   -imacros include/linux/autoconf.h
+		   -include include/linux/autoconf.h
 
 CPPFLAGS        := -D__KERNEL__ $(LINUXINCLUDE)
 
@@ -407,7 +407,7 @@
 # of make so .config is not included in this case either (for *config).
 
 no-dot-config-targets := clean mrproper distclean \
-			 cscope TAGS tags help %docs check%
+			 cscope TAGS tags help %docs check% kernelrelease
 
 config-targets := 0
 mixed-targets  := 0
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index ec77721..3df7cbd 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -239,6 +239,8 @@
 
 source "arch/arm/mach-omap1/Kconfig"
 
+source "arch/arm/mach-omap2/Kconfig"
+
 source "arch/arm/mach-s3c2410/Kconfig"
 
 source "arch/arm/mach-lh7a40x/Kconfig"
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 114cda7f..81bd219 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -93,6 +93,7 @@
  machine-$(CONFIG_ARCH_IXP4XX)	   := ixp4xx
  machine-$(CONFIG_ARCH_IXP2000)    := ixp2000
  machine-$(CONFIG_ARCH_OMAP1)	   := omap1
+ machine-$(CONFIG_ARCH_OMAP2)	   := omap2
   incdir-$(CONFIG_ARCH_OMAP)	   := omap
  machine-$(CONFIG_ARCH_S3C2410)	   := s3c2410
  machine-$(CONFIG_ARCH_LH7A40X)	   := lh7a40x
diff --git a/arch/arm/configs/omap_h2_1610_defconfig b/arch/arm/configs/omap_h2_1610_defconfig
index 4198677..529f0f7 100644
--- a/arch/arm/configs/omap_h2_1610_defconfig
+++ b/arch/arm/configs/omap_h2_1610_defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.13
-# Mon Sep  5 18:07:12 2005
+# Linux kernel version: 2.6.14
+# Wed Nov  9 18:53:40 2005
 #
 CONFIG_ARM=y
 CONFIG_MMU=y
@@ -22,6 +22,7 @@
 # General setup
 #
 CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
 CONFIG_SWAP=y
 CONFIG_SYSVIPC=y
 # CONFIG_POSIX_MQUEUE is not set
@@ -31,6 +32,7 @@
 # CONFIG_HOTPLUG is not set
 CONFIG_KOBJECT_UEVENT=y
 # CONFIG_IKCONFIG is not set
+CONFIG_INITRAMFS_SOURCE=""
 # CONFIG_EMBEDDED is not set
 CONFIG_KALLSYMS=y
 # CONFIG_KALLSYMS_EXTRA_PASS is not set
@@ -60,6 +62,23 @@
 # CONFIG_KMOD is not set
 
 #
+# Block layer
+#
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+
+#
 # System Type
 #
 # CONFIG_ARCH_CLPS7500 is not set
@@ -81,6 +100,7 @@
 # CONFIG_ARCH_LH7A40X is not set
 CONFIG_ARCH_OMAP=y
 # CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_REALVIEW is not set
 # CONFIG_ARCH_IMX is not set
 # CONFIG_ARCH_H720X is not set
 # CONFIG_ARCH_AAEC2000 is not set
@@ -112,7 +132,7 @@
 # OMAP Core Type
 #
 # CONFIG_ARCH_OMAP730 is not set
-# CONFIG_ARCH_OMAP1510 is not set
+# CONFIG_ARCH_OMAP15XX is not set
 CONFIG_ARCH_OMAP16XX=y
 
 #
@@ -177,6 +197,8 @@
 # CONFIG_SPARSEMEM_MANUAL is not set
 CONFIG_FLATMEM=y
 CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4096
 # CONFIG_LEDS is not set
 CONFIG_ALIGNMENT_TRAP=y
 
@@ -258,14 +280,19 @@
 # CONFIG_INET_ESP is not set
 # CONFIG_INET_IPCOMP is not set
 # CONFIG_INET_TUNNEL is not set
-CONFIG_IP_TCPDIAG=y
-# CONFIG_IP_TCPDIAG_IPV6 is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
 # CONFIG_TCP_CONG_ADVANCED is not set
 CONFIG_TCP_CONG_BIC=y
 # CONFIG_IPV6 is not set
 # CONFIG_NETFILTER is not set
 
 #
+# DCCP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_DCCP is not set
+
+#
 # SCTP Configuration (EXPERIMENTAL)
 #
 # CONFIG_IP_SCTP is not set
@@ -281,6 +308,10 @@
 # CONFIG_NET_DIVERT is not set
 # CONFIG_ECONET is not set
 # CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
 # CONFIG_NET_SCHED is not set
 # CONFIG_NET_CLS_ROUTE is not set
 
@@ -291,6 +322,7 @@
 # CONFIG_HAMRADIO is not set
 # CONFIG_IRDA is not set
 # CONFIG_BT is not set
+# CONFIG_IEEE80211 is not set
 
 #
 # Device Drivers
@@ -328,21 +360,13 @@
 CONFIG_BLK_DEV_RAM_COUNT=16
 CONFIG_BLK_DEV_RAM_SIZE=8192
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE=""
 # CONFIG_CDROM_PKTCDVD is not set
-
-#
-# IO Schedulers
-#
-CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
-CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
 CONFIG_ATA_OVER_ETH=m
 
 #
 # SCSI device support
 #
+# CONFIG_RAID_ATTRS is not set
 CONFIG_SCSI=y
 CONFIG_SCSI_PROC_FS=y
 
@@ -369,10 +393,12 @@
 # CONFIG_SCSI_SPI_ATTRS is not set
 # CONFIG_SCSI_FC_ATTRS is not set
 # CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_ATTRS is not set
 
 #
 # SCSI low-level drivers
 #
+# CONFIG_ISCSI_TCP is not set
 # CONFIG_SCSI_SATA is not set
 # CONFIG_SCSI_DEBUG is not set
 
@@ -404,6 +430,11 @@
 # CONFIG_TUN is not set
 
 #
+# PHY device support
+#
+# CONFIG_PHYLIB is not set
+
+#
 # Ethernet (10 or 100Mbit)
 #
 CONFIG_NET_ETHERNET=y
@@ -439,6 +470,7 @@
 # CONFIG_PPP_SYNC_TTY is not set
 # CONFIG_PPP_DEFLATE is not set
 # CONFIG_PPP_BSDCOMP is not set
+# CONFIG_PPP_MPPE is not set
 # CONFIG_PPPOE is not set
 CONFIG_SLIP=y
 CONFIG_SLIP_COMPRESSED=y
@@ -541,18 +573,18 @@
 #
 # TPM devices
 #
+# CONFIG_TELCLOCK is not set
 
 #
 # I2C support
 #
 # CONFIG_I2C is not set
-# CONFIG_I2C_SENSOR is not set
-CONFIG_ISP1301_OMAP=y
 
 #
 # Hardware Monitoring support
 #
 CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
 # CONFIG_HWMON_DEBUG_CHIP is not set
 
 #
@@ -560,6 +592,10 @@
 #
 
 #
+# Multimedia Capabilities Port drivers
+#
+
+#
 # Multimedia devices
 #
 # CONFIG_VIDEO_DEV is not set
@@ -576,7 +612,6 @@
 # CONFIG_FB_CFB_FILLRECT is not set
 # CONFIG_FB_CFB_COPYAREA is not set
 # CONFIG_FB_CFB_IMAGEBLIT is not set
-# CONFIG_FB_SOFT_CURSOR is not set
 # CONFIG_FB_MACMODES is not set
 CONFIG_FB_MODE_HELPERS=y
 # CONFIG_FB_TILEBLITTING is not set
@@ -589,6 +624,7 @@
 # CONFIG_VGA_CONSOLE is not set
 CONFIG_DUMMY_CONSOLE=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
 CONFIG_FONTS=y
 CONFIG_FONT_8x8=y
 CONFIG_FONT_8x16=y
@@ -600,6 +636,7 @@
 # CONFIG_FONT_SUN8x16 is not set
 # CONFIG_FONT_SUN12x22 is not set
 # CONFIG_FONT_10x18 is not set
+# CONFIG_FONT_RL is not set
 
 #
 # Logo configuration
@@ -624,10 +661,10 @@
 # Open Sound System
 #
 CONFIG_SOUND_PRIME=y
+# CONFIG_OBSOLETE_OSS_DRIVER is not set
 # CONFIG_SOUND_MSNDCLAS is not set
 # CONFIG_SOUND_MSNDPIN is not set
 # CONFIG_SOUND_OSS is not set
-# CONFIG_SOUND_AD1980 is not set
 
 #
 # USB support
@@ -637,22 +674,21 @@
 # CONFIG_USB is not set
 
 #
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
 # USB Gadget Support
 #
-CONFIG_USB_GADGET=y
-# CONFIG_USB_GADGET_DEBUG_FILES is not set
-CONFIG_USB_GADGET_SELECTED=y
+# CONFIG_USB_GADGET is not set
 # CONFIG_USB_GADGET_NET2280 is not set
 # CONFIG_USB_GADGET_PXA2XX is not set
 # CONFIG_USB_GADGET_GOKU is not set
 # CONFIG_USB_GADGET_LH7A40X is not set
-CONFIG_USB_GADGET_OMAP=y
-CONFIG_USB_OMAP=y
+# CONFIG_USB_GADGET_OMAP is not set
 # CONFIG_USB_GADGET_DUMMY_HCD is not set
-# CONFIG_USB_GADGET_DUALSPEED is not set
 # CONFIG_USB_ZERO is not set
-CONFIG_USB_ETH=y
-CONFIG_USB_ETH_RNDIS=y
+# CONFIG_USB_ETH is not set
 # CONFIG_USB_GADGETFS is not set
 # CONFIG_USB_FILE_STORAGE is not set
 # CONFIG_USB_G_SERIAL is not set
@@ -673,10 +709,6 @@
 # CONFIG_REISERFS_FS is not set
 # CONFIG_JFS_FS is not set
 # CONFIG_FS_POSIX_ACL is not set
-
-#
-# XFS support
-#
 # CONFIG_XFS_FS is not set
 # CONFIG_MINIX_FS is not set
 CONFIG_ROMFS_FS=y
@@ -685,6 +717,7 @@
 CONFIG_DNOTIFY=y
 # CONFIG_AUTOFS_FS is not set
 # CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
 
 #
 # CD-ROM/DVD Filesystems
@@ -706,10 +739,10 @@
 #
 CONFIG_PROC_FS=y
 CONFIG_SYSFS=y
-# CONFIG_DEVPTS_FS_XATTR is not set
 # CONFIG_TMPFS is not set
 # CONFIG_HUGETLB_PAGE is not set
 CONFIG_RAMFS=y
+# CONFIG_RELAYFS_FS is not set
 
 #
 # Miscellaneous filesystems
@@ -750,6 +783,7 @@
 # CONFIG_NCP_FS is not set
 # CONFIG_CODA_FS is not set
 # CONFIG_AFS_FS is not set
+# CONFIG_9P_FS is not set
 
 #
 # Partition Types
@@ -859,6 +893,7 @@
 # Library routines
 #
 # CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
 CONFIG_CRC32=y
 # CONFIG_LIBCRC32C is not set
 CONFIG_ZLIB_INFLATE=y
diff --git a/arch/arm/lib/csumpartial.S b/arch/arm/lib/csumpartial.S
index cb5e370..fe797cf 100644
--- a/arch/arm/lib/csumpartial.S
+++ b/arch/arm/lib/csumpartial.S
@@ -39,6 +39,7 @@
 
 		/* we must have at least one byte. */
 		tst	buf, #1			@ odd address?
+		movne	sum, sum, ror #8
 		ldrneb	td0, [buf], #1
 		subne	len, len, #1
 		adcnes	sum, sum, td0, put_byte_1
@@ -103,6 +104,9 @@
 		cmp	len, #8			@ Ensure that we have at least
 		blo	.less8			@ 8 bytes to copy.
 
+		tst	buf, #1
+		movne	sum, sum, ror #8
+
 		adds	sum, sum, #0		@ C = 0
 		tst	buf, #3			@ Test destination alignment
 		blne	.not_aligned		@ aligh destination, return here
diff --git a/arch/arm/mach-ixp4xx/Kconfig b/arch/arm/mach-ixp4xx/Kconfig
index 89762a2..3852858 100644
--- a/arch/arm/mach-ixp4xx/Kconfig
+++ b/arch/arm/mach-ixp4xx/Kconfig
@@ -8,6 +8,16 @@
 
 comment "IXP4xx Platforms"
 
+# This entry is placed at the top because otherwise it would have
+# been shown as a submenu.
+config MACH_NSLU2
+	bool
+	prompt "NSLU2" if !(MACH_IXDP465 || MACH_IXDPG425 || ARCH_IXDP425 || ARCH_ADI_COYOTE || ARCH_AVILA || ARCH_IXCDP1100 || ARCH_PRPMC1100 || MACH_GTWX5715)
+	help
+	  Say 'Y' here if you want your kernel to support Linksys's
+	  NSLU2 NAS device. For more information on this platform,
+	  see http://www.nslu2-linux.org
+
 config ARCH_AVILA
 	bool "Avila"
 	help
diff --git a/arch/arm/mach-ixp4xx/Makefile b/arch/arm/mach-ixp4xx/Makefile
index ddecbda..7a15629 100644
--- a/arch/arm/mach-ixp4xx/Makefile
+++ b/arch/arm/mach-ixp4xx/Makefile
@@ -8,4 +8,5 @@
 obj-$(CONFIG_MACH_IXDPG425)	+= ixdpg425-pci.o coyote-setup.o
 obj-$(CONFIG_ARCH_ADI_COYOTE)	+= coyote-pci.o coyote-setup.o
 obj-$(CONFIG_MACH_GTWX5715)	+= gtwx5715-pci.o gtwx5715-setup.o
+obj-$(CONFIG_MACH_NSLU2)	+= nslu2-pci.o nslu2-setup.o nslu2-power.o
 
diff --git a/arch/arm/mach-ixp4xx/nslu2-pci.c b/arch/arm/mach-ixp4xx/nslu2-pci.c
new file mode 100644
index 0000000..a575f2e
--- /dev/null
+++ b/arch/arm/mach-ixp4xx/nslu2-pci.c
@@ -0,0 +1,77 @@
+/*
+ * arch/arm/mach-ixp4xx/nslu2-pci.c
+ *
+ * NSLU2 board-level PCI initialization
+ *
+ * based on ixdp425-pci.c:
+ *	Copyright (C) 2002 Intel Corporation.
+ *	Copyright (C) 2003-2004 MontaVista Software, Inc.
+ *
+ * Maintainer: http://www.nslu2-linux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+
+#include <asm/mach/pci.h>
+#include <asm/mach-types.h>
+
+void __init nslu2_pci_preinit(void)
+{
+	set_irq_type(IRQ_NSLU2_PCI_INTA, IRQT_LOW);
+	set_irq_type(IRQ_NSLU2_PCI_INTB, IRQT_LOW);
+	set_irq_type(IRQ_NSLU2_PCI_INTC, IRQT_LOW);
+
+	gpio_line_isr_clear(NSLU2_PCI_INTA_PIN);
+	gpio_line_isr_clear(NSLU2_PCI_INTB_PIN);
+	gpio_line_isr_clear(NSLU2_PCI_INTC_PIN);
+
+	/* INTD is not configured because its GPIO line is used
+	 * for the power input button.
+	 */
+
+	ixp4xx_pci_preinit();
+}
+
+static int __init nslu2_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+	static int pci_irq_table[NSLU2_PCI_IRQ_LINES] = {
+		IRQ_NSLU2_PCI_INTA,
+		IRQ_NSLU2_PCI_INTB,
+		IRQ_NSLU2_PCI_INTC,
+	};
+
+	int irq = -1;
+
+	if (slot >= 1 && slot <= NSLU2_PCI_MAX_DEV &&
+		pin >= 1 && pin <= NSLU2_PCI_IRQ_LINES) {
+			irq = pci_irq_table[(slot + pin - 2) % NSLU2_PCI_IRQ_LINES];
+	}
+
+	return irq;
+}
+
+struct hw_pci __initdata nslu2_pci = {
+	.nr_controllers = 1,
+	.preinit	= nslu2_pci_preinit,
+	.swizzle	= pci_std_swizzle,
+	.setup		= ixp4xx_setup,
+	.scan		= ixp4xx_scan_bus,
+	.map_irq	= nslu2_map_irq,
+};
+
+int __init nslu2_pci_init(void) /* monkey see, monkey do */
+{
+	if (machine_is_nslu2())
+		pci_common_init(&nslu2_pci);
+
+	return 0;
+}
+
+subsys_initcall(nslu2_pci_init);
diff --git a/arch/arm/mach-ixp4xx/nslu2-power.c b/arch/arm/mach-ixp4xx/nslu2-power.c
new file mode 100644
index 0000000..18fbc8c
--- /dev/null
+++ b/arch/arm/mach-ixp4xx/nslu2-power.c
@@ -0,0 +1,92 @@
+/*
+ * arch/arm/mach-ixp4xx/nslu2-power.c
+ *
+ * NSLU2 Power/Reset driver
+ *
+ * Copyright (C) 2005 Tower Technologies
+ *
+ * based on nslu2-io.c
+ *  Copyright (C) 2004 Karen Spearel
+ *
+ * Author: Alessandro Zummo <a.zummo@towertech.it>
+ * Maintainers: http://www.nslu2-linux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/reboot.h>
+#include <linux/interrupt.h>
+
+#include <asm/mach-types.h>
+
+extern void ctrl_alt_del(void);
+
+static irqreturn_t nslu2_power_handler(int irq, void *dev_id, struct pt_regs *regs)
+{
+	/* Signal init to perform the ctrl-alt-del action; if init has not
+	 * started yet, this bypasses it and does a kernel_restart.
+	 */
+	ctrl_alt_del();
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t nslu2_reset_handler(int irq, void *dev_id, struct pt_regs *regs)
+{
+	/* This is the paper-clip reset; it shuts the machine down directly.
+	 */
+	machine_power_off();
+
+	return IRQ_HANDLED;
+}
+
+static int __init nslu2_power_init(void)
+{
+	if (!(machine_is_nslu2()))
+		return 0;
+
+	*IXP4XX_GPIO_GPISR = 0x20400000;	/* clear both GPIO IRQ status bits */
+
+	set_irq_type(NSLU2_RB_IRQ, IRQT_LOW);
+	set_irq_type(NSLU2_PB_IRQ, IRQT_HIGH);
+
+	gpio_line_isr_clear(NSLU2_RB_GPIO);
+	gpio_line_isr_clear(NSLU2_PB_GPIO);
+
+	if (request_irq(NSLU2_RB_IRQ, &nslu2_reset_handler,
+		SA_INTERRUPT, "NSLU2 reset button", NULL) < 0) {
+
+		printk(KERN_DEBUG "Reset Button IRQ %d not available\n",
+			NSLU2_RB_IRQ);
+
+		return -EIO;
+	}
+
+	if (request_irq(NSLU2_PB_IRQ, &nslu2_power_handler,
+		SA_INTERRUPT, "NSLU2 power button", NULL) < 0) {
+
+		printk(KERN_DEBUG "Power Button IRQ %d not available\n",
+			NSLU2_PB_IRQ);
+
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static void __exit nslu2_power_exit(void)
+{
+	free_irq(NSLU2_RB_IRQ, NULL);
+	free_irq(NSLU2_PB_IRQ, NULL);
+}
+
+module_init(nslu2_power_init);
+module_exit(nslu2_power_exit);
+
+MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
+MODULE_DESCRIPTION("NSLU2 Power/Reset driver");
+MODULE_LICENSE("GPL");
diff --git a/arch/arm/mach-ixp4xx/nslu2-setup.c b/arch/arm/mach-ixp4xx/nslu2-setup.c
new file mode 100644
index 0000000..289e94c
--- /dev/null
+++ b/arch/arm/mach-ixp4xx/nslu2-setup.c
@@ -0,0 +1,134 @@
+/*
+ * arch/arm/mach-ixp4xx/nslu2-setup.c
+ *
+ * NSLU2 board-setup
+ *
+ * based on ixdp425-setup.c:
+ *      Copyright (C) 2003-2004 MontaVista Software, Inc.
+ *
+ * Author: Mark Rakes <mrakes at mac.com>
+ * Maintainers: http://www.nslu2-linux.org/
+ *
+ * Fixed missing init_time in MACHINE_START kas11 10/22/04
+ * Changed to conform to new style __init ixdp425 kas11 10/22/04
+ */
+
+#include <linux/kernel.h>
+#include <linux/serial.h>
+#include <linux/serial_8250.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/flash.h>
+
+static struct flash_platform_data nslu2_flash_data = {
+	.map_name		= "cfi_probe",
+	.width			= 2,
+};
+
+static struct resource nslu2_flash_resource = {
+	.start			= NSLU2_FLASH_BASE,
+	.end			= NSLU2_FLASH_BASE + NSLU2_FLASH_SIZE - 1,
+	.flags			= IORESOURCE_MEM,
+};
+
+static struct platform_device nslu2_flash = {
+	.name			= "IXP4XX-Flash",
+	.id			= 0,
+	.dev.platform_data	= &nslu2_flash_data,
+	.num_resources		= 1,
+	.resource		= &nslu2_flash_resource,
+};
+
+static struct ixp4xx_i2c_pins nslu2_i2c_gpio_pins = {
+	.sda_pin		= NSLU2_SDA_PIN,
+	.scl_pin		= NSLU2_SCL_PIN,
+};
+
+static struct platform_device nslu2_i2c_controller = {
+	.name			= "IXP4XX-I2C",
+	.id			= 0,
+	.dev.platform_data	= &nslu2_i2c_gpio_pins,
+	.num_resources		= 0,
+};
+
+static struct resource nslu2_uart_resources[] = {
+	{
+		.start		= IXP4XX_UART1_BASE_PHYS,
+		.end		= IXP4XX_UART1_BASE_PHYS + 0x0fff,
+		.flags		= IORESOURCE_MEM,
+	},
+	{
+		.start		= IXP4XX_UART2_BASE_PHYS,
+		.end		= IXP4XX_UART2_BASE_PHYS + 0x0fff,
+		.flags		= IORESOURCE_MEM,
+	}
+};
+
+static struct plat_serial8250_port nslu2_uart_data[] = {
+	{
+		.mapbase	= IXP4XX_UART1_BASE_PHYS,
+		.membase	= (char *)IXP4XX_UART1_BASE_VIRT + REG_OFFSET,
+		.irq		= IRQ_IXP4XX_UART1,
+		.flags		= UPF_BOOT_AUTOCONF,
+		.iotype		= UPIO_MEM,
+		.regshift	= 2,
+		.uartclk	= IXP4XX_UART_XTAL,
+	},
+	{
+		.mapbase	= IXP4XX_UART2_BASE_PHYS,
+		.membase	= (char *)IXP4XX_UART2_BASE_VIRT + REG_OFFSET,
+		.irq		= IRQ_IXP4XX_UART2,
+		.flags		= UPF_BOOT_AUTOCONF,
+		.iotype		= UPIO_MEM,
+		.regshift	= 2,
+		.uartclk	= IXP4XX_UART_XTAL,
+	},
+	{ }
+};
+
+static struct platform_device nslu2_uart = {
+	.name			= "serial8250",
+	.id			= PLAT8250_DEV_PLATFORM,
+	.dev.platform_data	= nslu2_uart_data,
+	.num_resources		= 2,
+	.resource		= nslu2_uart_resources,
+};
+
+static struct platform_device *nslu2_devices[] __initdata = {
+	&nslu2_i2c_controller,
+	&nslu2_flash,
+	&nslu2_uart,
+};
+
+static void nslu2_power_off(void)
+{
+	/* This causes the box to drop the power and go dead. */
+
+	/* enable the pwr cntl gpio */
+	gpio_line_config(NSLU2_PO_GPIO, IXP4XX_GPIO_OUT);
+
+	/* do the deed */
+	gpio_line_set(NSLU2_PO_GPIO, IXP4XX_GPIO_HIGH);
+}
+
+static void __init nslu2_init(void)
+{
+	ixp4xx_sys_init();
+
+	pm_power_off = nslu2_power_off;
+
+	platform_add_devices(nslu2_devices, ARRAY_SIZE(nslu2_devices));
+}
+
+MACHINE_START(NSLU2, "Linksys NSLU2")
+	/* Maintainer: www.nslu2-linux.org */
+	.phys_ram	= PHYS_OFFSET,
+	.phys_io	= IXP4XX_PERIPHERAL_BASE_PHYS,
+	.io_pg_offst	= ((IXP4XX_PERIPHERAL_BASE_VIRT) >> 18) & 0xFFFC,
+	.boot_params	= 0x00000100,
+	.map_io		= ixp4xx_map_io,
+	.init_irq	= ixp4xx_init_irq,
+	.timer          = &ixp4xx_timer,
+	.init_machine	= nslu2_init,
+MACHINE_END
diff --git a/arch/arm/mach-omap1/Kconfig b/arch/arm/mach-omap1/Kconfig
index 27fc2e8..86a0f0d 100644
--- a/arch/arm/mach-omap1/Kconfig
+++ b/arch/arm/mach-omap1/Kconfig
@@ -6,10 +6,10 @@
 	bool "OMAP730 Based System"
 	select ARCH_OMAP_OTG
 
-config ARCH_OMAP1510
+config ARCH_OMAP15XX
 	depends on ARCH_OMAP1
 	default y
-	bool "OMAP1510 Based System"
+	bool "OMAP15xx Based System"
 
 config ARCH_OMAP16XX
 	depends on ARCH_OMAP1
@@ -21,7 +21,7 @@
 
 config MACH_OMAP_INNOVATOR
 	bool "TI Innovator"
-	depends on ARCH_OMAP1 && (ARCH_OMAP1510 || ARCH_OMAP16XX)
+	depends on ARCH_OMAP1 && (ARCH_OMAP15XX || ARCH_OMAP16XX)
 	help
           TI OMAP 1510 or 1610 Innovator board support. Say Y here if you
           have such a board.
@@ -64,20 +64,30 @@
 
 config MACH_VOICEBLUE
 	bool "Voiceblue"
-	depends on ARCH_OMAP1 && ARCH_OMAP1510
+	depends on ARCH_OMAP1 && ARCH_OMAP15XX
 	help
 	  Support for Voiceblue GSM/VoIP gateway. Say Y here if you have
 	  such a board.
 
 config MACH_NETSTAR
 	bool "NetStar"
-	depends on ARCH_OMAP1 && ARCH_OMAP1510
+	depends on ARCH_OMAP1 && ARCH_OMAP15XX
 	help
 	  Support for NetStar PBX. Say Y here if you have such a board.
 
+config MACH_OMAP_PALMTE
+	bool "Palm Tungsten E"
+	depends on ARCH_OMAP1 && ARCH_OMAP15XX
+	help
+          Support for the Palm Tungsten E PDA. Currently only the LCD
+          panel is supported. To boot the kernel, you will need a
+          PalmOS-compatible bootloader; see
+          http://palmtelinux.sourceforge.net for more information.
+          Say Y here if you have such a PDA; say N otherwise.
+
 config MACH_OMAP_GENERIC
 	bool "Generic OMAP board"
-	depends on ARCH_OMAP1 && (ARCH_OMAP1510 || ARCH_OMAP16XX)
+	depends on ARCH_OMAP1 && (ARCH_OMAP15XX || ARCH_OMAP16XX)
 	help
           Support for generic OMAP-1510, 1610 or 1710 board with
           no FPGA. Can be used as template for porting Linux to
@@ -121,32 +131,32 @@
 
 config OMAP_ARM_168MHZ
 	bool "OMAP ARM 168 MHz CPU"
-	depends on ARCH_OMAP1 && (ARCH_OMAP1510 || ARCH_OMAP16XX || ARCH_OMAP730)
+	depends on ARCH_OMAP1 && (ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730)
 	help
           Enable 168MHz clock for OMAP CPU. If unsure, say N.
 
 config OMAP_ARM_150MHZ
 	bool "OMAP ARM 150 MHz CPU"
-	depends on ARCH_OMAP1 && ARCH_OMAP1510
+	depends on ARCH_OMAP1 && ARCH_OMAP15XX
 	help
 	  Enable 150MHz clock for OMAP CPU. If unsure, say N.
 
 config OMAP_ARM_120MHZ
 	bool "OMAP ARM 120 MHz CPU"
-	depends on ARCH_OMAP1 && (ARCH_OMAP1510 || ARCH_OMAP16XX || ARCH_OMAP730)
+	depends on ARCH_OMAP1 && (ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730)
 	help
           Enable 120MHz clock for OMAP CPU. If unsure, say N.
 
 config OMAP_ARM_60MHZ
 	bool "OMAP ARM 60 MHz CPU"
-	depends on ARCH_OMAP1 && (ARCH_OMAP1510 || ARCH_OMAP16XX || ARCH_OMAP730)
+	depends on ARCH_OMAP1 && (ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730)
         default y
 	help
           Enable 60MHz clock for OMAP CPU. If unsure, say Y.
 
 config OMAP_ARM_30MHZ
 	bool "OMAP ARM 30 MHz CPU"
-	depends on ARCH_OMAP1 && (ARCH_OMAP1510 || ARCH_OMAP16XX || ARCH_OMAP730)
+	depends on ARCH_OMAP1 && (ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730)
 	help
           Enable 30MHz clock for OMAP CPU. If unsure, say N.
 
diff --git a/arch/arm/mach-omap1/Makefile b/arch/arm/mach-omap1/Makefile
index 181a93d..b0b0015 100644
--- a/arch/arm/mach-omap1/Makefile
+++ b/arch/arm/mach-omap1/Makefile
@@ -3,7 +3,7 @@
 #
 
 # Common support
-obj-y := io.o id.o irq.o time.o serial.o devices.o
+obj-y := io.o id.o clock.o irq.o time.o mux.o serial.o devices.o
 led-y := leds.o
 
 # Specific board support
@@ -15,8 +15,9 @@
 obj-$(CONFIG_MACH_OMAP_H3)		+= board-h3.o
 obj-$(CONFIG_MACH_VOICEBLUE)		+= board-voiceblue.o
 obj-$(CONFIG_MACH_NETSTAR)		+= board-netstar.o
+obj-$(CONFIG_MACH_OMAP_PALMTE)		+= board-palmte.o
 
-ifeq ($(CONFIG_ARCH_OMAP1510),y)
+ifeq ($(CONFIG_ARCH_OMAP15XX),y)
 # Innovator-1510 FPGA
 obj-$(CONFIG_MACH_OMAP_INNOVATOR)	+= fpga.o
 endif
diff --git a/arch/arm/mach-omap1/board-generic.c b/arch/arm/mach-omap1/board-generic.c
index c209c71..4b292e9 100644
--- a/arch/arm/mach-omap1/board-generic.c
+++ b/arch/arm/mach-omap1/board-generic.c
@@ -15,7 +15,7 @@
 
 #include <linux/kernel.h>
 #include <linux/init.h>
-#include <linux/device.h>
+#include <linux/platform_device.h>
 
 #include <asm/hardware.h>
 #include <asm/mach-types.h>
@@ -28,8 +28,6 @@
 #include <asm/arch/board.h>
 #include <asm/arch/common.h>
 
-static int __initdata generic_serial_ports[OMAP_MAX_NR_PORTS] = {1, 1, 1};
-
 static void __init omap_generic_init_irq(void)
 {
 	omap_init_irq();
@@ -37,7 +35,7 @@
 
 /* assume no Mini-AB port */
 
-#ifdef CONFIG_ARCH_OMAP1510
+#ifdef CONFIG_ARCH_OMAP15XX
 static struct omap_usb_config generic1510_usb_config __initdata = {
 	.register_host	= 1,
 	.register_dev	= 1,
@@ -76,21 +74,19 @@
 
 #endif
 
+static struct omap_uart_config generic_uart_config __initdata = {
+	.enabled_uarts = ((1 << 0) | (1 << 1) | (1 << 2)),
+};
+
 static struct omap_board_config_kernel generic_config[] = {
 	{ OMAP_TAG_USB,           NULL },
 	{ OMAP_TAG_MMC,           &generic_mmc_config },
+	{ OMAP_TAG_UART,	&generic_uart_config },
 };
 
 static void __init omap_generic_init(void)
 {
-	const struct omap_uart_config *uart_conf;
-
-	/*
-	 * Make sure the serial ports are muxed on at this point.
-	 * You have to mux them off in device drivers later on
-	 * if not needed.
-	 */
-#ifdef CONFIG_ARCH_OMAP1510
+#ifdef CONFIG_ARCH_OMAP15XX
 	if (cpu_is_omap1510()) {
 		generic_config[0].data = &generic1510_usb_config;
 	}
@@ -101,20 +97,9 @@
 	}
 #endif
 
-	uart_conf = omap_get_config(OMAP_TAG_UART, struct omap_uart_config);
-	if (uart_conf != NULL) {
-		unsigned int enabled_ports, i;
-
-		enabled_ports = uart_conf->enabled_uarts;
-		for (i = 0; i < 3; i++) {
-			if (!(enabled_ports & (1 << i)))
-				generic_serial_ports[i] = 0;
-		}
-	}
-
 	omap_board_config = generic_config;
 	omap_board_config_size = ARRAY_SIZE(generic_config);
-	omap_serial_init(generic_serial_ports);
+	omap_serial_init();
 }
 
 static void __init omap_generic_map_io(void)
diff --git a/arch/arm/mach-omap1/board-h2.c b/arch/arm/mach-omap1/board-h2.c
index 4ee6bd8..a07e2c9 100644
--- a/arch/arm/mach-omap1/board-h2.c
+++ b/arch/arm/mach-omap1/board-h2.c
@@ -40,8 +40,6 @@
 
 extern int omap_gpio_init(void);
 
-static int __initdata h2_serial_ports[OMAP_MAX_NR_PORTS] = {1, 1, 1};
-
 static struct mtd_partition h2_partitions[] = {
 	/* bootloader (U-Boot, etc) in first sector */
 	{
@@ -160,9 +158,20 @@
 	},
 };
 
+static struct omap_uart_config h2_uart_config __initdata = {
+	.enabled_uarts = ((1 << 0) | (1 << 1) | (1 << 2)),
+};
+
+static struct omap_lcd_config h2_lcd_config __initdata = {
+	.panel_name	= "h2",
+	.ctrl_name	= "internal",
+};
+
 static struct omap_board_config_kernel h2_config[] = {
 	{ OMAP_TAG_USB,           &h2_usb_config },
 	{ OMAP_TAG_MMC,           &h2_mmc_config },
+	{ OMAP_TAG_UART,	&h2_uart_config },
+	{ OMAP_TAG_LCD,		&h2_lcd_config },
 };
 
 static void __init h2_init(void)
@@ -180,12 +189,12 @@
 	platform_add_devices(h2_devices, ARRAY_SIZE(h2_devices));
 	omap_board_config = h2_config;
 	omap_board_config_size = ARRAY_SIZE(h2_config);
+	omap_serial_init();
 }
 
 static void __init h2_map_io(void)
 {
 	omap_map_common_io();
-	omap_serial_init(h2_serial_ports);
 }
 
 MACHINE_START(OMAP_H2, "TI-H2")
diff --git a/arch/arm/mach-omap1/board-h3.c b/arch/arm/mach-omap1/board-h3.c
index fc82436..668e278 100644
--- a/arch/arm/mach-omap1/board-h3.c
+++ b/arch/arm/mach-omap1/board-h3.c
@@ -41,8 +41,6 @@
 
 extern int omap_gpio_init(void);
 
-static int __initdata h3_serial_ports[OMAP_MAX_NR_PORTS] = {1, 1, 1};
-
 static struct mtd_partition h3_partitions[] = {
 	/* bootloader (U-Boot, etc) in first sector */
 	{
@@ -168,9 +166,20 @@
 	},
 };
 
+static struct omap_uart_config h3_uart_config __initdata = {
+	.enabled_uarts = ((1 << 0) | (1 << 1) | (1 << 2)),
+};
+
+static struct omap_lcd_config h3_lcd_config __initdata = {
+	.panel_name	= "h3",
+	.ctrl_name	= "internal",
+};
+
 static struct omap_board_config_kernel h3_config[] = {
-	{ OMAP_TAG_USB,	 &h3_usb_config },
-	{ OMAP_TAG_MMC,  &h3_mmc_config },
+	{ OMAP_TAG_USB,		&h3_usb_config },
+	{ OMAP_TAG_MMC,		&h3_mmc_config },
+	{ OMAP_TAG_UART,	&h3_uart_config },
+	{ OMAP_TAG_LCD,		&h3_lcd_config },
 };
 
 static void __init h3_init(void)
@@ -180,6 +189,7 @@
 	(void) platform_add_devices(devices, ARRAY_SIZE(devices));
 	omap_board_config = h3_config;
 	omap_board_config_size = ARRAY_SIZE(h3_config);
+	omap_serial_init();
 }
 
 static void __init h3_init_smc91x(void)
@@ -201,7 +211,6 @@
 static void __init h3_map_io(void)
 {
 	omap_map_common_io();
-	omap_serial_init(h3_serial_ports);
 }
 
 MACHINE_START(OMAP_H3, "TI OMAP1710 H3 board")
diff --git a/arch/arm/mach-omap1/board-innovator.c b/arch/arm/mach-omap1/board-innovator.c
index a2eac85..95f1ff3 100644
--- a/arch/arm/mach-omap1/board-innovator.c
+++ b/arch/arm/mach-omap1/board-innovator.c
@@ -36,8 +36,6 @@
 #include <asm/arch/usb.h>
 #include <asm/arch/common.h>
 
-static int __initdata innovator_serial_ports[OMAP_MAX_NR_PORTS] = {1, 1, 1};
-
 static struct mtd_partition innovator_partitions[] = {
 	/* bootloader (U-Boot, etc) in first sector */
 	{
@@ -99,7 +97,7 @@
 	.resource	= &innovator_flash_resource,
 };
 
-#ifdef CONFIG_ARCH_OMAP1510
+#ifdef CONFIG_ARCH_OMAP15XX
 
 /* Only FPGA needs to be mapped here. All others are done with ioremap */
 static struct map_desc innovator1510_io_desc[] __initdata = {
@@ -136,7 +134,7 @@
 	&innovator1510_smc91x_device,
 };
 
-#endif /* CONFIG_ARCH_OMAP1510 */
+#endif /* CONFIG_ARCH_OMAP15XX */
 
 #ifdef CONFIG_ARCH_OMAP16XX
 
@@ -185,7 +183,7 @@
 {
 	omap_init_irq();
 	omap_gpio_init();
-#ifdef CONFIG_ARCH_OMAP1510
+#ifdef CONFIG_ARCH_OMAP15XX
 	if (cpu_is_omap1510()) {
 		omap1510_fpga_init_irq();
 	}
@@ -193,7 +191,7 @@
 	innovator_init_smc91x();
 }
 
-#ifdef CONFIG_ARCH_OMAP1510
+#ifdef CONFIG_ARCH_OMAP15XX
 static struct omap_usb_config innovator1510_usb_config __initdata = {
 	/* for bundled non-standard host and peripheral cables */
 	.hmc_mode	= 4,
@@ -205,6 +203,11 @@
 	.register_dev	= 1,
 	.pins[0]	= 2,
 };
+
+static struct omap_lcd_config innovator1510_lcd_config __initdata = {
+	.panel_name	= "inn1510",
+	.ctrl_name	= "internal",
+};
 #endif
 
 #ifdef CONFIG_ARCH_OMAP16XX
@@ -222,6 +225,11 @@
 
 	.pins[1]	= 3,
 };
+
+static struct omap_lcd_config innovator1610_lcd_config __initdata = {
+	.panel_name	= "inn1610",
+	.ctrl_name	= "internal",
+};
 #endif
 
 static struct omap_mmc_config innovator_mmc_config __initdata = {
@@ -234,14 +242,20 @@
 	},
 };
 
+static struct omap_uart_config innovator_uart_config __initdata = {
+	.enabled_uarts = ((1 << 0) | (1 << 1) | (1 << 2)),
+};
+
 static struct omap_board_config_kernel innovator_config[] = {
 	{ OMAP_TAG_USB,         NULL },
+	{ OMAP_TAG_LCD,		NULL },
 	{ OMAP_TAG_MMC,		&innovator_mmc_config },
+	{ OMAP_TAG_UART,	&innovator_uart_config },
 };
 
 static void __init innovator_init(void)
 {
-#ifdef CONFIG_ARCH_OMAP1510
+#ifdef CONFIG_ARCH_OMAP15XX
 	if (cpu_is_omap1510()) {
 		platform_add_devices(innovator1510_devices, ARRAY_SIZE(innovator1510_devices));
 	}
@@ -252,23 +266,28 @@
 	}
 #endif
 
-#ifdef CONFIG_ARCH_OMAP1510
-	if (cpu_is_omap1510())
+#ifdef CONFIG_ARCH_OMAP15XX
+	if (cpu_is_omap1510()) {
 		innovator_config[0].data = &innovator1510_usb_config;
+		innovator_config[1].data = &innovator1510_lcd_config;
+	}
 #endif
 #ifdef CONFIG_ARCH_OMAP16XX
-	if (cpu_is_omap1610())
+	if (cpu_is_omap1610()) {
 		innovator_config[0].data = &h2_usb_config;
+		innovator_config[1].data = &innovator1610_lcd_config;
+	}
 #endif
 	omap_board_config = innovator_config;
 	omap_board_config_size = ARRAY_SIZE(innovator_config);
+	omap_serial_init();
 }
 
 static void __init innovator_map_io(void)
 {
 	omap_map_common_io();
 
-#ifdef CONFIG_ARCH_OMAP1510
+#ifdef CONFIG_ARCH_OMAP15XX
 	if (cpu_is_omap1510()) {
 		iotable_init(innovator1510_io_desc, ARRAY_SIZE(innovator1510_io_desc));
 		udelay(10);	/* Delay needed for FPGA */
@@ -280,7 +299,6 @@
 		       fpga_read(OMAP1510_FPGA_BOARD_REV));
 	}
 #endif
-	omap_serial_init(innovator_serial_ports);
 }
 
 MACHINE_START(OMAP_INNOVATOR, "TI-Innovator")
diff --git a/arch/arm/mach-omap1/board-netstar.c b/arch/arm/mach-omap1/board-netstar.c
index c851c2e..0448fa7 100644
--- a/arch/arm/mach-omap1/board-netstar.c
+++ b/arch/arm/mach-omap1/board-netstar.c
@@ -55,6 +55,14 @@
 	&netstar_smc91x_device,
 };
 
+static struct omap_uart_config netstar_uart_config __initdata = {
+	.enabled_uarts = ((1 << 0) | (1 << 1) | (1 << 2)),
+};
+
+static struct omap_board_config_kernel netstar_config[] = {
+	{ OMAP_TAG_UART,	&netstar_uart_config },
+};
+
 static void __init netstar_init_irq(void)
 {
 	omap_init_irq();
@@ -92,14 +100,15 @@
 	/* Switch off red LED */
 	omap_writeb(0x00, OMAP_LPG1_PMR);	/* Disable clock */
 	omap_writeb(0x80, OMAP_LPG1_LCR);
-}
 
-static int __initdata omap_serial_ports[OMAP_MAX_NR_PORTS] = {1, 1, 1};
+	omap_board_config = netstar_config;
+	omap_board_config_size = ARRAY_SIZE(netstar_config);
+	omap_serial_init();
+}
 
 static void __init netstar_map_io(void)
 {
 	omap_map_common_io();
-	omap_serial_init(omap_serial_ports);
 }
 
 #define MACHINE_PANICED		1
diff --git a/arch/arm/mach-omap1/board-osk.c b/arch/arm/mach-omap1/board-osk.c
index a88524e..e990e1b 100644
--- a/arch/arm/mach-omap1/board-osk.c
+++ b/arch/arm/mach-omap1/board-osk.c
@@ -46,8 +46,6 @@
 #include <asm/arch/tc.h>
 #include <asm/arch/common.h>
 
-static int __initdata osk_serial_ports[OMAP_MAX_NR_PORTS] = {1, 0, 0};
-
 static struct mtd_partition osk_partitions[] = {
 	/* bootloader (U-Boot, etc) in first sector */
 	{
@@ -155,7 +153,7 @@
 	}
 
 	/* Check EMIFS wait states to fix errors with SMC_GET_PKT_HDR */
-	EMIFS_CCS(1) |= 0x2;
+	EMIFS_CCS(1) |= 0x3;
 }
 
 static void __init osk_init_cf(void)
@@ -193,8 +191,19 @@
 	.pins[0]	= 2,
 };
 
+static struct omap_uart_config osk_uart_config __initdata = {
+	.enabled_uarts = (1 << 0),
+};
+
+static struct omap_lcd_config osk_lcd_config __initdata = {
+	.panel_name	= "osk",
+	.ctrl_name	= "internal",
+};
+
 static struct omap_board_config_kernel osk_config[] = {
 	{ OMAP_TAG_USB,           &osk_usb_config },
+	{ OMAP_TAG_UART,		&osk_uart_config },
+	{ OMAP_TAG_LCD,			&osk_lcd_config },
 };
 
 #ifdef	CONFIG_OMAP_OSK_MISTRAL
@@ -254,13 +263,13 @@
 	omap_board_config_size = ARRAY_SIZE(osk_config);
 	USB_TRANSCEIVER_CTRL_REG |= (3 << 1);
 
+	omap_serial_init();
 	osk_mistral_init();
 }
 
 static void __init osk_map_io(void)
 {
 	omap_map_common_io();
-	omap_serial_init(osk_serial_ports);
 }
 
 MACHINE_START(OMAP_OSK, "TI-OSK")
diff --git a/arch/arm/mach-omap1/board-palmte.c b/arch/arm/mach-omap1/board-palmte.c
new file mode 100644
index 0000000..540b20d
--- /dev/null
+++ b/arch/arm/mach-omap1/board-palmte.c
@@ -0,0 +1,87 @@
+/*
+ * linux/arch/arm/mach-omap1/board-palmte.c
+ *
+ * Modified from board-generic.c
+ *
+ * Support for the Palm Tungsten E PDA.
+ *
+ * Original version: Laurent Gonzalez
+ *
+ * Maintainers: http://palmtelinux.sf.net
+ *              palmtelinux-developpers@lists.sf.net
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/notifier.h>
+
+#include <asm/hardware.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+
+#include <asm/arch/gpio.h>
+#include <asm/arch/mux.h>
+#include <asm/arch/usb.h>
+#include <asm/arch/board.h>
+#include <asm/arch/common.h>
+#include <asm/hardware/clock.h>
+
+static void __init omap_generic_init_irq(void)
+{
+	omap_init_irq();
+}
+
+static struct omap_usb_config palmte_usb_config __initdata = {
+	.register_dev	= 1,
+	.hmc_mode	= 0,
+	.pins[0]	= 3,
+};
+
+static struct omap_mmc_config palmte_mmc_config __initdata = {
+	.mmc[0] = {
+		.enabled 	= 1,
+		.wire4		= 1,
+		.wp_pin		= OMAP_MPUIO(3),
+		.power_pin	= -1,
+		.switch_pin	= -1,
+	},
+};
+
+static struct omap_lcd_config palmte_lcd_config __initdata = {
+	.panel_name	= "palmte",
+	.ctrl_name	= "internal",
+};
+
+static struct omap_board_config_kernel palmte_config[] = {
+	{ OMAP_TAG_USB, &palmte_usb_config },
+	{ OMAP_TAG_MMC,	&palmte_mmc_config },
+	{ OMAP_TAG_LCD,	&palmte_lcd_config },
+};
+
+static void __init omap_generic_init(void)
+{
+	omap_board_config = palmte_config;
+	omap_board_config_size = ARRAY_SIZE(palmte_config);
+}
+
+static void __init omap_generic_map_io(void)
+{
+	omap_map_common_io();
+}
+
+MACHINE_START(OMAP_PALMTE, "OMAP310 based Palm Tungsten E")
+	.phys_ram	= 0x10000000,
+	.phys_io	= 0xfff00000,
+	.io_pg_offst	= ((0xfef00000) >> 18) & 0xfffc,
+	.boot_params	= 0x10000100,
+	.map_io		= omap_generic_map_io,
+	.init_irq	= omap_generic_init_irq,
+	.init_machine	= omap_generic_init,
+	.timer		= &omap_timer,
+MACHINE_END
diff --git a/arch/arm/mach-omap1/board-perseus2.c b/arch/arm/mach-omap1/board-perseus2.c
index 354b157..bd900b7 100644
--- a/arch/arm/mach-omap1/board-perseus2.c
+++ b/arch/arm/mach-omap1/board-perseus2.c
@@ -29,6 +29,7 @@
 #include <asm/arch/mux.h>
 #include <asm/arch/fpga.h>
 #include <asm/arch/common.h>
+#include <asm/arch/board.h>
 
 static struct resource smc91x_resources[] = {
 	[0] = {
@@ -43,8 +44,6 @@
 	},
 };
 
-static int __initdata p2_serial_ports[OMAP_MAX_NR_PORTS] = {1, 1, 0};
-
 static struct mtd_partition p2_partitions[] = {
 	/* bootloader (U-Boot, etc) in first sector */
 	{
@@ -111,9 +110,27 @@
 	&smc91x_device,
 };
 
+static struct omap_uart_config perseus2_uart_config __initdata = {
+	.enabled_uarts = ((1 << 0) | (1 << 1)),
+};
+
+static struct omap_lcd_config perseus2_lcd_config __initdata = {
+	.panel_name	= "p2",
+	.ctrl_name	= "internal",
+};
+
+static struct omap_board_config_kernel perseus2_config[] = {
+	{ OMAP_TAG_UART,	&perseus2_uart_config },
+	{ OMAP_TAG_LCD,		&perseus2_lcd_config },
+};
+
 static void __init omap_perseus2_init(void)
 {
 	(void) platform_add_devices(devices, ARRAY_SIZE(devices));
+
+	omap_board_config = perseus2_config;
+	omap_board_config_size = ARRAY_SIZE(perseus2_config);
+	omap_serial_init();
 }
 
 static void __init perseus2_init_smc91x(void)
@@ -131,7 +148,6 @@
 	omap_gpio_init();
 	perseus2_init_smc91x();
 }
-
 /* Only FPGA needs to be mapped here. All others are done with ioremap */
 static struct map_desc omap_perseus2_io_desc[] __initdata = {
 	{
@@ -179,7 +195,6 @@
 	 * It is used as the Ethernet controller interrupt
 	 */
 	omap_writel(omap_readl(OMAP730_IO_CONF_9) & 0x1FFFFFFF, OMAP730_IO_CONF_9);
-	omap_serial_init(p2_serial_ports);
 }
 
 MACHINE_START(OMAP_PERSEUS2, "OMAP730 Perseus2")
diff --git a/arch/arm/mach-omap1/board-voiceblue.c b/arch/arm/mach-omap1/board-voiceblue.c
index 3f018b2..6f9a622 100644
--- a/arch/arm/mach-omap1/board-voiceblue.c
+++ b/arch/arm/mach-omap1/board-voiceblue.c
@@ -150,9 +150,14 @@
 	},
 };
 
+static struct omap_uart_config voiceblue_uart_config __initdata = {
+	.enabled_uarts = ((1 << 0) | (1 << 1) | (1 << 2)),
+};
+
 static struct omap_board_config_kernel voiceblue_config[] = {
 	{ OMAP_TAG_USB, &voiceblue_usb_config },
 	{ OMAP_TAG_MMC, &voiceblue_mmc_config },
+	{ OMAP_TAG_UART,	&voiceblue_uart_config },
 };
 
 static void __init voiceblue_init_irq(void)
@@ -191,6 +196,7 @@
 	platform_add_devices(voiceblue_devices, ARRAY_SIZE(voiceblue_devices));
 	omap_board_config = voiceblue_config;
 	omap_board_config_size = ARRAY_SIZE(voiceblue_config);
+	omap_serial_init();
 
 	/* There is a good chance board is going up, so enable power LED
 	 * (it is connected through invertor) */
@@ -198,12 +204,9 @@
 	omap_writeb(0x00, OMAP_LPG1_PMR);	/* Disable clock */
 }
 
-static int __initdata omap_serial_ports[OMAP_MAX_NR_PORTS] = {1, 1, 1};
-
 static void __init voiceblue_map_io(void)
 {
 	omap_map_common_io();
-	omap_serial_init(omap_serial_ports);
 }
 
 #define MACHINE_PANICED		1
diff --git a/arch/arm/mach-omap1/clock.c b/arch/arm/mach-omap1/clock.c
new file mode 100644
index 0000000..4277eee
--- /dev/null
+++ b/arch/arm/mach-omap1/clock.c
@@ -0,0 +1,792 @@
+/*
+ *  linux/arch/arm/mach-omap1/clock.c
+ *
+ *  Copyright (C) 2004 - 2005 Nokia corporation
+ *  Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
+ *
+ *  Modified to use omap shared clock framework by
+ *  Tony Lindgren <tony@atomide.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+
+#include <asm/io.h>
+#include <asm/hardware/clock.h>
+
+#include <asm/arch/usb.h>
+#include <asm/arch/clock.h>
+#include <asm/arch/sram.h>
+
+#include "clock.h"
+
+__u32 arm_idlect1_mask;
+
+/*-------------------------------------------------------------------------
+ * Omap1 specific clock functions
+ *-------------------------------------------------------------------------*/
+
+static void omap1_watchdog_recalc(struct clk * clk)
+{
+	clk->rate = clk->parent->rate / 14;
+}
+
+static void omap1_uart_recalc(struct clk * clk)
+{
+	unsigned int val = omap_readl(clk->enable_reg);
+	if (val & clk->enable_bit)
+		clk->rate = 48000000;
+	else
+		clk->rate = 12000000;
+}
+
+static int omap1_clk_enable_dsp_domain(struct clk *clk)
+{
+	int retval;
+
+	retval = omap1_clk_use(&api_ck.clk);
+	if (!retval) {
+		retval = omap1_clk_enable(clk);
+		omap1_clk_unuse(&api_ck.clk);
+	}
+
+	return retval;
+}
+
+static void omap1_clk_disable_dsp_domain(struct clk *clk)
+{
+	if (omap1_clk_use(&api_ck.clk) == 0) {
+		omap1_clk_disable(clk);
+		omap1_clk_unuse(&api_ck.clk);
+	}
+}
+
+static int omap1_clk_enable_uart_functional(struct clk *clk)
+{
+	int ret;
+	struct uart_clk *uclk;
+
+	ret = omap1_clk_enable(clk);
+	if (ret == 0) {
+		/* Set smart idle acknowledgement mode */
+		uclk = (struct uart_clk *)clk;
+		omap_writeb((omap_readb(uclk->sysc_addr) & ~0x10) | 8,
+			    uclk->sysc_addr);
+	}
+
+	return ret;
+}
+
+static void omap1_clk_disable_uart_functional(struct clk *clk)
+{
+	struct uart_clk *uclk;
+
+	/* Set force idle acknowledgement mode */
+	uclk = (struct uart_clk *)clk;
+	omap_writeb((omap_readb(uclk->sysc_addr) & ~0x18), uclk->sysc_addr);
+
+	omap1_clk_disable(clk);
+}
+
+static void omap1_clk_allow_idle(struct clk *clk)
+{
+	struct arm_idlect1_clk * iclk = (struct arm_idlect1_clk *)clk;
+
+	if (!(clk->flags & CLOCK_IDLE_CONTROL))
+		return;
+
+	if (iclk->no_idle_count > 0 && !(--iclk->no_idle_count))
+		arm_idlect1_mask |= 1 << iclk->idlect_shift;
+}
+
+static void omap1_clk_deny_idle(struct clk *clk)
+{
+	struct arm_idlect1_clk * iclk = (struct arm_idlect1_clk *)clk;
+
+	if (!(clk->flags & CLOCK_IDLE_CONTROL))
+		return;
+
+	if (iclk->no_idle_count++ == 0)
+		arm_idlect1_mask &= ~(1 << iclk->idlect_shift);
+}
+
+static __u16 verify_ckctl_value(__u16 newval)
+{
+	/* This function checks for the following limitations set
+	 * by the hardware (all conditions must be true):
+	 * DSPMMU_CK == DSP_CK  or  DSPMMU_CK == DSP_CK/2
+	 * ARM_CK >= TC_CK
+	 * DSP_CK >= TC_CK
+	 * DSPMMU_CK >= TC_CK
+	 *
+	 * In addition, the following rules are enforced:
+	 * LCD_CK <= TC_CK
+	 * ARMPER_CK <= TC_CK
+	 *
+	 * However, maximum frequencies are not checked for!
+	 */
+	__u8 per_exp;
+	__u8 lcd_exp;
+	__u8 arm_exp;
+	__u8 dsp_exp;
+	__u8 tc_exp;
+	__u8 dspmmu_exp;
+
+	per_exp = (newval >> CKCTL_PERDIV_OFFSET) & 3;
+	lcd_exp = (newval >> CKCTL_LCDDIV_OFFSET) & 3;
+	arm_exp = (newval >> CKCTL_ARMDIV_OFFSET) & 3;
+	dsp_exp = (newval >> CKCTL_DSPDIV_OFFSET) & 3;
+	tc_exp = (newval >> CKCTL_TCDIV_OFFSET) & 3;
+	dspmmu_exp = (newval >> CKCTL_DSPMMUDIV_OFFSET) & 3;
+
+	if (dspmmu_exp < dsp_exp)
+		dspmmu_exp = dsp_exp;
+	if (dspmmu_exp > dsp_exp+1)
+		dspmmu_exp = dsp_exp+1;
+	if (tc_exp < arm_exp)
+		tc_exp = arm_exp;
+	if (tc_exp < dspmmu_exp)
+		tc_exp = dspmmu_exp;
+	if (tc_exp > lcd_exp)
+		lcd_exp = tc_exp;
+	if (tc_exp > per_exp)
+		per_exp = tc_exp;
+
+	newval &= 0xf000;
+	newval |= per_exp << CKCTL_PERDIV_OFFSET;
+	newval |= lcd_exp << CKCTL_LCDDIV_OFFSET;
+	newval |= arm_exp << CKCTL_ARMDIV_OFFSET;
+	newval |= dsp_exp << CKCTL_DSPDIV_OFFSET;
+	newval |= tc_exp << CKCTL_TCDIV_OFFSET;
+	newval |= dspmmu_exp << CKCTL_DSPMMUDIV_OFFSET;
+
+	return newval;
+}
+
+static int calc_dsor_exp(struct clk *clk, unsigned long rate)
+{
+	/* Note: If the target frequency is too low, this function returns 4,
+	 * which is an invalid value. The caller must check for this value
+	 * and act accordingly.
+	 *
+	 * Note: This function does not check for the following limitations set
+	 * by the hardware (all conditions must be true):
+	 * DSPMMU_CK == DSP_CK  or  DSPMMU_CK == DSP_CK/2
+	 * ARM_CK >= TC_CK
+	 * DSP_CK >= TC_CK
+	 * DSPMMU_CK >= TC_CK
+	 */
+	unsigned long realrate;
+	struct clk * parent;
+	unsigned  dsor_exp;
+
+	if (unlikely(!(clk->flags & RATE_CKCTL)))
+		return -EINVAL;
+
+	parent = clk->parent;
+	if (unlikely(parent == 0))
+		return -EIO;
+
+	realrate = parent->rate;
+	for (dsor_exp=0; dsor_exp<4; dsor_exp++) {
+		if (realrate <= rate)
+			break;
+
+		realrate /= 2;
+	}
+
+	return dsor_exp;
+}
+
+static void omap1_ckctl_recalc(struct clk * clk)
+{
+	int dsor;
+
+	/* Calculate divisor encoded as 2-bit exponent */
+	dsor = 1 << (3 & (omap_readw(ARM_CKCTL) >> clk->rate_offset));
+
+	if (unlikely(clk->rate == clk->parent->rate / dsor))
+		return; /* No change, quick exit */
+	clk->rate = clk->parent->rate / dsor;
+
+	if (unlikely(clk->flags & RATE_PROPAGATES))
+		propagate_rate(clk);
+}
+
+static void omap1_ckctl_recalc_dsp_domain(struct clk * clk)
+{
+	int dsor;
+
+	/* Calculate divisor encoded as 2-bit exponent
+	 *
+	 * The clock control bits are in DSP domain,
+	 * so api_ck is needed for access.
+	 * Note that DSP_CKCTL virt addr = phys addr, so
+	 * we must use __raw_readw() instead of omap_readw().
+	 */
+	omap1_clk_use(&api_ck.clk);
+	dsor = 1 << (3 & (__raw_readw(DSP_CKCTL) >> clk->rate_offset));
+	omap1_clk_unuse(&api_ck.clk);
+
+	if (unlikely(clk->rate == clk->parent->rate / dsor))
+		return; /* No change, quick exit */
+	clk->rate = clk->parent->rate / dsor;
+
+	if (unlikely(clk->flags & RATE_PROPAGATES))
+		propagate_rate(clk);
+}
+
+/* MPU virtual clock functions */
+static int omap1_select_table_rate(struct clk * clk, unsigned long rate)
+{
+	/* Find the highest supported frequency <= rate and switch to it */
+	struct mpu_rate * ptr;
+
+	if (clk != &virtual_ck_mpu)
+		return -EINVAL;
+
+	for (ptr = rate_table; ptr->rate; ptr++) {
+		if (ptr->xtal != ck_ref.rate)
+			continue;
+
+		/* DPLL1 cannot be reprogrammed without risking system crash */
+		if (likely(ck_dpll1.rate!=0) && ptr->pll_rate != ck_dpll1.rate)
+			continue;
+
+		/* Can check only after xtal frequency check */
+		if (ptr->rate <= rate)
+			break;
+	}
+
+	if (!ptr->rate)
+		return -EINVAL;
+
+	/*
+	 * In most cases we should not need to reprogram DPLL.
+	 * Reprogramming the DPLL is tricky, it must be done from SRAM.
+	 */
+	omap_sram_reprogram_clock(ptr->dpllctl_val, ptr->ckctl_val);
+
+	ck_dpll1.rate = ptr->pll_rate;
+	propagate_rate(&ck_dpll1);
+	return 0;
+}
+
+static int omap1_clk_set_rate_dsp_domain(struct clk *clk, unsigned long rate)
+{
+	int  ret = -EINVAL;
+	int  dsor_exp;
+	__u16  regval;
+
+	if (clk->flags & RATE_CKCTL) {
+		dsor_exp = calc_dsor_exp(clk, rate);
+		if (dsor_exp > 3)
+			dsor_exp = -EINVAL;
+		if (dsor_exp < 0)
+			return dsor_exp;
+
+		regval = __raw_readw(DSP_CKCTL);
+		regval &= ~(3 << clk->rate_offset);
+		regval |= dsor_exp << clk->rate_offset;
+		__raw_writew(regval, DSP_CKCTL);
+		clk->rate = clk->parent->rate / (1 << dsor_exp);
+		ret = 0;
+	}
+
+	if (unlikely(ret == 0 && (clk->flags & RATE_PROPAGATES)))
+		propagate_rate(clk);
+
+	return ret;
+}
+
+static long omap1_round_to_table_rate(struct clk * clk, unsigned long rate)
+{
+	/* Find the highest supported frequency <= rate */
+	struct mpu_rate * ptr;
+	long  highest_rate;
+
+	if (clk != &virtual_ck_mpu)
+		return -EINVAL;
+
+	highest_rate = -EINVAL;
+
+	for (ptr = rate_table; ptr->rate; ptr++) {
+		if (ptr->xtal != ck_ref.rate)
+			continue;
+
+		highest_rate = ptr->rate;
+
+		/* Can check only after xtal frequency check */
+		if (ptr->rate <= rate)
+			break;
+	}
+
+	return highest_rate;
+}
+
+static unsigned calc_ext_dsor(unsigned long rate)
+{
+	unsigned dsor;
+
+	/* MCLK and BCLK divisor selection is not linear:
+	 * freq = 96MHz / dsor
+	 *
+	 * RATIO_SEL range: dsor <-> RATIO_SEL
+	 * 0..6: (RATIO_SEL+2) <-> (dsor-2)
+	 * 6..48:  (8+(RATIO_SEL-6)*2) <-> ((dsor-8)/2+6)
+	 * Minimum dsor is 2 and maximum is 96. Odd divisors starting from 9
+	 * cannot be used.
+	 */
+	for (dsor = 2; dsor < 96; ++dsor) {
+		if ((dsor & 1) && dsor > 8)
+		  	continue;
+		if (rate >= 96000000 / dsor)
+			break;
+	}
+	return dsor;
+}
+
+/* Only needed on 1510 */
+static int omap1_set_uart_rate(struct clk * clk, unsigned long rate)
+{
+	unsigned int val;
+
+	val = omap_readl(clk->enable_reg);
+	if (rate == 12000000)
+		val &= ~(1 << clk->enable_bit);
+	else if (rate == 48000000)
+		val |= (1 << clk->enable_bit);
+	else
+		return -EINVAL;
+	omap_writel(val, clk->enable_reg);
+	clk->rate = rate;
+
+	return 0;
+}
+
+/* External clock (MCLK & BCLK) functions */
+static int omap1_set_ext_clk_rate(struct clk * clk, unsigned long rate)
+{
+	unsigned dsor;
+	__u16 ratio_bits;
+
+	dsor = calc_ext_dsor(rate);
+	clk->rate = 96000000 / dsor;
+	if (dsor > 8)
+		ratio_bits = ((dsor - 8) / 2 + 6) << 2;
+	else
+		ratio_bits = (dsor - 2) << 2;
+
+	ratio_bits |= omap_readw(clk->enable_reg) & ~0xfd;
+	omap_writew(ratio_bits, clk->enable_reg);
+
+	return 0;
+}
+
+static long omap1_round_ext_clk_rate(struct clk * clk, unsigned long rate)
+{
+	return 96000000 / calc_ext_dsor(rate);
+}
+
+static void omap1_init_ext_clk(struct clk * clk)
+{
+	unsigned dsor;
+	__u16 ratio_bits;
+
+	/* Determine current rate and ensure clock is based on 96MHz APLL */
+	ratio_bits = omap_readw(clk->enable_reg) & ~1;
+	omap_writew(ratio_bits, clk->enable_reg);
+
+	ratio_bits = (ratio_bits & 0xfc) >> 2;
+	if (ratio_bits > 6)
+		dsor = (ratio_bits - 6) * 2 + 8;
+	else
+		dsor = ratio_bits + 2;
+
+	clk->rate = 96000000 / dsor;
+}
+
+static int omap1_clk_use(struct clk *clk)
+{
+	int ret = 0;
+	if (clk->usecount++ == 0) {
+		if (likely(clk->parent)) {
+			ret = omap1_clk_use(clk->parent);
+
+			if (unlikely(ret != 0)) {
+				clk->usecount--;
+				return ret;
+			}
+
+			if (clk->flags & CLOCK_NO_IDLE_PARENT)
+				if (!cpu_is_omap24xx())
+					omap1_clk_deny_idle(clk->parent);
+		}
+
+		ret = clk->enable(clk);
+
+		if (unlikely(ret != 0) && clk->parent) {
+			omap1_clk_unuse(clk->parent);
+			clk->usecount--;
+		}
+	}
+
+	return ret;
+}
+
+static void omap1_clk_unuse(struct clk *clk)
+{
+	if (clk->usecount > 0 && !(--clk->usecount)) {
+		clk->disable(clk);
+		if (likely(clk->parent)) {
+			omap1_clk_unuse(clk->parent);
+			if (clk->flags & CLOCK_NO_IDLE_PARENT)
+				if (!cpu_is_omap24xx())
+					omap1_clk_allow_idle(clk->parent);
+		}
+	}
+}
+
+static int omap1_clk_enable(struct clk *clk)
+{
+	__u16 regval16;
+	__u32 regval32;
+
+	if (clk->flags & ALWAYS_ENABLED)
+		return 0;
+
+	if (unlikely(clk->enable_reg == 0)) {
+		printk(KERN_ERR "clock.c: Enable for %s without enable code\n",
+		       clk->name);
+		return 0;
+	}
+
+	if (clk->flags & ENABLE_REG_32BIT) {
+		if (clk->flags & VIRTUAL_IO_ADDRESS) {
+			regval32 = __raw_readl(clk->enable_reg);
+			regval32 |= (1 << clk->enable_bit);
+			__raw_writel(regval32, clk->enable_reg);
+		} else {
+			regval32 = omap_readl(clk->enable_reg);
+			regval32 |= (1 << clk->enable_bit);
+			omap_writel(regval32, clk->enable_reg);
+		}
+	} else {
+		if (clk->flags & VIRTUAL_IO_ADDRESS) {
+			regval16 = __raw_readw(clk->enable_reg);
+			regval16 |= (1 << clk->enable_bit);
+			__raw_writew(regval16, clk->enable_reg);
+		} else {
+			regval16 = omap_readw(clk->enable_reg);
+			regval16 |= (1 << clk->enable_bit);
+			omap_writew(regval16, clk->enable_reg);
+		}
+	}
+
+	return 0;
+}
+
+static void omap1_clk_disable(struct clk *clk)
+{
+	__u16 regval16;
+	__u32 regval32;
+
+	if (clk->enable_reg == 0)
+		return;
+
+	if (clk->flags & ENABLE_REG_32BIT) {
+		if (clk->flags & VIRTUAL_IO_ADDRESS) {
+			regval32 = __raw_readl(clk->enable_reg);
+			regval32 &= ~(1 << clk->enable_bit);
+			__raw_writel(regval32, clk->enable_reg);
+		} else {
+			regval32 = omap_readl(clk->enable_reg);
+			regval32 &= ~(1 << clk->enable_bit);
+			omap_writel(regval32, clk->enable_reg);
+		}
+	} else {
+		if (clk->flags & VIRTUAL_IO_ADDRESS) {
+			regval16 = __raw_readw(clk->enable_reg);
+			regval16 &= ~(1 << clk->enable_bit);
+			__raw_writew(regval16, clk->enable_reg);
+		} else {
+			regval16 = omap_readw(clk->enable_reg);
+			regval16 &= ~(1 << clk->enable_bit);
+			omap_writew(regval16, clk->enable_reg);
+		}
+	}
+}
+
+static long omap1_clk_round_rate(struct clk *clk, unsigned long rate)
+{
+	int dsor_exp;
+
+	if (clk->flags & RATE_FIXED)
+		return clk->rate;
+
+	if (clk->flags & RATE_CKCTL) {
+		dsor_exp = calc_dsor_exp(clk, rate);
+		if (dsor_exp < 0)
+			return dsor_exp;
+		if (dsor_exp > 3)
+			dsor_exp = 3;
+		return clk->parent->rate / (1 << dsor_exp);
+	}
+
+	if (clk->round_rate != 0)
+		return clk->round_rate(clk, rate);
+
+	return clk->rate;
+}
+
+static int omap1_clk_set_rate(struct clk *clk, unsigned long rate)
+{
+	int  ret = -EINVAL;
+	int  dsor_exp;
+	__u16  regval;
+
+	if (clk->set_rate)
+		ret = clk->set_rate(clk, rate);
+	else if (clk->flags & RATE_CKCTL) {
+		dsor_exp = calc_dsor_exp(clk, rate);
+		if (dsor_exp > 3)
+			dsor_exp = -EINVAL;
+		if (dsor_exp < 0)
+			return dsor_exp;
+
+		regval = omap_readw(ARM_CKCTL);
+		regval &= ~(3 << clk->rate_offset);
+		regval |= dsor_exp << clk->rate_offset;
+		regval = verify_ckctl_value(regval);
+		omap_writew(regval, ARM_CKCTL);
+		clk->rate = clk->parent->rate / (1 << dsor_exp);
+		ret = 0;
+	}
+
+	if (unlikely(ret == 0 && (clk->flags & RATE_PROPAGATES)))
+		propagate_rate(clk);
+
+	return ret;
+}
+
+/*-------------------------------------------------------------------------
+ * Omap1 clock reset and init functions
+ *-------------------------------------------------------------------------*/
+
+#ifdef CONFIG_OMAP_RESET_CLOCKS
+/*
+ * Resets some clocks that may be left on from bootloader,
+ * but leaves serial clocks on. See also omap_late_clk_reset().
+ */
+static inline void omap1_early_clk_reset(void)
+{
+	//omap_writel(0x3 << 29, MOD_CONF_CTRL_0);
+}
+
+static int __init omap1_late_clk_reset(void)
+{
+	/* Turn off all unused clocks */
+	struct clk *p;
+	__u32 regval32;
+
+	/* USB_REQ_EN will be disabled later if necessary (usb_dc_ck) */
+	regval32 = omap_readw(SOFT_REQ_REG) & (1 << 4);
+	omap_writew(regval32, SOFT_REQ_REG);
+	omap_writew(0, SOFT_REQ_REG2);
+
+	list_for_each_entry(p, &clocks, node) {
+		if (p->usecount > 0 || (p->flags & ALWAYS_ENABLED) ||
+			p->enable_reg == 0)
+			continue;
+
+		/* Clocks in the DSP domain need api_ck. Just assume that the
+		 * bootloader has not enabled any DSP clocks */
+		if ((u32)p->enable_reg == DSP_IDLECT2) {
+			printk(KERN_INFO "Skipping reset check for DSP domain "
+			       "clock \"%s\"\n", p->name);
+			continue;
+		}
+
+		/* Is the clock already disabled? */
+		if (p->flags & ENABLE_REG_32BIT) {
+			if (p->flags & VIRTUAL_IO_ADDRESS)
+				regval32 = __raw_readl(p->enable_reg);
+			else
+				regval32 = omap_readl(p->enable_reg);
+		} else {
+			if (p->flags & VIRTUAL_IO_ADDRESS)
+				regval32 = __raw_readw(p->enable_reg);
+			else
+				regval32 = omap_readw(p->enable_reg);
+		}
+
+		if ((regval32 & (1 << p->enable_bit)) == 0)
+			continue;
+
+		/* FIXME: This clock seems to be necessary but no-one
+		 * has asked for its activation. */
+		if (p == &tc2_ck         // FIX: pm.c (SRAM), CCP, Camera
+		    || p == &ck_dpll1out.clk // FIX: SoSSI, SSR
+		    || p == &arm_gpio_ck // FIX: GPIO code for 1510
+		    ) {
+			printk(KERN_INFO "FIXME: Clock \"%s\" seems unused\n",
+			       p->name);
+			continue;
+		}
+
+		printk(KERN_INFO "Disabling unused clock \"%s\"... ", p->name);
+		p->disable(p);
+		printk(" done\n");
+	}
+
+	return 0;
+}
+late_initcall(omap1_late_clk_reset);
+
+#else
+#define omap1_early_clk_reset()	{}
+#endif
+
+static struct clk_functions omap1_clk_functions = {
+	.clk_use		= omap1_clk_use,
+	.clk_unuse		= omap1_clk_unuse,
+	.clk_round_rate		= omap1_clk_round_rate,
+	.clk_set_rate		= omap1_clk_set_rate,
+};
+
+int __init omap1_clk_init(void)
+{
+	struct clk **clkp;
+	const struct omap_clock_config *info;
+	int crystal_type = 0; /* Default 12 MHz */
+
+	omap1_early_clk_reset();
+	clk_init(&omap1_clk_functions);
+
+	/* By default all idlect1 clocks are allowed to idle */
+	arm_idlect1_mask = ~0;
+
+	for (clkp = onchip_clks;
+	     clkp < onchip_clks + ARRAY_SIZE(onchip_clks); clkp++) {
+		if (((*clkp)->flags & CLOCK_IN_OMAP1510) && cpu_is_omap1510()) {
+			clk_register(*clkp);
+			continue;
+		}
+
+		if (((*clkp)->flags & CLOCK_IN_OMAP16XX) && cpu_is_omap16xx()) {
+			clk_register(*clkp);
+			continue;
+		}
+
+		if (((*clkp)->flags & CLOCK_IN_OMAP730) && cpu_is_omap730()) {
+			clk_register(*clkp);
+			continue;
+		}
+	}
+
+	info = omap_get_config(OMAP_TAG_CLOCK, struct omap_clock_config);
+	if (info != NULL) {
+		if (!cpu_is_omap1510())
+			crystal_type = info->system_clock_type;
+	}
+
+#if defined(CONFIG_ARCH_OMAP730)
+	ck_ref.rate = 13000000;
+#elif defined(CONFIG_ARCH_OMAP16XX)
+	if (crystal_type == 2)
+		ck_ref.rate = 19200000;
+#endif
+
+	printk("Clocks: ARM_SYSST: 0x%04x DPLL_CTL: 0x%04x ARM_CKCTL: 0x%04x\n",
+	       omap_readw(ARM_SYSST), omap_readw(DPLL_CTL),
+	       omap_readw(ARM_CKCTL));
+
+	/* We want to be in synchronous scalable mode */
+	omap_writew(0x1000, ARM_SYSST);
+
+#ifdef CONFIG_OMAP_CLOCKS_SET_BY_BOOTLOADER
+	/* Use values set by bootloader. Determine PLL rate and recalculate
+	 * dependent clocks as if kernel had changed PLL or divisors.
+	 */
+	{
+		unsigned pll_ctl_val = omap_readw(DPLL_CTL);
+
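+		/* DPLL_CTL as decoded below: bit 4 enables the PLL, bits 11:7
+		 * hold the multiplier, bits 6:5 the divider minus one, and
+		 * bits 3:2 select the bypass divider. */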
+		ck_dpll1.rate = ck_ref.rate; /* Base xtal rate */
+		if (pll_ctl_val & 0x10) {
+			/* PLL enabled, apply multiplier and divisor */
+			if (pll_ctl_val & 0xf80)
+				ck_dpll1.rate *= (pll_ctl_val & 0xf80) >> 7;
+			ck_dpll1.rate /= ((pll_ctl_val & 0x60) >> 5) + 1;
+		} else {
+			/* PLL disabled, apply bypass divisor */
+			switch (pll_ctl_val & 0xc) {
+			case 0:
+				break;
+			case 0x4:
+				ck_dpll1.rate /= 2;
+				break;
+			default:
+				ck_dpll1.rate /= 4;
+				break;
+			}
+		}
+	}
+	propagate_rate(&ck_dpll1);
+#else
+	/* Find the highest supported frequency and enable it */
+	if (omap1_select_table_rate(&virtual_ck_mpu, ~0)) {
+		printk(KERN_ERR "System frequencies not set. Check your config.\n");
+		/* Guess sane values (60MHz) */
+		omap_writew(0x2290, DPLL_CTL);
+		omap_writew(0x1005, ARM_CKCTL);
+		ck_dpll1.rate = 60000000;
+		propagate_rate(&ck_dpll1);
+	}
+#endif
+	/* Cache rates for clocks connected to ck_ref (not dpll1) */
+	propagate_rate(&ck_ref);
+	printk(KERN_INFO "Clocking rate (xtal/DPLL1/MPU): "
+		"%ld.%01ld/%ld.%01ld/%ld.%01ld MHz\n",
+	       ck_ref.rate / 1000000, (ck_ref.rate / 100000) % 10,
+	       ck_dpll1.rate / 1000000, (ck_dpll1.rate / 100000) % 10,
+	       arm_ck.rate / 1000000, (arm_ck.rate / 100000) % 10);
+
+#ifdef CONFIG_MACH_OMAP_PERSEUS2
+	/* Select slicer output as OMAP input clock */
+	omap_writew(omap_readw(OMAP730_PCC_UPLD_CTRL) & ~0x1, OMAP730_PCC_UPLD_CTRL);
+#endif
+
+	/* Turn off DSP and ARM_TIMXO. Make sure ARM_INTHCK is not divided */
+	omap_writew(omap_readw(ARM_CKCTL) & 0x0fff, ARM_CKCTL);
+
+	/* Put DSP/MPUI into reset until needed */
+	omap_writew(0, ARM_RSTCT1);
+	omap_writew(1, ARM_RSTCT2);
+	omap_writew(0x400, ARM_IDLECT1);
+
+	/*
+	 * According to OMAP5910 Erratum SYS_DMA_1, bit DMACK_REQ (bit 8)
+	 * of the ARM_IDLECT2 register must be set to zero. The power-on
+	 * default value of this bit is one.
+	 */
+	omap_writew(0x0000, ARM_IDLECT2);	/* Turn LCD clock off also */
+
+	/*
+	 * Only enable those clocks we will need, let the drivers
+	 * enable other clocks as necessary
+	 */
+	clk_use(&armper_ck.clk);
+	clk_use(&armxor_ck.clk);
+	clk_use(&armtim_ck.clk); /* This should be done by timer code */
+
+	if (cpu_is_omap1510())
+		clk_enable(&arm_gpio_ck);
+
+	return 0;
+}
+
diff --git a/arch/arm/mach-omap1/clock.h b/arch/arm/mach-omap1/clock.h
new file mode 100644
index 0000000..f3bdfb5
--- /dev/null
+++ b/arch/arm/mach-omap1/clock.h
@@ -0,0 +1,768 @@
+/*
+ *  linux/arch/arm/mach-omap1/clock.h
+ *
+ *  Copyright (C) 2004 - 2005 Nokia Corporation
+ *  Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
+ *  Based on clocks.h by Tony Lindgren, Gordon McNutt and RidgeRun, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP1_CLOCK_H
+#define __ARCH_ARM_MACH_OMAP1_CLOCK_H
+
+static int omap1_clk_enable(struct clk *clk);
+static void omap1_clk_disable(struct clk *clk);
+static void omap1_ckctl_recalc(struct clk *clk);
+static void omap1_watchdog_recalc(struct clk *clk);
+static void omap1_ckctl_recalc_dsp_domain(struct clk *clk);
+static int omap1_clk_enable_dsp_domain(struct clk *clk);
+static int omap1_clk_set_rate_dsp_domain(struct clk *clk, unsigned long rate);
+static void omap1_clk_disable_dsp_domain(struct clk *clk);
+static int omap1_set_uart_rate(struct clk *clk, unsigned long rate);
+static void omap1_uart_recalc(struct clk *clk);
+static int omap1_clk_enable_uart_functional(struct clk *clk);
+static void omap1_clk_disable_uart_functional(struct clk *clk);
+static int omap1_set_ext_clk_rate(struct clk *clk, unsigned long rate);
+static long omap1_round_ext_clk_rate(struct clk *clk, unsigned long rate);
+static void omap1_init_ext_clk(struct clk *clk);
+static int omap1_select_table_rate(struct clk *clk, unsigned long rate);
+static long omap1_round_to_table_rate(struct clk *clk, unsigned long rate);
+static int omap1_clk_use(struct clk *clk);
+static void omap1_clk_unuse(struct clk *clk);
+
+struct mpu_rate {
+	unsigned long		rate;
+	unsigned long		xtal;
+	unsigned long		pll_rate;
+	__u16			ckctl_val;
+	__u16			dpllctl_val;
+};
+
+struct uart_clk {
+	struct clk	clk;
+	unsigned long	sysc_addr;
+};
+
+/* Provides a way to prevent some ARM IDLECT clocks from idling */
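+/* idlect_shift is the clock's bit position in ARM_IDLECT1; no_idle_count
+ * appears to track how many users need the clock kept out of idle. */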
+struct arm_idlect1_clk {
+	struct clk	clk;
+	unsigned long	no_idle_count;
+	__u8		idlect_shift;
+};
+
+/* ARM_CKCTL bit shifts */
+#define CKCTL_PERDIV_OFFSET	0
+#define CKCTL_LCDDIV_OFFSET	2
+#define CKCTL_ARMDIV_OFFSET	4
+#define CKCTL_DSPDIV_OFFSET	6
+#define CKCTL_TCDIV_OFFSET	8
+#define CKCTL_DSPMMUDIV_OFFSET	10
+/*#define ARM_TIMXO		12*/
+#define EN_DSPCK		13
+/*#define ARM_INTHCK_SEL	14*/ /* Divide-by-2 for mpu inth_ck */
+/* DSP_CKCTL bit shifts */
+#define CKCTL_DSPPERDIV_OFFSET	0
+
+/* ARM_IDLECT2 bit shifts */
+#define EN_WDTCK	0
+#define EN_XORPCK	1
+#define EN_PERCK	2
+#define EN_LCDCK	3
+#define EN_LBCK		4 /* Not on 1610/1710 */
+/*#define EN_HSABCK	5*/
+#define EN_APICK	6
+#define EN_TIMCK	7
+#define DMACK_REQ	8
+#define EN_GPIOCK	9 /* Not on 1610/1710 */
+/*#define EN_LBFREECK	10*/
+#define EN_CKOUT_ARM	11
+
+/* ARM_IDLECT3 bit shifts */
+#define EN_OCPI_CK	0
+#define EN_TC1_CK	2
+#define EN_TC2_CK	4
+
+/* DSP_IDLECT2 bit shifts (0,1,2 are same as for ARM_IDLECT2) */
+#define EN_DSPTIMCK	5
+
+/* Various register defines for clock controls scattered around OMAP chip */
+#define USB_MCLK_EN_BIT		4	/* In ULPD_CLKC_CTRL */
+#define USB_HOST_HHC_UHOST_EN	9	/* In MOD_CONF_CTRL_0 */
+#define SWD_ULPD_PLL_CLK_REQ	1	/* In SWD_CLK_DIV_CTRL_SEL */
+#define COM_ULPD_PLL_CLK_REQ	1	/* In COM_CLK_DIV_CTRL_SEL */
+#define SWD_CLK_DIV_CTRL_SEL	0xfffe0874
+#define COM_CLK_DIV_CTRL_SEL	0xfffe0878
+#define SOFT_REQ_REG		0xfffe0834
+#define SOFT_REQ_REG2		0xfffe0880
+
+/*-------------------------------------------------------------------------
+ * Omap1 MPU rate table
+ *-------------------------------------------------------------------------*/
+static struct mpu_rate rate_table[] = {
+	/* MPU MHz, xtal MHz, dpll1 MHz, CKCTL, DPLL_CTL
+	 * NOTE: Comment order here is different from bits in CKCTL value:
+	 * armdiv, dspdiv, dspmmu, tcdiv, perdiv, lcddiv
+	 */
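+	/* Each two-bit CKCTL divider field holds a power-of-two exponent,
+	 * i.e. field value n selects divide-by-2^n (1, 2, 4 or 8). */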
+#if defined(CONFIG_OMAP_ARM_216MHZ)
+	{ 216000000, 12000000, 216000000, 0x050d, 0x2910 }, /* 1/1/2/2/2/8 */
+#endif
+#if defined(CONFIG_OMAP_ARM_195MHZ)
+	{ 195000000, 13000000, 195000000, 0x050e, 0x2790 }, /* 1/1/2/2/4/8 */
+#endif
+#if defined(CONFIG_OMAP_ARM_192MHZ)
+	{ 192000000, 19200000, 192000000, 0x050f, 0x2510 }, /* 1/1/2/2/8/8 */
+	{ 192000000, 12000000, 192000000, 0x050f, 0x2810 }, /* 1/1/2/2/8/8 */
+	{  96000000, 12000000, 192000000, 0x055f, 0x2810 }, /* 2/2/2/2/8/8 */
+	{  48000000, 12000000, 192000000, 0x0baf, 0x2810 }, /* 4/4/4/8/8/8 */
+	{  24000000, 12000000, 192000000, 0x0fff, 0x2810 }, /* 8/8/8/8/8/8 */
+#endif
+#if defined(CONFIG_OMAP_ARM_182MHZ)
+	{ 182000000, 13000000, 182000000, 0x050e, 0x2710 }, /* 1/1/2/2/4/8 */
+#endif
+#if defined(CONFIG_OMAP_ARM_168MHZ)
+	{ 168000000, 12000000, 168000000, 0x010f, 0x2710 }, /* 1/1/1/2/8/8 */
+#endif
+#if defined(CONFIG_OMAP_ARM_150MHZ)
+	{ 150000000, 12000000, 150000000, 0x010a, 0x2cb0 }, /* 1/1/1/2/4/4 */
+#endif
+#if defined(CONFIG_OMAP_ARM_120MHZ)
+	{ 120000000, 12000000, 120000000, 0x010a, 0x2510 }, /* 1/1/1/2/4/4 */
+#endif
+#if defined(CONFIG_OMAP_ARM_96MHZ)
+	{  96000000, 12000000,  96000000, 0x0005, 0x2410 }, /* 1/1/1/1/2/2 */
+#endif
+#if defined(CONFIG_OMAP_ARM_60MHZ)
+	{  60000000, 12000000,  60000000, 0x0005, 0x2290 }, /* 1/1/1/1/2/2 */
+#endif
+#if defined(CONFIG_OMAP_ARM_30MHZ)
+	{  30000000, 12000000,  60000000, 0x0555, 0x2290 }, /* 2/2/2/2/2/2 */
+#endif
+	{ 0, 0, 0, 0, 0 },
+};
+
+/*-------------------------------------------------------------------------
+ * Omap1 clocks
+ *-------------------------------------------------------------------------*/
+
+static struct clk ck_ref = {
+	.name		= "ck_ref",
+	.rate		= 12000000,
+	.flags		= CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+			  ALWAYS_ENABLED,
+	.enable		= &omap1_clk_enable,
+	.disable	= &omap1_clk_disable,
+};
+
+static struct clk ck_dpll1 = {
+	.name		= "ck_dpll1",
+	.parent		= &ck_ref,
+	.flags		= CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+			  RATE_PROPAGATES | ALWAYS_ENABLED,
+	.enable		= &omap1_clk_enable,
+	.disable	= &omap1_clk_disable,
+};
+
+static struct arm_idlect1_clk ck_dpll1out = {
+	.clk = {
+	       	.name		= "ck_dpll1out",
+		.parent		= &ck_dpll1,
+		.flags		= CLOCK_IN_OMAP16XX | CLOCK_IDLE_CONTROL,
+		.enable_reg	= (void __iomem *)ARM_IDLECT2,
+		.enable_bit	= EN_CKOUT_ARM,
+		.recalc		= &followparent_recalc,
+		.enable		= &omap1_clk_enable,
+		.disable	= &omap1_clk_disable,
+	},
+	.idlect_shift	= 12,
+};
+
+static struct clk arm_ck = {
+	.name		= "arm_ck",
+	.parent		= &ck_dpll1,
+	.flags		= CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+			  RATE_CKCTL | RATE_PROPAGATES | ALWAYS_ENABLED,
+	.rate_offset	= CKCTL_ARMDIV_OFFSET,
+	.recalc		= &omap1_ckctl_recalc,
+	.enable		= &omap1_clk_enable,
+	.disable	= &omap1_clk_disable,
+};
+
+static struct arm_idlect1_clk armper_ck = {
+	.clk = {
+		.name		= "armper_ck",
+		.parent		= &ck_dpll1,
+		.flags		= CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+				  RATE_CKCTL | CLOCK_IDLE_CONTROL,
+		.enable_reg	= (void __iomem *)ARM_IDLECT2,
+		.enable_bit	= EN_PERCK,
+		.rate_offset	= CKCTL_PERDIV_OFFSET,
+		.recalc		= &omap1_ckctl_recalc,
+		.enable		= &omap1_clk_enable,
+		.disable	= &omap1_clk_disable,
+	},
+	.idlect_shift	= 2,
+};
+
+static struct clk arm_gpio_ck = {
+	.name		= "arm_gpio_ck",
+	.parent		= &ck_dpll1,
+	.flags		= CLOCK_IN_OMAP1510,
+	.enable_reg	= (void __iomem *)ARM_IDLECT2,
+	.enable_bit	= EN_GPIOCK,
+	.recalc		= &followparent_recalc,
+	.enable		= &omap1_clk_enable,
+	.disable	= &omap1_clk_disable,
+};
+
+static struct arm_idlect1_clk armxor_ck = {
+	.clk = {
+		.name		= "armxor_ck",
+		.parent		= &ck_ref,
+		.flags		= CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+				  CLOCK_IDLE_CONTROL,
+		.enable_reg	= (void __iomem *)ARM_IDLECT2,
+		.enable_bit	= EN_XORPCK,
+		.recalc		= &followparent_recalc,
+		.enable		= &omap1_clk_enable,
+		.disable	= &omap1_clk_disable,
+	},
+	.idlect_shift	= 1,
+};
+
+static struct arm_idlect1_clk armtim_ck = {
+	.clk = {
+		.name		= "armtim_ck",
+		.parent		= &ck_ref,
+		.flags		= CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+				  CLOCK_IDLE_CONTROL,
+		.enable_reg	= (void __iomem *)ARM_IDLECT2,
+		.enable_bit	= EN_TIMCK,
+		.recalc		= &followparent_recalc,
+		.enable		= &omap1_clk_enable,
+		.disable	= &omap1_clk_disable,
+	},
+	.idlect_shift	= 9,
+};
+
+static struct arm_idlect1_clk armwdt_ck = {
+	.clk = {
+		.name		= "armwdt_ck",
+		.parent		= &ck_ref,
+		.flags		= CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+				  CLOCK_IDLE_CONTROL,
+		.enable_reg	= (void __iomem *)ARM_IDLECT2,
+		.enable_bit	= EN_WDTCK,
+		.recalc		= &omap1_watchdog_recalc,
+		.enable		= &omap1_clk_enable,
+		.disable	= &omap1_clk_disable,
+	},
+	.idlect_shift	= 0,
+};
+
+static struct clk arminth_ck16xx = {
+	.name		= "arminth_ck",
+	.parent		= &arm_ck,
+	.flags		= CLOCK_IN_OMAP16XX | ALWAYS_ENABLED,
+	.recalc		= &followparent_recalc,
+	/* Note: On 16xx the frequency can be divided by 2 by programming
+	 * ARM_CKCTL:ARM_INTHCK_SEL(14) to 1
+	 *
+	 * 1510 version is in TC clocks.
+	 */
+	.enable		= &omap1_clk_enable,
+	.disable	= &omap1_clk_disable,
+};
+
+static struct clk dsp_ck = {
+	.name		= "dsp_ck",
+	.parent		= &ck_dpll1,
+	.flags		= CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+			  RATE_CKCTL,
+	.enable_reg	= (void __iomem *)ARM_CKCTL,
+	.enable_bit	= EN_DSPCK,
+	.rate_offset	= CKCTL_DSPDIV_OFFSET,
+	.recalc		= &omap1_ckctl_recalc,
+	.enable		= &omap1_clk_enable,
+	.disable	= &omap1_clk_disable,
+};
+
+static struct clk dspmmu_ck = {
+	.name		= "dspmmu_ck",
+	.parent		= &ck_dpll1,
+	.flags		= CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+			  RATE_CKCTL | ALWAYS_ENABLED,
+	.rate_offset	= CKCTL_DSPMMUDIV_OFFSET,
+	.recalc		= &omap1_ckctl_recalc,
+	.enable		= &omap1_clk_enable,
+	.disable	= &omap1_clk_disable,
+};
+
+static struct clk dspper_ck = {
+	.name		= "dspper_ck",
+	.parent		= &ck_dpll1,
+	.flags		= CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+			  RATE_CKCTL | VIRTUAL_IO_ADDRESS,
+	.enable_reg	= (void __iomem *)DSP_IDLECT2,
+	.enable_bit	= EN_PERCK,
+	.rate_offset	= CKCTL_PERDIV_OFFSET,
+	.recalc		= &omap1_ckctl_recalc_dsp_domain,
+	.set_rate	= &omap1_clk_set_rate_dsp_domain,
+	.enable		= &omap1_clk_enable_dsp_domain,
+	.disable	= &omap1_clk_disable_dsp_domain,
+};
+
+static struct clk dspxor_ck = {
+	.name		= "dspxor_ck",
+	.parent		= &ck_ref,
+	.flags		= CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+			  VIRTUAL_IO_ADDRESS,
+	.enable_reg	= (void __iomem *)DSP_IDLECT2,
+	.enable_bit	= EN_XORPCK,
+	.recalc		= &followparent_recalc,
+	.enable		= &omap1_clk_enable_dsp_domain,
+	.disable	= &omap1_clk_disable_dsp_domain,
+};
+
+static struct clk dsptim_ck = {
+	.name		= "dsptim_ck",
+	.parent		= &ck_ref,
+	.flags		= CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+			  VIRTUAL_IO_ADDRESS,
+	.enable_reg	= (void __iomem *)DSP_IDLECT2,
+	.enable_bit	= EN_DSPTIMCK,
+	.recalc		= &followparent_recalc,
+	.enable		= &omap1_clk_enable_dsp_domain,
+	.disable	= &omap1_clk_disable_dsp_domain,
+};
+
+/* Tie ARM_IDLECT1:IDLIF_ARM to this logical clock structure */
+static struct arm_idlect1_clk tc_ck = {
+	.clk = {
+		.name		= "tc_ck",
+		.parent		= &ck_dpll1,
+		.flags		= CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+				  CLOCK_IN_OMAP730 | RATE_CKCTL |
+				  RATE_PROPAGATES | ALWAYS_ENABLED |
+				  CLOCK_IDLE_CONTROL,
+		.rate_offset	= CKCTL_TCDIV_OFFSET,
+		.recalc		= &omap1_ckctl_recalc,
+		.enable		= &omap1_clk_enable,
+		.disable	= &omap1_clk_disable,
+	},
+	.idlect_shift	= 6,
+};
+
+static struct clk arminth_ck1510 = {
+	.name		= "arminth_ck",
+	.parent		= &tc_ck.clk,
+	.flags		= CLOCK_IN_OMAP1510 | ALWAYS_ENABLED,
+	.recalc		= &followparent_recalc,
+	/* Note: On 1510 the frequency follows TC_CK
+	 *
+	 * 16xx version is in MPU clocks.
+	 */
+	.enable		= &omap1_clk_enable,
+	.disable	= &omap1_clk_disable,
+};
+
+static struct clk tipb_ck = {
+	/* No-idle controlled by "tc_ck" */
+	.name		= "tibp_ck",
+	.parent		= &tc_ck.clk,
+	.flags		= CLOCK_IN_OMAP1510 | ALWAYS_ENABLED,
+	.recalc		= &followparent_recalc,
+	.enable		= &omap1_clk_enable,
+	.disable	= &omap1_clk_disable,
+};
+
+static struct clk l3_ocpi_ck = {
+	/* No-idle controlled by "tc_ck" */
+	.name		= "l3_ocpi_ck",
+	.parent		= &tc_ck.clk,
+	.flags		= CLOCK_IN_OMAP16XX,
+	.enable_reg	= (void __iomem *)ARM_IDLECT3,
+	.enable_bit	= EN_OCPI_CK,
+	.recalc		= &followparent_recalc,
+	.enable		= &omap1_clk_enable,
+	.disable	= &omap1_clk_disable,
+};
+
+static struct clk tc1_ck = {
+	.name		= "tc1_ck",
+	.parent		= &tc_ck.clk,
+	.flags		= CLOCK_IN_OMAP16XX,
+	.enable_reg	= (void __iomem *)ARM_IDLECT3,
+	.enable_bit	= EN_TC1_CK,
+	.recalc		= &followparent_recalc,
+	.enable		= &omap1_clk_enable,
+	.disable	= &omap1_clk_disable,
+};
+
+static struct clk tc2_ck = {
+	.name		= "tc2_ck",
+	.parent		= &tc_ck.clk,
+	.flags		= CLOCK_IN_OMAP16XX,
+	.enable_reg	= (void __iomem *)ARM_IDLECT3,
+	.enable_bit	= EN_TC2_CK,
+	.recalc		= &followparent_recalc,
+	.enable		= &omap1_clk_enable,
+	.disable	= &omap1_clk_disable,
+};
+
+static struct clk dma_ck = {
+	/* No-idle controlled by "tc_ck" */
+	.name		= "dma_ck",
+	.parent		= &tc_ck.clk,
+	.flags		= CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+			  ALWAYS_ENABLED,
+	.recalc		= &followparent_recalc,
+	.enable		= &omap1_clk_enable,
+	.disable	= &omap1_clk_disable,
+};
+
+static struct clk dma_lcdfree_ck = {
+	.name		= "dma_lcdfree_ck",
+	.parent		= &tc_ck.clk,
+	.flags		= CLOCK_IN_OMAP16XX | ALWAYS_ENABLED,
+	.recalc		= &followparent_recalc,
+	.enable		= &omap1_clk_enable,
+	.disable	= &omap1_clk_disable,
+};
+
+static struct arm_idlect1_clk api_ck = {
+	.clk = {
+		.name		= "api_ck",
+		.parent		= &tc_ck.clk,
+		.flags		= CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+				  CLOCK_IDLE_CONTROL,
+		.enable_reg	= (void __iomem *)ARM_IDLECT2,
+		.enable_bit	= EN_APICK,
+		.recalc		= &followparent_recalc,
+		.enable		= &omap1_clk_enable,
+		.disable	= &omap1_clk_disable,
+	},
+	.idlect_shift	= 8,
+};
+
+static struct arm_idlect1_clk lb_ck = {
+	.clk = {
+		.name		= "lb_ck",
+		.parent		= &tc_ck.clk,
+		.flags		= CLOCK_IN_OMAP1510 | CLOCK_IDLE_CONTROL,
+		.enable_reg	= (void __iomem *)ARM_IDLECT2,
+		.enable_bit	= EN_LBCK,
+		.recalc		= &followparent_recalc,
+		.enable		= &omap1_clk_enable,
+		.disable	= &omap1_clk_disable,
+	},
+	.idlect_shift	= 4,
+};
+
+static struct clk rhea1_ck = {
+	.name		= "rhea1_ck",
+	.parent		= &tc_ck.clk,
+	.flags		= CLOCK_IN_OMAP16XX | ALWAYS_ENABLED,
+	.recalc		= &followparent_recalc,
+	.enable		= &omap1_clk_enable,
+	.disable	= &omap1_clk_disable,
+};
+
+static struct clk rhea2_ck = {
+	.name		= "rhea2_ck",
+	.parent		= &tc_ck.clk,
+	.flags		= CLOCK_IN_OMAP16XX | ALWAYS_ENABLED,
+	.recalc		= &followparent_recalc,
+	.enable		= &omap1_clk_enable,
+	.disable	= &omap1_clk_disable,
+};
+
+static struct clk lcd_ck_16xx = {
+	.name		= "lcd_ck",
+	.parent		= &ck_dpll1,
+	.flags		= CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP730 | RATE_CKCTL,
+	.enable_reg	= (void __iomem *)ARM_IDLECT2,
+	.enable_bit	= EN_LCDCK,
+	.rate_offset	= CKCTL_LCDDIV_OFFSET,
+	.recalc		= &omap1_ckctl_recalc,
+	.enable		= &omap1_clk_enable,
+	.disable	= &omap1_clk_disable,
+};
+
+static struct arm_idlect1_clk lcd_ck_1510 = {
+	.clk = {
+		.name		= "lcd_ck",
+		.parent		= &ck_dpll1,
+		.flags		= CLOCK_IN_OMAP1510 | RATE_CKCTL |
+				  CLOCK_IDLE_CONTROL,
+		.enable_reg	= (void __iomem *)ARM_IDLECT2,
+		.enable_bit	= EN_LCDCK,
+		.rate_offset	= CKCTL_LCDDIV_OFFSET,
+		.recalc		= &omap1_ckctl_recalc,
+		.enable		= &omap1_clk_enable,
+		.disable	= &omap1_clk_disable,
+	},
+	.idlect_shift	= 3,
+};
+
+static struct clk uart1_1510 = {
+	.name		= "uart1_ck",
+	/* Direct from ULPD, no real parent */
+	.parent		= &armper_ck.clk,
+	.rate		= 12000000,
+	.flags		= CLOCK_IN_OMAP1510 | ENABLE_REG_32BIT |
+			  ALWAYS_ENABLED | CLOCK_NO_IDLE_PARENT,
+	.enable_reg	= (void __iomem *)MOD_CONF_CTRL_0,
+	.enable_bit	= 29,	/* Chooses between 12MHz and 48MHz */
+	.set_rate	= &omap1_set_uart_rate,
+	.recalc		= &omap1_uart_recalc,
+	.enable		= &omap1_clk_enable,
+	.disable	= &omap1_clk_disable,
+};
+
+static struct uart_clk uart1_16xx = {
+	.clk	= {
+		.name		= "uart1_ck",
+		/* Direct from ULPD, no real parent */
+		.parent		= &armper_ck.clk,
+		.rate		= 48000000,
+		.flags		= CLOCK_IN_OMAP16XX | RATE_FIXED |
+				  ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
+		.enable_reg	= (void __iomem *)MOD_CONF_CTRL_0,
+		.enable_bit	= 29,
+		.enable		= &omap1_clk_enable_uart_functional,
+		.disable	= &omap1_clk_disable_uart_functional,
+	},
+	.sysc_addr	= 0xfffb0054,
+};
+
+static struct clk uart2_ck = {
+	.name		= "uart2_ck",
+	/* Direct from ULPD, no real parent */
+	.parent		= &armper_ck.clk,
+	.rate		= 12000000,
+	.flags		= CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+			  ENABLE_REG_32BIT | ALWAYS_ENABLED |
+			  CLOCK_NO_IDLE_PARENT,
+	.enable_reg	= (void __iomem *)MOD_CONF_CTRL_0,
+	.enable_bit	= 30,	/* Chooses between 12MHz and 48MHz */
+	.set_rate	= &omap1_set_uart_rate,
+	.recalc		= &omap1_uart_recalc,
+	.enable		= &omap1_clk_enable,
+	.disable	= &omap1_clk_disable,
+};
+
+static struct clk uart3_1510 = {
+	.name		= "uart3_ck",
+	/* Direct from ULPD, no real parent */
+	.parent		= &armper_ck.clk,
+	.rate		= 12000000,
+	.flags		= CLOCK_IN_OMAP1510 | ENABLE_REG_32BIT |
+			  ALWAYS_ENABLED | CLOCK_NO_IDLE_PARENT,
+	.enable_reg	= (void __iomem *)MOD_CONF_CTRL_0,
+	.enable_bit	= 31,	/* Chooses between 12MHz and 48MHz */
+	.set_rate	= &omap1_set_uart_rate,
+	.recalc		= &omap1_uart_recalc,
+	.enable		= &omap1_clk_enable,
+	.disable	= &omap1_clk_disable,
+};
+
+static struct uart_clk uart3_16xx = {
+	.clk	= {
+		.name		= "uart3_ck",
+		/* Direct from ULPD, no real parent */
+		.parent		= &armper_ck.clk,
+		.rate		= 48000000,
+		.flags		= CLOCK_IN_OMAP16XX | RATE_FIXED |
+				  ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
+		.enable_reg	= (void __iomem *)MOD_CONF_CTRL_0,
+		.enable_bit	= 31,
+		.enable		= &omap1_clk_enable_uart_functional,
+		.disable	= &omap1_clk_disable_uart_functional,
+	},
+	.sysc_addr	= 0xfffb9854,
+};
+
+static struct clk usb_clko = {	/* 6 MHz output on W4_USB_CLKO */
+	.name		= "usb_clko",
+	/* Direct from ULPD, no parent */
+	.rate		= 6000000,
+	.flags		= CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+			  RATE_FIXED | ENABLE_REG_32BIT,
+	.enable_reg	= (void __iomem *)ULPD_CLOCK_CTRL,
+	.enable_bit	= USB_MCLK_EN_BIT,
+	.enable		= &omap1_clk_enable,
+	.disable	= &omap1_clk_disable,
+};
+
+static struct clk usb_hhc_ck1510 = {
+	.name		= "usb_hhc_ck",
+	/* Direct from ULPD, no parent */
+	.rate		= 48000000, /* Actually 2 clocks, 12MHz and 48MHz */
+	.flags		= CLOCK_IN_OMAP1510 |
+			  RATE_FIXED | ENABLE_REG_32BIT,
+	.enable_reg	= (void __iomem *)MOD_CONF_CTRL_0,
+	.enable_bit	= USB_HOST_HHC_UHOST_EN,
+	.enable		= &omap1_clk_enable,
+	.disable	= &omap1_clk_disable,
+};
+
+static struct clk usb_hhc_ck16xx = {
+	.name		= "usb_hhc_ck",
+	/* Direct from ULPD, no parent */
+	.rate		= 48000000,
+	/* OTG_SYSCON_2.OTG_PADEN == 0 (not 1510-compatible) */
+	.flags		= CLOCK_IN_OMAP16XX |
+			  RATE_FIXED | ENABLE_REG_32BIT,
+	.enable_reg	= (void __iomem *)OTG_BASE + 0x08 /* OTG_SYSCON_2 */,
+	.enable_bit	= 8 /* UHOST_EN */,
+	.enable		= &omap1_clk_enable,
+	.disable	= &omap1_clk_disable,
+};
+
+static struct clk usb_dc_ck = {
+	.name		= "usb_dc_ck",
+	/* Direct from ULPD, no parent */
+	.rate		= 48000000,
+	.flags		= CLOCK_IN_OMAP16XX | RATE_FIXED,
+	.enable_reg	= (void __iomem *)SOFT_REQ_REG,
+	.enable_bit	= 4,
+	.enable		= &omap1_clk_enable,
+	.disable	= &omap1_clk_disable,
+};
+
+static struct clk mclk_1510 = {
+	.name		= "mclk",
+	/* Direct from ULPD, no parent. May be enabled by ext hardware. */
+	.rate		= 12000000,
+	.flags		= CLOCK_IN_OMAP1510 | RATE_FIXED,
+	.enable		= &omap1_clk_enable,
+	.disable	= &omap1_clk_disable,
+};
+
+static struct clk mclk_16xx = {
+	.name		= "mclk",
+	/* Direct from ULPD, no parent. May be enabled by ext hardware. */
+	.flags		= CLOCK_IN_OMAP16XX,
+	.enable_reg	= (void __iomem *)COM_CLK_DIV_CTRL_SEL,
+	.enable_bit	= COM_ULPD_PLL_CLK_REQ,
+	.set_rate	= &omap1_set_ext_clk_rate,
+	.round_rate	= &omap1_round_ext_clk_rate,
+	.init		= &omap1_init_ext_clk,
+	.enable		= &omap1_clk_enable,
+	.disable	= &omap1_clk_disable,
+};
+
+static struct clk bclk_1510 = {
+	.name		= "bclk",
+	/* Direct from ULPD, no parent. May be enabled by ext hardware. */
+	.rate		= 12000000,
+	.flags		= CLOCK_IN_OMAP1510 | RATE_FIXED,
+	.enable		= &omap1_clk_enable,
+	.disable	= &omap1_clk_disable,
+};
+
+static struct clk bclk_16xx = {
+	.name		= "bclk",
+	/* Direct from ULPD, no parent. May be enabled by ext hardware. */
+	.flags		= CLOCK_IN_OMAP16XX,
+	.enable_reg	= (void __iomem *)SWD_CLK_DIV_CTRL_SEL,
+	.enable_bit	= SWD_ULPD_PLL_CLK_REQ,
+	.set_rate	= &omap1_set_ext_clk_rate,
+	.round_rate	= &omap1_round_ext_clk_rate,
+	.init		= &omap1_init_ext_clk,
+	.enable		= &omap1_clk_enable,
+	.disable	= &omap1_clk_disable,
+};
+
+static struct clk mmc1_ck = {
+	.name		= "mmc1_ck",
+	/* Functional clock is direct from ULPD, interface clock is ARMPER */
+	.parent		= &armper_ck.clk,
+	.rate		= 48000000,
+	.flags		= CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+			  RATE_FIXED | ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
+	.enable_reg	= (void __iomem *)MOD_CONF_CTRL_0,
+	.enable_bit	= 23,
+	.enable		= &omap1_clk_enable,
+	.disable	= &omap1_clk_disable,
+};
+
+static struct clk mmc2_ck = {
+	.name		= "mmc2_ck",
+	/* Functional clock is direct from ULPD, interface clock is ARMPER */
+	.parent		= &armper_ck.clk,
+	.rate		= 48000000,
+	.flags		= CLOCK_IN_OMAP16XX |
+			  RATE_FIXED | ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
+	.enable_reg	= (void __iomem *)MOD_CONF_CTRL_0,
+	.enable_bit	= 20,
+	.enable		= &omap1_clk_enable,
+	.disable	= &omap1_clk_disable,
+};
+
+static struct clk virtual_ck_mpu = {
+	.name		= "mpu",
+	.flags		= CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+			  VIRTUAL_CLOCK | ALWAYS_ENABLED,
+	.parent		= &arm_ck, /* A smarter alias for arm_ck */
+	.recalc		= &followparent_recalc,
+	.set_rate	= &omap1_select_table_rate,
+	.round_rate	= &omap1_round_to_table_rate,
+	.enable		= &omap1_clk_enable,
+	.disable	= &omap1_clk_disable,
+};
+
+static struct clk *onchip_clks[] = {
+	/* non-ULPD clocks */
+	&ck_ref,
+	&ck_dpll1,
+	/* CK_GEN1 clocks */
+	&ck_dpll1out.clk,
+	&arm_ck,
+	&armper_ck.clk,
+	&arm_gpio_ck,
+	&armxor_ck.clk,
+	&armtim_ck.clk,
+	&armwdt_ck.clk,
+	&arminth_ck1510,  &arminth_ck16xx,
+	/* CK_GEN2 clocks */
+	&dsp_ck,
+	&dspmmu_ck,
+	&dspper_ck,
+	&dspxor_ck,
+	&dsptim_ck,
+	/* CK_GEN3 clocks */
+	&tc_ck.clk,
+	&tipb_ck,
+	&l3_ocpi_ck,
+	&tc1_ck,
+	&tc2_ck,
+	&dma_ck,
+	&dma_lcdfree_ck,
+	&api_ck.clk,
+	&lb_ck.clk,
+	&rhea1_ck,
+	&rhea2_ck,
+	&lcd_ck_16xx,
+	&lcd_ck_1510.clk,
+	/* ULPD clocks */
+	&uart1_1510,
+	&uart1_16xx.clk,
+	&uart2_ck,
+	&uart3_1510,
+	&uart3_16xx.clk,
+	&usb_clko,
+	&usb_hhc_ck1510, &usb_hhc_ck16xx,
+	&usb_dc_ck,
+	&mclk_1510,  &mclk_16xx,
+	&bclk_1510,  &bclk_16xx,
+	&mmc1_ck,
+	&mmc2_ck,
+	/* Virtual clocks */
+	&virtual_ck_mpu,
+};
+
+#endif
diff --git a/arch/arm/mach-omap1/devices.c b/arch/arm/mach-omap1/devices.c
index 3c5d901..ecbc475 100644
--- a/arch/arm/mach-omap1/devices.c
+++ b/arch/arm/mach-omap1/devices.c
@@ -25,56 +25,7 @@
 #include <asm/arch/mux.h>
 #include <asm/arch/gpio.h>
 
-
-static void omap_nop_release(struct device *dev)
-{
-        /* Nothing */
-}
-
-/*-------------------------------------------------------------------------*/
-
-#if	defined(CONFIG_I2C_OMAP) || defined(CONFIG_I2C_OMAP_MODULE)
-
-#define	OMAP_I2C_BASE		0xfffb3800
-
-static struct resource i2c_resources[] = {
-	{
-		.start		= OMAP_I2C_BASE,
-		.end		= OMAP_I2C_BASE + 0x3f,
-		.flags		= IORESOURCE_MEM,
-	},
-	{
-		.start		= INT_I2C,
-		.flags		= IORESOURCE_IRQ,
-	},
-};
-
-/* DMA not used; works around erratum writing to non-empty i2c fifo */
-
-static struct platform_device omap_i2c_device = {
-        .name           = "i2c_omap",
-        .id             = -1,
-        .dev = {
-                .release        = omap_nop_release,
-        },
-	.num_resources	= ARRAY_SIZE(i2c_resources),
-	.resource	= i2c_resources,
-};
-
-static void omap_init_i2c(void)
-{
-	/* FIXME define and use a boot tag, in case of boards that
-	 * either don't wire up I2C, or chips that mux it differently...
-	 * it can include clocking and address info, maybe more.
-	 */
-	omap_cfg_reg(I2C_SCL);
-	omap_cfg_reg(I2C_SDA);
-
-	(void) platform_device_register(&omap_i2c_device);
-}
-#else
-static inline void omap_init_i2c(void) {}
-#endif
+extern void omap_nop_release(struct device *dev);
 
 /*-------------------------------------------------------------------------*/
 
@@ -110,137 +61,6 @@
 
 /*-------------------------------------------------------------------------*/
 
-#if	defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE)
-
-#define	OMAP_MMC1_BASE		0xfffb7800
-#define	OMAP_MMC2_BASE		0xfffb7c00	/* omap16xx only */
-
-static struct omap_mmc_conf mmc1_conf;
-
-static u64 mmc1_dmamask = 0xffffffff;
-
-static struct resource mmc1_resources[] = {
-	{
-		.start		= IO_ADDRESS(OMAP_MMC1_BASE),
-		.end		= IO_ADDRESS(OMAP_MMC1_BASE) + 0x7f,
-		.flags		= IORESOURCE_MEM,
-	},
-	{
-		.start		= INT_MMC,
-		.flags		= IORESOURCE_IRQ,
-	},
-};
-
-static struct platform_device mmc_omap_device1 = {
-	.name		= "mmci-omap",
-	.id		= 1,
-	.dev = {
-		.release	= omap_nop_release,
-		.dma_mask	= &mmc1_dmamask,
-		.platform_data	= &mmc1_conf,
-	},
-	.num_resources	= ARRAY_SIZE(mmc1_resources),
-	.resource	= mmc1_resources,
-};
-
-#ifdef	CONFIG_ARCH_OMAP16XX
-
-static struct omap_mmc_conf mmc2_conf;
-
-static u64 mmc2_dmamask = 0xffffffff;
-
-static struct resource mmc2_resources[] = {
-	{
-		.start		= IO_ADDRESS(OMAP_MMC2_BASE),
-		.end		= IO_ADDRESS(OMAP_MMC2_BASE) + 0x7f,
-		.flags		= IORESOURCE_MEM,
-	},
-	{
-		.start		= INT_1610_MMC2,
-		.flags		= IORESOURCE_IRQ,
-	},
-};
-
-static struct platform_device mmc_omap_device2 = {
-	.name		= "mmci-omap",
-	.id		= 2,
-	.dev = {
-		.release	= omap_nop_release,
-		.dma_mask	= &mmc2_dmamask,
-		.platform_data	= &mmc2_conf,
-	},
-	.num_resources	= ARRAY_SIZE(mmc2_resources),
-	.resource	= mmc2_resources,
-};
-#endif
-
-static void __init omap_init_mmc(void)
-{
-	const struct omap_mmc_config	*mmc_conf;
-	const struct omap_mmc_conf	*mmc;
-
-	/* NOTE:  assumes MMC was never (wrongly) enabled */
-	mmc_conf = omap_get_config(OMAP_TAG_MMC, struct omap_mmc_config);
-	if (!mmc_conf)
-		return;
-
-	/* block 1 is always available and has just one pinout option */
-	mmc = &mmc_conf->mmc[0];
-	if (mmc->enabled) {
-		omap_cfg_reg(MMC_CMD);
-		omap_cfg_reg(MMC_CLK);
-		omap_cfg_reg(MMC_DAT0);
-		if (cpu_is_omap1710()) {
-	              omap_cfg_reg(M15_1710_MMC_CLKI);
-	              omap_cfg_reg(P19_1710_MMC_CMDDIR);
-	              omap_cfg_reg(P20_1710_MMC_DATDIR0);
-	        }
-		if (mmc->wire4) {
-			omap_cfg_reg(MMC_DAT1);
-			/* NOTE:  DAT2 can be on W10 (here) or M15 */
-			if (!mmc->nomux)
-				omap_cfg_reg(MMC_DAT2);
-			omap_cfg_reg(MMC_DAT3);
-		}
-		mmc1_conf = *mmc;
-		(void) platform_device_register(&mmc_omap_device1);
-	}
-
-#ifdef	CONFIG_ARCH_OMAP16XX
-	/* block 2 is on newer chips, and has many pinout options */
-	mmc = &mmc_conf->mmc[1];
-	if (mmc->enabled) {
-		if (!mmc->nomux) {
-			omap_cfg_reg(Y8_1610_MMC2_CMD);
-			omap_cfg_reg(Y10_1610_MMC2_CLK);
-			omap_cfg_reg(R18_1610_MMC2_CLKIN);
-			omap_cfg_reg(W8_1610_MMC2_DAT0);
-			if (mmc->wire4) {
-				omap_cfg_reg(V8_1610_MMC2_DAT1);
-				omap_cfg_reg(W15_1610_MMC2_DAT2);
-				omap_cfg_reg(R10_1610_MMC2_DAT3);
-			}
-
-			/* These are needed for the level shifter */
-			omap_cfg_reg(V9_1610_MMC2_CMDDIR);
-			omap_cfg_reg(V5_1610_MMC2_DATDIR0);
-			omap_cfg_reg(W19_1610_MMC2_DATDIR1);
-		}
-
-		/* Feedback clock must be set on OMAP-1710 MMC2 */
-		if (cpu_is_omap1710())
-			omap_writel(omap_readl(MOD_CONF_CTRL_1) | (1 << 24),
-				     MOD_CONF_CTRL_1);
-		mmc2_conf = *mmc;
-		(void) platform_device_register(&mmc_omap_device2);
-	}
-#endif
-	return;
-}
-#else
-static inline void omap_init_mmc(void) {}
-#endif
-
 #if	defined(CONFIG_OMAP_RTC) || defined(CONFIG_OMAP_RTC)
 
 #define	OMAP_RTC_BASE		0xfffb4800
@@ -279,38 +99,6 @@
 static inline void omap_init_rtc(void) {}
 #endif
 
-/*-------------------------------------------------------------------------*/
-
-#if	defined(CONFIG_OMAP16XX_WATCHDOG) || defined(CONFIG_OMAP16XX_WATCHDOG_MODULE)
-
-#define	OMAP_WDT_BASE		0xfffeb000
-
-static struct resource wdt_resources[] = {
-	{
-		.start		= OMAP_WDT_BASE,
-		.end		= OMAP_WDT_BASE + 0x4f,
-		.flags		= IORESOURCE_MEM,
-	},
-};
-
-static struct platform_device omap_wdt_device = {
-	.name	   = "omap1610_wdt",
-	.id	     = -1,
-	.dev = {
-		.release	= omap_nop_release,
-	},
-	.num_resources	= ARRAY_SIZE(wdt_resources),
-	.resource	= wdt_resources,
-};
-
-static void omap_init_wdt(void)
-{
-	(void) platform_device_register(&omap_wdt_device);
-}
-#else
-static inline void omap_init_wdt(void) {}
-#endif
-
 
 /*-------------------------------------------------------------------------*/
 
@@ -334,18 +122,15 @@
  * may be handled by the boot loader, and drivers should expect it will
  * normally have been done by the time they're probed.
  */
-static int __init omap_init_devices(void)
+static int __init omap1_init_devices(void)
 {
 	/* please keep these calls, and their implementations above,
 	 * in alphabetical order so they're easier to sort through.
 	 */
-	omap_init_i2c();
 	omap_init_irda();
-	omap_init_mmc();
 	omap_init_rtc();
-	omap_init_wdt();
 
 	return 0;
 }
-arch_initcall(omap_init_devices);
+arch_initcall(omap1_init_devices);
 
diff --git a/arch/arm/mach-omap1/id.c b/arch/arm/mach-omap1/id.c
index 986c3b7..5c637c0 100644
--- a/arch/arm/mach-omap1/id.c
+++ b/arch/arm/mach-omap1/id.c
@@ -18,6 +18,13 @@
 
 #include <asm/io.h>
 
+#define OMAP_DIE_ID_0		0xfffe1800
+#define OMAP_DIE_ID_1		0xfffe1804
+#define OMAP_PRODUCTION_ID_0	0xfffe2000
+#define OMAP_PRODUCTION_ID_1	0xfffe2004
+#define OMAP32_ID_0		0xfffed400
+#define OMAP32_ID_1		0xfffed404
+
 struct omap_id {
 	u16	jtag_id;	/* Used to determine OMAP type */
 	u8	die_rev;	/* Processor revision */
@@ -27,6 +34,7 @@
 
 /* Register values to detect the OMAP version */
 static struct omap_id omap_ids[] __initdata = {
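+	/* OMAP310 entry (type 0x03100000); pairs with the omap310 IRQ bank
+	 * support added elsewhere in this patch. */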
+	{ .jtag_id = 0xb574, .die_rev = 0x2, .omap_id = 0x03310315, .type = 0x03100000},
 	{ .jtag_id = 0x355f, .die_rev = 0x0, .omap_id = 0x03320000, .type = 0x07300100},
 	{ .jtag_id = 0xb55f, .die_rev = 0x0, .omap_id = 0x03320000, .type = 0x07300300},
 	{ .jtag_id = 0xb470, .die_rev = 0x0, .omap_id = 0x03310100, .type = 0x15100000},
@@ -164,6 +172,7 @@
 	case 0x07:
 		system_rev |= 0x07;
 		break;
+	case 0x03:
 	case 0x15:
 		system_rev |= 0x15;
 		break;
diff --git a/arch/arm/mach-omap1/io.c b/arch/arm/mach-omap1/io.c
index 79fb865..a7a19f7 100644
--- a/arch/arm/mach-omap1/io.c
+++ b/arch/arm/mach-omap1/io.c
@@ -15,9 +15,10 @@
 
 #include <asm/mach/map.h>
 #include <asm/io.h>
+#include <asm/arch/mux.h>
 #include <asm/arch/tc.h>
 
-extern int clk_init(void);
+extern int omap1_clk_init(void);
 extern void omap_check_revision(void);
 extern void omap_sram_init(void);
 
@@ -50,7 +51,7 @@
 };
 #endif
 
-#ifdef CONFIG_ARCH_OMAP1510
+#ifdef CONFIG_ARCH_OMAP15XX
 static struct map_desc omap1510_io_desc[] __initdata = {
 	{
 		.virtual	= OMAP1510_DSP_BASE,
@@ -98,7 +99,7 @@
 		iotable_init(omap730_io_desc, ARRAY_SIZE(omap730_io_desc));
 	}
 #endif
-#ifdef CONFIG_ARCH_OMAP1510
+#ifdef CONFIG_ARCH_OMAP15XX
 	if (cpu_is_omap1510()) {
 		iotable_init(omap1510_io_desc, ARRAY_SIZE(omap1510_io_desc));
 	}
@@ -119,7 +120,7 @@
 
 	/* Must init clocks early to assure that timer interrupt works
 	 */
-	clk_init();
+	omap1_clk_init();
 }
 
 /*
@@ -127,7 +128,9 @@
  */
 void __init omap_map_common_io(void)
 {
-	if (!initialized)
+	if (!initialized) {
 		_omap_map_io();
+		omap1_mux_init();
+	}
 }
 
diff --git a/arch/arm/mach-omap1/irq.c b/arch/arm/mach-omap1/irq.c
index 192ce60..ed65a7d 100644
--- a/arch/arm/mach-omap1/irq.c
+++ b/arch/arm/mach-omap1/irq.c
@@ -47,6 +47,7 @@
 #include <asm/irq.h>
 #include <asm/mach/irq.h>
 #include <asm/arch/gpio.h>
+#include <asm/arch/cpu.h>
 
 #include <asm/io.h>
 
@@ -147,11 +148,15 @@
 };
 #endif
 
-#ifdef CONFIG_ARCH_OMAP1510
+#ifdef CONFIG_ARCH_OMAP15XX
 static struct omap_irq_bank omap1510_irq_banks[] = {
 	{ .base_reg = OMAP_IH1_BASE, 		.trigger_map = 0xb3febfff },
 	{ .base_reg = OMAP_IH2_BASE, 		.trigger_map = 0xffbfffed },
 };
+static struct omap_irq_bank omap310_irq_banks[] = {
+	{ .base_reg = OMAP_IH1_BASE, 		.trigger_map = 0xb3faefc3 },
+	{ .base_reg = OMAP_IH2_BASE, 		.trigger_map = 0x65b3c061 },
+};
 #endif
 
 #if defined(CONFIG_ARCH_OMAP16XX)
@@ -181,11 +186,15 @@
 		irq_bank_count = ARRAY_SIZE(omap730_irq_banks);
 	}
 #endif
-#ifdef CONFIG_ARCH_OMAP1510
+#ifdef CONFIG_ARCH_OMAP15XX
 	if (cpu_is_omap1510()) {
 		irq_banks = omap1510_irq_banks;
 		irq_bank_count = ARRAY_SIZE(omap1510_irq_banks);
 	}
+	if (cpu_is_omap310()) {
+		irq_banks = omap310_irq_banks;
+		irq_bank_count = ARRAY_SIZE(omap310_irq_banks);
+	}
 #endif
 #if defined(CONFIG_ARCH_OMAP16XX)
 	if (cpu_is_omap16xx()) {
@@ -226,9 +235,11 @@
 	}
 
 	/* Unmask level 2 handler */
-	if (cpu_is_omap730()) {
+
+	if (cpu_is_omap730())
 		omap_unmask_irq(INT_730_IH2_IRQ);
-	} else {
-		omap_unmask_irq(INT_IH2_IRQ);
-	}
+	else if (cpu_is_omap1510())
+		omap_unmask_irq(INT_1510_IH2_IRQ);
+	else if (cpu_is_omap16xx())
+		omap_unmask_irq(INT_1610_IH2_IRQ);
 }
diff --git a/arch/arm/mach-omap1/leds-h2p2-debug.c b/arch/arm/mach-omap1/leds-h2p2-debug.c
index 399010c..6506508 100644
--- a/arch/arm/mach-omap1/leds-h2p2-debug.c
+++ b/arch/arm/mach-omap1/leds-h2p2-debug.c
@@ -18,6 +18,7 @@
 #include <asm/hardware.h>
 #include <asm/leds.h>
 #include <asm/system.h>
+#include <asm/mach-types.h>
 
 #include <asm/arch/fpga.h>
 #include <asm/arch/gpio.h>
@@ -63,14 +64,19 @@
 	case led_stop:
 	case led_halted:
 		/* all leds off during suspend or shutdown */
-		omap_set_gpio_dataout(GPIO_TIMER, 0);
-		omap_set_gpio_dataout(GPIO_IDLE, 0);
+
+		if (!machine_is_omap_perseus2()) {
+			omap_set_gpio_dataout(GPIO_TIMER, 0);
+			omap_set_gpio_dataout(GPIO_IDLE, 0);
+		}
+
 		__raw_writew(~0, &fpga->leds);
 		led_state &= ~LED_STATE_ENABLED;
 		if (evt == led_halted) {
 			iounmap(fpga);
 			fpga = NULL;
 		}
+
 		goto done;
 
 	case led_claim:
@@ -85,18 +91,37 @@
 #ifdef CONFIG_LEDS_TIMER
 	case led_timer:
 		led_state ^= LED_TIMER_ON;
-		omap_set_gpio_dataout(GPIO_TIMER, led_state & LED_TIMER_ON);
-		goto done;
+
+		if (machine_is_omap_perseus2())
+			hw_led_state ^= H2P2_DBG_FPGA_P2_LED_TIMER;
+		else {
+			omap_set_gpio_dataout(GPIO_TIMER, led_state & LED_TIMER_ON);
+			goto done;
+		}
+
+		break;
 #endif
 
 #ifdef CONFIG_LEDS_CPU
 	case led_idle_start:
-		omap_set_gpio_dataout(GPIO_IDLE, 1);
-		goto done;
+		if (machine_is_omap_perseus2())
+			hw_led_state |= H2P2_DBG_FPGA_P2_LED_IDLE;
+		else {
+			omap_set_gpio_dataout(GPIO_IDLE, 1);
+			goto done;
+		}
+
+		break;
 
 	case led_idle_end:
-		omap_set_gpio_dataout(GPIO_IDLE, 0);
-		goto done;
+		if (machine_is_omap_perseus2())
+			hw_led_state &= ~H2P2_DBG_FPGA_P2_LED_IDLE;
+		else {
+			omap_set_gpio_dataout(GPIO_IDLE, 0);
+			goto done;
+		}
+
+		break;
 #endif
 
 	case led_green_on:
@@ -135,7 +160,7 @@
 	/*
 	 *  Actually burn the LEDs
 	 */
-	if (led_state & LED_STATE_CLAIMED)
+	if (led_state & LED_STATE_ENABLED)
 		__raw_writew(~hw_led_state, &fpga->leds);
 
 done:
diff --git a/arch/arm/mach-omap1/leds.c b/arch/arm/mach-omap1/leds.c
index 5c6b1bb..3f9dcac 100644
--- a/arch/arm/mach-omap1/leds.c
+++ b/arch/arm/mach-omap1/leds.c
@@ -33,7 +33,6 @@
 
 	if (machine_is_omap_h2()
 			|| machine_is_omap_h3()
-			|| machine_is_omap_perseus2()
 #ifdef	CONFIG_OMAP_OSK_MISTRAL
 			|| machine_is_omap_osk()
 #endif
diff --git a/arch/arm/mach-omap1/mux.c b/arch/arm/mach-omap1/mux.c
new file mode 100644
index 0000000..d4b8d62
--- /dev/null
+++ b/arch/arm/mach-omap1/mux.c
@@ -0,0 +1,289 @@
+/*
+ * linux/arch/arm/mach-omap1/mux.c
+ *
+ * OMAP1 pin multiplexing configurations
+ *
+ * Copyright (C) 2003 - 2005 Nokia Corporation
+ *
+ * Written by Tony Lindgren <tony.lindgren@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <linux/spinlock.h>
+
+#include <asm/arch/mux.h>
+
+#ifdef CONFIG_OMAP_MUX
+
+#ifdef CONFIG_ARCH_OMAP730
+struct pin_config __initdata_or_module omap730_pins[] = {
+MUX_CFG_730("E2_730_KBR0",	12,   21,    0,	  0,   20,   1,	  NA,	 0,  0)
+MUX_CFG_730("J7_730_KBR1",	12,   25,    0,	  0,   24,   1,	  NA,	 0,  0)
+MUX_CFG_730("E1_730_KBR2",	12,   29,    0,	  0,   28,   1,	  NA,	 0,  0)
+MUX_CFG_730("F3_730_KBR3",	13,    1,    0,	  0,   0,    1,	  NA,	 0,  0)
+MUX_CFG_730("D2_730_KBR4",	13,    5,    0,	  0,   4,    1,	  NA,	 0,  0)
+MUX_CFG_730("C2_730_KBC0",	13,    9,    0,	  0,	8,   1,	  NA,	 0,  0)
+MUX_CFG_730("D3_730_KBC1",	13,   13,    0,	  0,   12,   1,	  NA,	 0,  0)
+MUX_CFG_730("E4_730_KBC2",	13,   17,    0,	  0,   16,   1,	  NA,	 0,  0)
+MUX_CFG_730("F4_730_KBC3",	13,   21,    0,	  0,   20,   1,	  NA,	 0,  0)
+MUX_CFG_730("E3_730_KBC4",	13,   25,    0,	  0,   24,   1,	  NA,	 0,  0)
+};
+#endif
+
+#if defined(CONFIG_ARCH_OMAP15XX) || defined(CONFIG_ARCH_OMAP16XX)
+struct pin_config __initdata_or_module omap1xxx_pins[] = {
+/*
+ *	 description		mux  mode   mux	 pull pull  pull  pu_pd	 pu  dbg
+ *				reg  offset mode reg  bit   ena	  reg
+ */
+MUX_CFG("UART1_TX",		 9,   21,    1,	  2,   3,   0,	 NA,	 0,  0)
+MUX_CFG("UART1_RTS",		 9,   12,    1,	  2,   0,   0,	 NA,	 0,  0)
+
+/* UART2 (COM_UART_GATING), conflicts with USB2 */
+MUX_CFG("UART2_TX",		 C,   27,    1,	  3,   3,   0,	 NA,	 0,  0)
+MUX_CFG("UART2_RX",		 C,   18,    0,	  3,   1,   1,	 NA,	 0,  0)
+MUX_CFG("UART2_CTS",		 C,   21,    0,	  3,   1,   1,	 NA,	 0,  0)
+MUX_CFG("UART2_RTS",		 C,   24,    1,	  3,   2,   0,	 NA,	 0,  0)
+
+/* UART3 (GIGA_UART_GATING) */
+MUX_CFG("UART3_TX",		 6,    0,    1,	  0,  30,   0,	 NA,	 0,  0)
+MUX_CFG("UART3_RX",		 6,    3,    0,	  0,  31,   1,	 NA,	 0,  0)
+MUX_CFG("UART3_CTS",		 5,   12,    2,	  0,  24,   0,	 NA,	 0,  0)
+MUX_CFG("UART3_RTS",		 5,   15,    2,	  0,  25,   0,	 NA,	 0,  0)
+MUX_CFG("UART3_CLKREQ",		 9,   27,    0,	  2,   5,   0,	 NA,	 0,  0)
+MUX_CFG("UART3_BCLK",		 A,    0,    0,	  2,   6,   0,	 NA,	 0,  0)
+MUX_CFG("Y15_1610_UART3_RTS",	 A,    0,    1,	  2,   6,   0,	 NA,	 0,  0)
+
+/* PWT & PWL, conflicts with UART3 */
+MUX_CFG("PWT",		 	 6,    0,    2,	  0,  30,   0,	 NA,	 0,  0)
+MUX_CFG("PWL",		 	 6,    3,    1,	  0,  31,   1,	 NA,	 0,  0)
+
+/* USB internal master generic */
+MUX_CFG("R18_USB_VBUS",		 7,    9,    2,	  1,  11,   0,	 NA,	 0,  1)
+MUX_CFG("R18_1510_USB_GPIO0",	 7,    9,    0,	  1,  11,   1,	 NA,	 0,  1)
+/* works around erratum:  W4_USB_PUEN and W4_USB_PUDIS are switched! */
+MUX_CFG("W4_USB_PUEN",		 D,    3,    3,	  3,   5,   1,	 NA,	 0,  1)
+MUX_CFG("W4_USB_CLKO",		 D,    3,    1,	  3,   5,   0,	 NA,	 0,  1)
+MUX_CFG("W4_USB_HIGHZ",		 D,    3,    4,	  3,   5,   0,	  3,	 0,  1)
+MUX_CFG("W4_GPIO58",		 D,    3,    7,	  3,   5,   0,	  3,	 0,  1)
+
+/* USB1 master */
+MUX_CFG("USB1_SUSP",		 8,   27,    2,	  1,  27,   0,	 NA,	 0,  1)
+MUX_CFG("USB1_SE0",		 9,    0,    2,	  1,  28,   0,	 NA,	 0,  1)
+MUX_CFG("W13_1610_USB1_SE0",	 9,    0,    4,	  1,  28,   0,	 NA,	 0,  1)
+MUX_CFG("USB1_TXEN",		 9,    3,    2,	  1,  29,   0,	 NA,	 0,  1)
+MUX_CFG("USB1_TXD",		 9,   24,    1,	  2,   4,   0,	 NA,	 0,  1)
+MUX_CFG("USB1_VP",		 A,    3,    1,	  2,   7,   0,	 NA,	 0,  1)
+MUX_CFG("USB1_VM",		 A,    6,    1,	  2,   8,   0,	 NA,	 0,  1)
+MUX_CFG("USB1_RCV",		 A,    9,    1,	  2,   9,   0,	 NA,	 0,  1)
+MUX_CFG("USB1_SPEED",		 A,   12,    2,	  2,  10,   0,	 NA,	 0,  1)
+MUX_CFG("R13_1610_USB1_SPEED",	 A,   12,    5,	  2,  10,   0,	 NA,	 0,  1)
+MUX_CFG("R13_1710_USB1_SEO",	 A,   12,    5,   2,  10,   0,   NA,     0,  1)
+
+/* USB2 master */
+MUX_CFG("USB2_SUSP",		 B,    3,    1,	  2,  17,   0,	 NA,	 0,  1)
+MUX_CFG("USB2_VP",		 B,    6,    1,	  2,  18,   0,	 NA,	 0,  1)
+MUX_CFG("USB2_TXEN",		 B,    9,    1,	  2,  19,   0,	 NA,	 0,  1)
+MUX_CFG("USB2_VM",		 C,   18,    1,	  3,   0,   0,	 NA,	 0,  1)
+MUX_CFG("USB2_RCV",		 C,   21,    1,	  3,   1,   0,	 NA,	 0,  1)
+MUX_CFG("USB2_SE0",		 C,   24,    2,	  3,   2,   0,	 NA,	 0,  1)
+MUX_CFG("USB2_TXD",		 C,   27,    2,	  3,   3,   0,	 NA,	 0,  1)
+
+/* OMAP-1510 GPIO */
+MUX_CFG("R18_1510_GPIO0",	 7,    9,    0,   1,  11,   1,    0,     0,  1)
+MUX_CFG("R19_1510_GPIO1",	 7,    6,    0,   1,  10,   1,    0,     0,  1)
+MUX_CFG("M14_1510_GPIO2",	 7,    3,    0,   1,   9,   1,    0,     0,  1)
+
+/* OMAP1610 GPIO */
+MUX_CFG("P18_1610_GPIO3",	 7,    0,    0,   1,   8,   0,   NA,     0,  1)
+MUX_CFG("Y15_1610_GPIO17",	 A,    0,    7,   2,   6,   0,   NA,     0,  1)
+
+/* OMAP-1710 GPIO */
+MUX_CFG("R18_1710_GPIO0",        7,    9,    0,   1,  11,   1,    1,     1,  1)
+MUX_CFG("V2_1710_GPIO10",        F,   27,    1,   4,   3,   1,    4,     1,  1)
+MUX_CFG("N21_1710_GPIO14",       6,    9,    0,   1,   1,   1,    1,     1,  1)
+MUX_CFG("W15_1710_GPIO40",       9,   27,    7,   2,   5,   1,    2,     1,  1)
+
+/* MPUIO */
+MUX_CFG("MPUIO2",		 7,   18,    0,	  1,  14,   1,	 NA,	 0,  1)
+MUX_CFG("N15_1610_MPUIO2",	 7,   18,    0,	  1,  14,   1,	  1,	 0,  1)
+MUX_CFG("MPUIO4",		 7,   15,    0,	  1,  13,   1,	 NA,	 0,  1)
+MUX_CFG("MPUIO5",		 7,   12,    0,	  1,  12,   1,	 NA,	 0,  1)
+
+MUX_CFG("T20_1610_MPUIO5",	 7,   12,    0,	  1,  12,   0,	  3,	 0,  1)
+MUX_CFG("W11_1610_MPUIO6",	10,   15,    2,	  3,   8,   0,	  3,	 0,  1)
+MUX_CFG("V10_1610_MPUIO7",	 A,   24,    2,	  2,  14,   0,	  2,	 0,  1)
+MUX_CFG("W11_1610_MPUIO9",	10,   15,    1,	  3,   8,   0,	  3,	 0,  1)
+MUX_CFG("V10_1610_MPUIO10",	 A,   24,    1,	  2,  14,   0,	  2,	 0,  1)
+MUX_CFG("W10_1610_MPUIO11",	 A,   18,    2,	  2,  11,   0,	  2,	 0,  1)
+MUX_CFG("E20_1610_MPUIO13",	 3,   21,    1,	  0,   7,   0,	  0,	 0,  1)
+MUX_CFG("U20_1610_MPUIO14",	 9,    6,    6,	  0,  30,   0,	  0,	 0,  1)
+MUX_CFG("E19_1610_MPUIO15",	 3,   18,    1,	  0,   6,   0,	  0,	 0,  1)
+
+/* MCBSP2 */
+MUX_CFG("MCBSP2_CLKR",		 C,    6,    0,	  2,  27,   1,	 NA,	 0,  1)
+MUX_CFG("MCBSP2_CLKX",		 C,    9,    0,	  2,  29,   1,	 NA,	 0,  1)
+MUX_CFG("MCBSP2_DR",		 C,    0,    0,	  2,  26,   1,	 NA,	 0,  1)
+MUX_CFG("MCBSP2_DX",		 C,   15,    0,	  2,  31,   1,	 NA,	 0,  1)
+MUX_CFG("MCBSP2_FSR",		 C,   12,    0,	  2,  30,   1,	 NA,	 0,  1)
+MUX_CFG("MCBSP2_FSX",		 C,    3,    0,	  2,  27,   1,	 NA,	 0,  1)
+
+/* MCBSP3 NOTE: Mode must be 1 for the clock */
+MUX_CFG("MCBSP3_CLKX",		 9,    3,    1,	  1,  29,   0,	 NA,	 0,  1)
+
+/* Misc ballouts */
+MUX_CFG("BALLOUT_V8_ARMIO3",	 B,   18,    0,	  2,  25,   1,	 NA,	 0,  1)
+MUX_CFG("N20_HDQ",	       6,   18,    1,   1,   4,   0,    1,     4,  0)
+
+/* OMAP-1610 MMC2 */
+MUX_CFG("W8_1610_MMC2_DAT0",	 B,   21,    6,	  2,  23,   1,	  2,	 1,  1)
+MUX_CFG("V8_1610_MMC2_DAT1",	 B,   27,    6,	  2,  25,   1,	  2,	 1,  1)
+MUX_CFG("W15_1610_MMC2_DAT2",	 9,   12,    6,	  2,   5,   1,	  2,	 1,  1)
+MUX_CFG("R10_1610_MMC2_DAT3",	 B,   18,    6,	  2,  22,   1,	  2,	 1,  1)
+MUX_CFG("Y10_1610_MMC2_CLK",	 B,    3,    6,	  2,  17,   0,	  2,	 0,  1)
+MUX_CFG("Y8_1610_MMC2_CMD",	 B,   24,    6,	  2,  24,   1,	  2,	 1,  1)
+MUX_CFG("V9_1610_MMC2_CMDDIR",	 B,   12,    6,	  2,  20,   0,	  2,	 1,  1)
+MUX_CFG("V5_1610_MMC2_DATDIR0",	 B,   15,    6,	  2,  21,   0,	  2,	 1,  1)
+MUX_CFG("W19_1610_MMC2_DATDIR1", 8,   15,    6,	  1,  23,   0,	  1,	 1,  1)
+MUX_CFG("R18_1610_MMC2_CLKIN",	 7,    9,    6,	  1,  11,   0,	  1,	11,  1)
+
+/* OMAP-1610 External Trace Interface */
+MUX_CFG("M19_1610_ETM_PSTAT0",	 5,   27,    1,	  0,  29,   0,	  0,	 0,  1)
+MUX_CFG("L15_1610_ETM_PSTAT1",	 5,   24,    1,	  0,  28,   0,	  0,	 0,  1)
+MUX_CFG("L18_1610_ETM_PSTAT2",	 5,   21,    1,	  0,  27,   0,	  0,	 0,  1)
+MUX_CFG("L19_1610_ETM_D0",	 5,   18,    1,	  0,  26,   0,	  0,	 0,  1)
+MUX_CFG("J19_1610_ETM_D6",	 5,    0,    1,	  0,  20,   0,	  0,	 0,  1)
+MUX_CFG("J18_1610_ETM_D7",	 5,   27,    1,	  0,  19,   0,	  0,	 0,  1)
+
+/* OMAP16XX GPIO */
+MUX_CFG("P20_1610_GPIO4",	 6,   27,    0,	  1,   7,   0,	  1,	 1,  1)
+MUX_CFG("V9_1610_GPIO7",	 B,   12,    1,	  2,  20,   0,	  2,	 1,  1)
+MUX_CFG("W8_1610_GPIO9",	 B,   21,    0,	  2,  23,   0,	  2,	 1,  1)
+MUX_CFG("N20_1610_GPIO11",       6,   18,    0,   1,   4,   0,    1,     1,  1)
+MUX_CFG("N19_1610_GPIO13",	 6,   12,    0,	  1,   2,   0,	  1,	 1,  1)
+MUX_CFG("P10_1610_GPIO22",	 C,    0,    7,	  2,  26,   0,	  2,	 1,  1)
+MUX_CFG("V5_1610_GPIO24",	 B,   15,    7,	  2,  21,   0,	  2,	 1,  1)
+MUX_CFG("AA20_1610_GPIO_41",	 9,    9,    7,	  1,  31,   0,	  1,	 1,  1)
+MUX_CFG("W19_1610_GPIO48",	 8,   15,    7,   1,  23,   1,    1,     0,  1)
+MUX_CFG("M7_1610_GPIO62",	10,    0,    0,   4,  24,   0,    4,     0,  1)
+MUX_CFG("V14_16XX_GPIO37",	 9,   18,    7,	  2,   2,   0,	  2,	 2,  0)
+MUX_CFG("R9_16XX_GPIO18",	 C,   18,    7,   3,   0,   0,    3,     0,  0)
+MUX_CFG("L14_16XX_GPIO49",	 6,    3,    7,   0,  31,   0,    0,    31,  0)
+
+/* OMAP-1610 uWire */
+MUX_CFG("V19_1610_UWIRE_SCLK",	 8,    6,    0,	  1,  20,   0,	  1,	 1,  1)
+MUX_CFG("U18_1610_UWIRE_SDI",	 8,    0,    0,	  1,  18,   0,	  1,	 1,  1)
+MUX_CFG("W21_1610_UWIRE_SDO",	 8,    3,    0,	  1,  19,   0,	  1,	 1,  1)
+MUX_CFG("N14_1610_UWIRE_CS0",	 8,    9,    1,	  1,  21,   0,	  1,	 1,  1)
+MUX_CFG("P15_1610_UWIRE_CS3",	 8,   12,    1,	  1,  22,   0,	  1,	 1,  1)
+MUX_CFG("N15_1610_UWIRE_CS1",	 7,   18,    2,	  1,  14,   0,	 NA,	 0,  1)
+
+/* OMAP-1610 Flash */
+MUX_CFG("L3_1610_FLASH_CS2B_OE",10,    6,    1,	 NA,   0,   0,	 NA,	 0,  1)
+MUX_CFG("M8_1610_FLASH_CS2B_WE",10,    3,    1,	 NA,   0,   0,	 NA,	 0,  1)
+
+/* First MMC interface, same on 1510, 1610 and 1710 */
+MUX_CFG("MMC_CMD",		 A,   27,    0,	  2,  15,   1,	  2,	 1,  1)
+MUX_CFG("MMC_DAT1",		 A,   24,    0,	  2,  14,   1,	  2,	 1,  1)
+MUX_CFG("MMC_DAT2",		 A,   18,    0,	  2,  12,   1,	  2,	 1,  1)
+MUX_CFG("MMC_DAT0",		 B,    0,    0,	  2,  16,   1,	  2,	 1,  1)
+MUX_CFG("MMC_CLK",		 A,   21,    0,	 NA,   0,   0,	 NA,	 0,  1)
+MUX_CFG("MMC_DAT3",		10,   15,    0,	  3,   8,   1,	  3,	 1,  1)
+MUX_CFG("M15_1710_MMC_CLKI",	 6,   21,    2,   0,   0,   0,   NA,     0,  1)
+MUX_CFG("P19_1710_MMC_CMDDIR",	 6,   24,    6,   0,   0,   0,   NA,     0,  1)
+MUX_CFG("P20_1710_MMC_DATDIR0",	 6,   27,    5,   0,   0,   0,   NA,     0,  1)
+
+/* OMAP-1610 USB0 alternate configuration */
+MUX_CFG("W9_USB0_TXEN",		 B,   9,     5,	  2,  19,   0,	  2,	 0,  1)
+MUX_CFG("AA9_USB0_VP",		 B,   6,     5,	  2,  18,   0,	  2,	 0,  1)
+MUX_CFG("Y5_USB0_RCV",		 C,  21,     5,	  3,   1,   0,	  1,	 0,  1)
+MUX_CFG("R9_USB0_VM",		 C,  18,     5,	  3,   0,   0,	  3,	 0,  1)
+MUX_CFG("V6_USB0_TXD",		 C,  27,     5,	  3,   3,   0,	  3,	 0,  1)
+MUX_CFG("W5_USB0_SE0",		 C,  24,     5,	  3,   2,   0,	  3,	 0,  1)
+MUX_CFG("V9_USB0_SPEED",	 B,  12,     5,	  2,  20,   0,	  2,	 0,  1)
+MUX_CFG("Y10_USB0_SUSP",	 B,   3,     5,	  2,  17,   0,	  2,	 0,  1)
+
+/* USB2 interface */
+MUX_CFG("W9_USB2_TXEN",		 B,   9,     1,	 NA,   0,   0,	 NA,	 0,  1)
+MUX_CFG("AA9_USB2_VP",		 B,   6,     1,	 NA,   0,   0,	 NA,	 0,  1)
+MUX_CFG("Y5_USB2_RCV",		 C,  21,     1,	 NA,   0,   0,	 NA,	 0,  1)
+MUX_CFG("R9_USB2_VM",		 C,  18,     1,	 NA,   0,   0,	 NA,	 0,  1)
+MUX_CFG("V6_USB2_TXD",		 C,  27,     2,	 NA,   0,   0,	 NA,	 0,  1)
+MUX_CFG("W5_USB2_SE0",		 C,  24,     2,	 NA,   0,   0,	 NA,	 0,  1)
+
+/* 16XX UART */
+MUX_CFG("R13_1610_UART1_TX",	 A,  12,     6,	  2,  10,   0,	  2,	10,  1)
+MUX_CFG("V14_16XX_UART1_RX",	 9,  18,     0,	  2,   2,   0,	  2,	 2,  1)
+MUX_CFG("R14_1610_UART1_CTS",	 9,  15,     0,	  2,   1,   0,	  2,	 1,  1)
+MUX_CFG("AA15_1610_UART1_RTS",	 9,  12,     1,	  2,   0,   0,	  2,	 0,  1)
+MUX_CFG("R9_16XX_UART2_RX",	 C,  18,     0,   3,   0,   0,    3,     0,  1)
+MUX_CFG("L14_16XX_UART3_RX",	 6,   3,     0,   0,  31,   0,    0,    31,  1)
+
+/* I2C interface */
+MUX_CFG("I2C_SCL",		 7,  24,     0,	 NA,   0,   0,	 NA,	 0,  0)
+MUX_CFG("I2C_SDA",		 7,  27,     0,	 NA,   0,   0,	 NA,	 0,  0)
+
+/* Keypad */
+MUX_CFG("F18_1610_KBC0",	 3,  15,     0,	  0,   5,   1,	  0,	 0,  0)
+MUX_CFG("D20_1610_KBC1",	 3,  12,     0,	  0,   4,   1,	  0,	 0,  0)
+MUX_CFG("D19_1610_KBC2",	 3,   9,     0,	  0,   3,   1,	  0,	 0,  0)
+MUX_CFG("E18_1610_KBC3",	 3,   6,     0,	  0,   2,   1,	  0,	 0,  0)
+MUX_CFG("C21_1610_KBC4",	 3,   3,     0,	  0,   1,   1,	  0,	 0,  0)
+MUX_CFG("G18_1610_KBR0",	 4,   0,     0,	  0,   10,  1,	  0,	 1,  0)
+MUX_CFG("F19_1610_KBR1",	 3,   27,    0,	  0,   9,   1,	  0,	 1,  0)
+MUX_CFG("H14_1610_KBR2",	 3,   24,    0,	  0,   8,   1,	  0,	 1,  0)
+MUX_CFG("E20_1610_KBR3",	 3,   21,    0,	  0,   7,   1,	  0,	 1,  0)
+MUX_CFG("E19_1610_KBR4",	 3,   18,    0,	  0,   6,   1,	  0,	 1,  0)
+MUX_CFG("N19_1610_KBR5",	 6,  12,     1,	  1,   2,   1,	  1,	 1,  0)
+
+/* Power management */
+MUX_CFG("T20_1610_LOW_PWR",	 7,   12,    1,	  NA,   0,   0,   NA,	 0,  0)
+
+/* MCLK Settings */
+MUX_CFG("V5_1710_MCLK_ON",	 B,   15,    0,	  NA,   0,   0,   NA,	 0,  0)
+MUX_CFG("V5_1710_MCLK_OFF",	 B,   15,    6,	  NA,   0,   0,   NA,	 0,  0)
+MUX_CFG("R10_1610_MCLK_ON",	 B,   18,    0,	  NA,  22,   0,	  NA,	 1,  0)
+MUX_CFG("R10_1610_MCLK_OFF",	 B,   18,    6,	  2,   22,   1,	  2,	 1,  1)
+
+/* CompactFlash controller, conflicts with MMC1 */
+MUX_CFG("P11_1610_CF_CD2",	 A,   27,    3,	  2,   15,   1,	  2,	 1,  1)
+MUX_CFG("R11_1610_CF_IOIS16",	 B,    0,    3,	  2,   16,   1,	  2,	 1,  1)
+MUX_CFG("V10_1610_CF_IREQ",	 A,   24,    3,	  2,   14,   0,	  2,	 0,  1)
+MUX_CFG("W10_1610_CF_RESET",	 A,   18,    3,	  2,   12,   1,	  2,	 1,  1)
+MUX_CFG("W11_1610_CF_CD1",	10,   15,    3,	  3,    8,   1,	  3,	 1,  1)
+};
+#endif	/* CONFIG_ARCH_OMAP15XX || CONFIG_ARCH_OMAP16XX */
+
+int __init omap1_mux_init(void)
+{
+
+#ifdef CONFIG_ARCH_OMAP730
+	omap_mux_register(omap730_pins, ARRAY_SIZE(omap730_pins));
+#endif
+
+#if defined(CONFIG_ARCH_OMAP15XX) || defined(CONFIG_ARCH_OMAP16XX)
+	omap_mux_register(omap1xxx_pins, ARRAY_SIZE(omap1xxx_pins));
+#endif
+
+	return 0;
+}
+
+#endif
diff --git a/arch/arm/mach-omap1/serial.c b/arch/arm/mach-omap1/serial.c
index 40c4f7c4..6810cfb 100644
--- a/arch/arm/mach-omap1/serial.c
+++ b/arch/arm/mach-omap1/serial.c
@@ -109,9 +109,10 @@
  * By default UART2 does not work on Innovator-1510 if you have
  * USB OHCI enabled. To use UART2, you must disable USB2 first.
  */
-void __init omap_serial_init(int ports[OMAP_MAX_NR_PORTS])
+void __init omap_serial_init(void)
 {
 	int i;
+	const struct omap_uart_config *info;
 
 	if (cpu_is_omap730()) {
 		serial_platform_data[0].regshift = 0;
@@ -126,10 +127,14 @@
 		serial_platform_data[2].uartclk = OMAP1510_BASE_BAUD * 16;
 	}
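+	/* UART enablement now comes from the OMAP_TAG_UART board tag rather
+	 * than a ports[] argument; without the tag no ports are set up. */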
 
+	info = omap_get_config(OMAP_TAG_UART, struct omap_uart_config);
+	if (info == NULL)
+		return;
+
 	for (i = 0; i < OMAP_MAX_NR_PORTS; i++) {
 		unsigned char reg;
 
-		if (ports[i] == 0) {
+		if (!((1 << i) & info->enabled_uarts)) {
 			serial_platform_data[i].membase = NULL;
 			serial_platform_data[i].mapbase = 0;
 			continue;
diff --git a/arch/arm/mach-omap1/time.c b/arch/arm/mach-omap1/time.c
index 191a9b1..cdbf4d7 100644
--- a/arch/arm/mach-omap1/time.c
+++ b/arch/arm/mach-omap1/time.c
@@ -226,8 +226,8 @@
 
 #ifdef CONFIG_OMAP_32K_TIMER
 
-#ifdef CONFIG_ARCH_OMAP1510
-#error OMAP 32KHz timer does not currently work on 1510!
+#ifdef CONFIG_ARCH_OMAP15XX
+#error OMAP 32KHz timer does not currently work on 15XX!
 #endif
 
 /*
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
new file mode 100644
index 0000000..5788809
--- /dev/null
+++ b/arch/arm/mach-omap2/Kconfig
@@ -0,0 +1,22 @@
+comment "OMAP Core Type"
+	depends on ARCH_OMAP2
+
+config ARCH_OMAP24XX
+	bool "OMAP24xx Based System"
+	depends on ARCH_OMAP2
+
+config ARCH_OMAP2420
+	bool "OMAP2420 support"
+	depends on ARCH_OMAP24XX
+
+comment "OMAP Board Type"
+	depends on ARCH_OMAP2
+
+config MACH_OMAP_GENERIC
+	bool "Generic OMAP board"
+	depends on ARCH_OMAP2 && ARCH_OMAP24XX
+
+config MACH_OMAP_H4
+	bool "OMAP 2420 H4 board"
+	depends on ARCH_OMAP2 && ARCH_OMAP24XX
+
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
new file mode 100644
index 0000000..4204116
--- /dev/null
+++ b/arch/arm/mach-omap2/Makefile
@@ -0,0 +1,13 @@
+#
+# Makefile for the linux kernel.
+#
+
+# Common support
+obj-y := irq.o id.o io.o sram-fn.o clock.o mux.o devices.o serial.o
+
+obj-$(CONFIG_OMAP_MPU_TIMER)		+= timer-gp.o
+
+# Specific board support
+obj-$(CONFIG_MACH_OMAP_GENERIC)		+= board-generic.o
+obj-$(CONFIG_MACH_OMAP_H4)		+= board-h4.o
+
diff --git a/arch/arm/mach-omap2/Makefile.boot b/arch/arm/mach-omap2/Makefile.boot
new file mode 100644
index 0000000..565aff7
--- /dev/null
+++ b/arch/arm/mach-omap2/Makefile.boot
@@ -0,0 +1,3 @@
+  zreladdr-y		:= 0x80008000
+params_phys-y		:= 0x80000100
+initrd_phys-y		:= 0x80800000
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
new file mode 100644
index 0000000..c602e7a
--- /dev/null
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -0,0 +1,80 @@
+/*
+ * linux/arch/arm/mach-omap2/board-generic.c
+ *
+ * Copyright (C) 2005 Nokia Corporation
+ * Author: Paul Mundt <paul.mundt@nokia.com>
+ *
+ * Modified from mach-omap/omap1/board-generic.c
+ *
+ * Code for generic OMAP2 board. Should work on many OMAP2 systems where
+ * the bootloader passes the board-specific data to the kernel.
+ * Do not put any board-specific code in this file; create a new machine
+ * type if you need custom low-level initializations.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/device.h>
+
+#include <asm/hardware.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+
+#include <asm/arch/gpio.h>
+#include <asm/arch/mux.h>
+#include <asm/arch/usb.h>
+#include <asm/arch/board.h>
+#include <asm/arch/common.h>
+
+static void __init omap_generic_init_irq(void)
+{
+	omap_init_irq();
+}
+
+static struct omap_uart_config generic_uart_config __initdata = {
+	.enabled_uarts = ((1 << 0) | (1 << 1) | (1 << 2)),
+};
+
+static struct omap_mmc_config generic_mmc_config __initdata = {
+	.mmc [0] = {
+		.enabled 	= 0,
+		.wire4		= 0,
+		.wp_pin		= -1,
+		.power_pin	= -1,
+		.switch_pin	= -1,
+	},
+};
+
+static struct omap_board_config_kernel generic_config[] = {
+	{ OMAP_TAG_UART,	&generic_uart_config },
+	{ OMAP_TAG_MMC,		&generic_mmc_config },
+};
+
+static void __init omap_generic_init(void)
+{
+	omap_board_config = generic_config;
+	omap_board_config_size = ARRAY_SIZE(generic_config);
+	omap_serial_init();
+}
+
+static void __init omap_generic_map_io(void)
+{
+	omap_map_common_io();
+}
+
+MACHINE_START(OMAP_GENERIC, "Generic OMAP24xx")
+	/* Maintainer: Paul Mundt <paul.mundt@nokia.com> */
+	.phys_ram	= 0x80000000,
+	.phys_io	= 0x48000000,
+	.io_pg_offst	= ((0xd8000000) >> 18) & 0xfffc,
+	.boot_params	= 0x80000100,
+	.map_io		= omap_generic_map_io,
+	.init_irq	= omap_generic_init_irq,
+	.init_machine	= omap_generic_init,
+	.timer		= &omap_timer,
+MACHINE_END
diff --git a/arch/arm/mach-omap2/board-h4.c b/arch/arm/mach-omap2/board-h4.c
new file mode 100644
index 0000000..f255446
--- /dev/null
+++ b/arch/arm/mach-omap2/board-h4.c
@@ -0,0 +1,197 @@
+/*
+ * linux/arch/arm/mach-omap2/board-h4.c
+ *
+ * Copyright (C) 2005 Nokia Corporation
+ * Author: Paul Mundt <paul.mundt@nokia.com>
+ *
+ * Modified from mach-omap/omap1/board-generic.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/delay.h>
+
+#include <asm/hardware.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/flash.h>
+
+#include <asm/arch/gpio.h>
+#include <asm/arch/mux.h>
+#include <asm/arch/usb.h>
+#include <asm/arch/board.h>
+#include <asm/arch/common.h>
+#include <asm/arch/prcm.h>
+
+#include <asm/io.h>
+#include <asm/delay.h>
+
+static struct mtd_partition h4_partitions[] = {
+	/* bootloader (U-Boot, etc) in first sector */
+	{
+	      .name		= "bootloader",
+	      .offset		= 0,
+	      .size		= SZ_128K,
+	      .mask_flags	= MTD_WRITEABLE, /* force read-only */
+	},
+	/* bootloader params in the next sector */
+	{
+	      .name		= "params",
+	      .offset		= MTDPART_OFS_APPEND,
+	      .size		= SZ_128K,
+	      .mask_flags	= 0,
+	},
+	/* kernel */
+	{
+	      .name		= "kernel",
+	      .offset		= MTDPART_OFS_APPEND,
+	      .size		= SZ_2M,
+	      .mask_flags	= 0
+	},
+	/* file system */
+	{
+	      .name		= "filesystem",
+	      .offset		= MTDPART_OFS_APPEND,
+	      .size		= MTDPART_SIZ_FULL,
+	      .mask_flags	= 0
+	}
+};
+
+static struct flash_platform_data h4_flash_data = {
+	.map_name	= "cfi_probe",
+	.width		= 2,
+	.parts		= h4_partitions,
+	.nr_parts	= ARRAY_SIZE(h4_partitions),
+};
+
+static struct resource h4_flash_resource = {
+	.start		= H4_CS0_BASE,
+	.end		= H4_CS0_BASE + SZ_64M - 1,
+	.flags		= IORESOURCE_MEM,
+};
+
+static struct platform_device h4_flash_device = {
+	.name		= "omapflash",
+	.id		= 0,
+	.dev		= {
+		.platform_data	= &h4_flash_data,
+	},
+	.num_resources	= 1,
+	.resource	= &h4_flash_resource,
+};
+
+static struct resource h4_smc91x_resources[] = {
+	[0] = {
+		.start  = OMAP24XX_ETHR_START,          /* Physical */
+		.end    = OMAP24XX_ETHR_START + 0xf,
+		.flags  = IORESOURCE_MEM,
+	},
+	[1] = {
+		.start  = OMAP_GPIO_IRQ(OMAP24XX_ETHR_GPIO_IRQ),
+		.end    = OMAP_GPIO_IRQ(OMAP24XX_ETHR_GPIO_IRQ),
+		.flags  = IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device h4_smc91x_device = {
+	.name		= "smc91x",
+	.id		= -1,
+	.num_resources	= ARRAY_SIZE(h4_smc91x_resources),
+	.resource	= h4_smc91x_resources,
+};
+
+static struct platform_device *h4_devices[] __initdata = {
+	&h4_smc91x_device,
+	&h4_flash_device,
+};
+
+static inline void __init h4_init_smc91x(void)
+{
+	/* Make sure CS1 timings are correct */
+	GPMC_CONFIG1_1 = 0x00011200;
+	GPMC_CONFIG2_1 = 0x001f1f01;
+	GPMC_CONFIG3_1 = 0x00080803;
+	GPMC_CONFIG4_1 = 0x1c091c09;
+	GPMC_CONFIG5_1 = 0x041f1f1f;
+	GPMC_CONFIG6_1 = 0x000004c4;
+	GPMC_CONFIG7_1 = 0x00000f40 | (0x08000000 >> 24);
+	udelay(100);
+
+	omap_cfg_reg(M15_24XX_GPIO92);
+	if (omap_request_gpio(OMAP24XX_ETHR_GPIO_IRQ) < 0) {
+		printk(KERN_ERR "Failed to request GPIO%d for smc91x IRQ\n",
+			OMAP24XX_ETHR_GPIO_IRQ);
+		return;
+	}
+	omap_set_gpio_direction(OMAP24XX_ETHR_GPIO_IRQ, 1);
+}
+
+static void __init omap_h4_init_irq(void)
+{
+	omap_init_irq();
+	omap_gpio_init();
+	h4_init_smc91x();
+}
+
+static struct omap_uart_config h4_uart_config __initdata = {
+	.enabled_uarts = ((1 << 0) | (1 << 1) | (1 << 2)),
+};
+
+static struct omap_mmc_config h4_mmc_config __initdata = {
+	.mmc [0] = {
+		.enabled	= 1,
+		.wire4		= 1,
+		.wp_pin		= -1,
+		.power_pin	= -1,
+		.switch_pin	= -1,
+	},
+};
+
+static struct omap_lcd_config h4_lcd_config __initdata = {
+	.panel_name	= "h4",
+	.ctrl_name	= "internal",
+};
+
+static struct omap_board_config_kernel h4_config[] = {
+	{ OMAP_TAG_UART,	&h4_uart_config },
+	{ OMAP_TAG_MMC,		&h4_mmc_config },
+	{ OMAP_TAG_LCD,		&h4_lcd_config },
+};
+
+static void __init omap_h4_init(void)
+{
+	/*
+	 * Make sure the serial ports are muxed on at this point.
+	 * You have to mux them off in device drivers later on
+	 * if not needed.
+	 */
+	platform_add_devices(h4_devices, ARRAY_SIZE(h4_devices));
+	omap_board_config = h4_config;
+	omap_board_config_size = ARRAY_SIZE(h4_config);
+	omap_serial_init();
+}
+
+static void __init omap_h4_map_io(void)
+{
+	omap_map_common_io();
+}
+
+MACHINE_START(OMAP_H4, "OMAP2420 H4 board")
+	/* Maintainer: Paul Mundt <paul.mundt@nokia.com> */
+	.phys_ram	= 0x80000000,
+	.phys_io	= 0x48000000,
+	.io_pg_offst	= ((0xd8000000) >> 18) & 0xfffc,
+	.boot_params	= 0x80000100,
+	.map_io		= omap_h4_map_io,
+	.init_irq	= omap_h4_init_irq,
+	.init_machine	= omap_h4_init,
+	.timer		= &omap_timer,
+MACHINE_END
diff --git a/arch/arm/mach-omap2/clock.c b/arch/arm/mach-omap2/clock.c
new file mode 100644
index 0000000..85818d9
--- /dev/null
+++ b/arch/arm/mach-omap2/clock.c
@@ -0,0 +1,1129 @@
+/*
+ *  linux/arch/arm/mach-omap2/clock.c
+ *
+ *  Copyright (C) 2005 Texas Instruments Inc.
+ *  Richard Woodruff <r-woodruff2@ti.com>
+ *  Created for OMAP2.
+ *
+ *  Cleaned up and modified to use omap shared clock framework by
+ *  Tony Lindgren <tony@atomide.com>
+ *
+ *  Based on omap1 clock.c, Copyright (C) 2004 - 2005 Nokia corporation
+ *  Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+
+#include <asm/io.h>
+
+#include <asm/hardware/clock.h>
+#include <asm/arch/clock.h>
+#include <asm/arch/sram.h>
+#include <asm/arch/prcm.h>
+
+#include "clock.h"
+
+//#define DOWN_VARIABLE_DPLL 1			/* Experimental */
+
+static struct prcm_config *curr_prcm_set;
+static struct memory_timings mem_timings;
+static u32 curr_perf_level = PRCM_FULL_SPEED;
+
+/*-------------------------------------------------------------------------
+ * Omap2 specific clock functions
+ *-------------------------------------------------------------------------*/
+
+/* Recalculate SYST_CLK */
+static void omap2_sys_clk_recalc(struct clk * clk)
+{
+	u32 div = PRCM_CLKSRC_CTRL;
+	div &= (1 << 7) | (1 << 6);	/* Test if ext clk divided by 1 or 2 */
+	div >>= clk->rate_offset;
+	clk->rate = (clk->parent->rate / div);
+	propagate_rate(clk);
+}
+
+static u32 omap2_get_dpll_rate(struct clk * tclk)
+{
+	int dpll_clk, dpll_mult, dpll_div, amult;
+
+	dpll_mult = (CM_CLKSEL1_PLL >> 12) & 0x03ff;	/* 10 bits */
+	dpll_div = (CM_CLKSEL1_PLL >> 8) & 0x0f;	/* 4 bits */
+	dpll_clk = (tclk->parent->rate * dpll_mult) / (dpll_div + 1);
+	amult = CM_CLKSEL2_PLL & 0x3;
+	dpll_clk *= amult;
+
+	return dpll_clk;
+}
+
+static void omap2_followparent_recalc(struct clk *clk)
+{
+	followparent_recalc(clk);
+}
+
+static void omap2_propagate_rate(struct clk * clk)
+{
+	if (!(clk->flags & RATE_FIXED))
+		clk->rate = clk->parent->rate;
+
+	propagate_rate(clk);
+}
+
+/* Enable an APLL if off */
+static void omap2_clk_fixed_enable(struct clk *clk)
+{
+	u32 cval, i=0;
+
+	if (clk->enable_bit == 0xff)			/* Parent will do it */
+		return;
+
+	cval = CM_CLKEN_PLL;
+
+	if ((cval & (0x3 << clk->enable_bit)) == (0x3 << clk->enable_bit))
+		return;
+
+	cval &= ~(0x3 << clk->enable_bit);
+	cval |= (0x3 << clk->enable_bit);
+	CM_CLKEN_PLL = cval;
+
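+	/* Pick which CM_IDLEST_CKGEN status bit to poll for lock below */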
+	if (clk == &apll96_ck)
+		cval = (1 << 8);
+	else if (clk == &apll54_ck)
+		cval = (1 << 6);
+
+	while (!(CM_IDLEST_CKGEN & cval)) {		/* Wait for lock */
+		++i;
+		udelay(1);
+		if (i == 100000)
+			break;
+	}
+}
+
+/* Enables clock without considering parent dependencies or use count
+ * REVISIT: Maybe change this to use clk->enable like on omap1?
+ */
+static int omap2_clk_enable(struct clk * clk)
+{
+	u32 regval32;
+
+	if (clk->flags & ALWAYS_ENABLED)
+		return 0;
+
+	if (unlikely(clk->enable_reg == 0)) {
+		printk(KERN_ERR "clock.c: Enable for %s without enable code\n",
+		       clk->name);
+		return 0;
+	}
+
+	if (clk->enable_reg == (void __iomem *)&CM_CLKEN_PLL) {
+		omap2_clk_fixed_enable(clk);
+		return 0;
+	}
+
+	regval32 = __raw_readl(clk->enable_reg);
+	regval32 |= (1 << clk->enable_bit);
+	__raw_writel(regval32, clk->enable_reg);
+
+	return 0;
+}
+
+/* Stop APLL */
+static void omap2_clk_fixed_disable(struct clk *clk)
+{
+	u32 cval;
+
+	if(clk->enable_bit == 0xff)		/* let parent off do it */
+		return;
+
+	cval = CM_CLKEN_PLL;
+	cval &= ~(0x3 << clk->enable_bit);
+	CM_CLKEN_PLL = cval;
+}
+
+/* Disables clock without considering parent dependencies or use count */
+static void omap2_clk_disable(struct clk *clk)
+{
+	u32 regval32;
+
+	if (clk->enable_reg == 0)
+		return;
+
+	if (clk->enable_reg == (void __iomem *)&CM_CLKEN_PLL) {
+		omap2_clk_fixed_disable(clk);
+		return;
+	}
+
+	regval32 = __raw_readl(clk->enable_reg);
+	regval32 &= ~(1 << clk->enable_bit);
+	__raw_writel(regval32, clk->enable_reg);
+}
+
+static int omap2_clk_use(struct clk *clk)
+{
+	int ret = 0;
+
+	if (clk->usecount++ == 0) {
+		if (likely((u32)clk->parent))
+			ret = omap2_clk_use(clk->parent);
+
+		if (unlikely(ret != 0)) {
+			clk->usecount--;
+			return ret;
+		}
+
+		ret = omap2_clk_enable(clk);
+
+		if (unlikely(ret != 0) && clk->parent) {
+			omap2_clk_unuse(clk->parent);
+			clk->usecount--;
+		}
+	}
+
+	return ret;
+}
+
+static void omap2_clk_unuse(struct clk *clk)
+{
+	if (clk->usecount > 0 && !(--clk->usecount)) {
+		omap2_clk_disable(clk);
+		if (likely((u32)clk->parent))
+			omap2_clk_unuse(clk->parent);
+	}
+}
+
+/*
+ * Uses the current prcm set to tell if a rate is valid.
+ * You can go slower, but not faster within a given rate set.
+ */
+static u32 omap2_dpll_round_rate(unsigned long target_rate)
+{
+	u32 high, low;
+
+	if ((CM_CLKSEL2_PLL & 0x3) == 1) {	/* DPLL clockout */
+		high = curr_prcm_set->dpll_speed * 2;
+		low = curr_prcm_set->dpll_speed;
+	} else {				/* DPLL clockout x 2 */
+		high = curr_prcm_set->dpll_speed;
+		low = curr_prcm_set->dpll_speed / 2;
+	}
+
+#ifdef DOWN_VARIABLE_DPLL
+	if (target_rate > high)
+		return high;
+	else
+		return target_rate;
+#else
+	if (target_rate > low)
+		return high;
+	else
+		return low;
+#endif
+
+}
+
+/*
+ * Used for clocks that are governed by the CLKSEL_xyz registers.
+ * REVISIT: Maybe change to use clk->enable() functions like on omap1?
+ */
+static void omap2_clksel_recalc(struct clk * clk)
+{
+	u32 fixed = 0, div = 0;
+
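+	/* The DPLL and the IVA1 MPU interface clock are handled as fixed cases */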
+	if (clk == &dpll_ck) {
+		clk->rate = omap2_get_dpll_rate(clk);
+		fixed = 1;
+		div = 0;
+	}
+
+	if (clk == &iva1_mpu_int_ifck) {
+		div = 2;
+		fixed = 1;
+	}
+
+	if ((clk == &dss1_fck) && ((CM_CLKSEL1_CORE & (0x1f << 8)) == 0)) {
+		clk->rate = sys_ck.rate;
+		return;
+	}
+
+	if (!fixed) {
+		div = omap2_clksel_get_divisor(clk);
+		if (div == 0)
+			return;
+	}
+
+	if (div != 0) {
+		if (unlikely(clk->rate == clk->parent->rate / div))
+			return;
+		clk->rate = clk->parent->rate / div;
+	}
+
+	if (unlikely(clk->flags & RATE_PROPAGATES))
+		propagate_rate(clk);
+}
+
+/*
+ * Finds best divider value in an array based on the source and target
+ * rates. The divider array must be sorted with smallest divider first.
+ */
+static inline u32 omap2_divider_from_table(u32 size, u32 *div_array,
+					   u32 src_rate, u32 tgt_rate)
+{
+	int i, test_rate;
+
+	if (div_array == NULL)
+		return ~0;	/* Treat a missing table as "no acceptable divider" */
+
+	for (i=0; i < size; i++) {
+		test_rate = src_rate / *div_array;
+		if (test_rate <= tgt_rate)
+			return *div_array;
+		++div_array;
+	}
+
+	return ~0;	/* No acceptable divider */
+}
+
+/*
+ * Find divisor for the given clock and target rate.
+ *
+ * Note that this will not work for clocks which are part of CONFIG_PARTICIPANT;
+ * they are only settable as part of the virtual_prcm set.
+ */
+static u32 omap2_clksel_round_rate(struct clk *tclk, u32 target_rate,
+	u32 *new_div)
+{
+	u32 gfx_div[] = {2, 3, 4};
+	u32 sysclkout_div[] = {1, 2, 4, 8, 16};
+	u32 dss1_div[] = {1, 2, 3, 4, 5, 6, 8, 9, 12, 16};
+	u32 vylnq_div[] = {1, 2, 3, 4, 6, 8, 9, 12, 16, 18};
+	u32 best_div = ~0, asize = 0;
+	u32 *div_array = NULL;
+
+	switch (tclk->flags & SRC_RATE_SEL_MASK) {
+	case CM_GFX_SEL1:
+		asize = 3;
+		div_array = gfx_div;
+		break;
+	case CM_PLL_SEL1:
+		return omap2_dpll_round_rate(target_rate);
+	case CM_SYSCLKOUT_SEL1:
+		asize = 5;
+		div_array = sysclkout_div;
+		break;
+	case CM_CORE_SEL1:
+		if(tclk == &dss1_fck){
+			if(tclk->parent == &core_ck){
+				asize = 10;
+				div_array = dss1_div;
+			} else {
+				*new_div = 0; /* fixed clk */
+				return(tclk->parent->rate);
+			}
+		} else if((tclk == &vlynq_fck) && cpu_is_omap2420()){
+			if(tclk->parent == &core_ck){
+				asize = 10;
+				div_array = vylnq_div;
+			} else {
+				*new_div = 0; /* fixed clk */
+				return(tclk->parent->rate);
+			}
+		}
+		break;
+	}
+
+	best_div = omap2_divider_from_table(asize, div_array,
+	 tclk->parent->rate, target_rate);
+	if (best_div == ~0){
+		*new_div = 1;
+		return best_div; /* signal error */
+	}
+
+	*new_div = best_div;
+	return (tclk->parent->rate / best_div);
+}
+
+/* Given a clock and a rate apply a clock specific rounding function */
+static long omap2_clk_round_rate(struct clk *clk, unsigned long rate)
+{
+	u32 new_div = 0;
+	int valid_rate;
+
+	if (clk->flags & RATE_FIXED)
+		return clk->rate;
+
+	if (clk->flags & RATE_CKCTL) {
+		valid_rate = omap2_clksel_round_rate(clk, rate, &new_div);
+		return valid_rate;
+	}
+
+	if (clk->round_rate != 0)
+		return clk->round_rate(clk, rate);
+
+	return clk->rate;
+}
+
+/*
+ * Check the DLL lock state, and return true if running in unlock mode.
+ * This is needed to compensate for the shifted DLL value in unlock mode.
+ */
+static u32 omap2_dll_force_needed(void)
+{
+	u32 dll_state = SDRC_DLLA_CTRL;		/* dlla and dllb are a set */
+
+	if ((dll_state & (1 << 2)) == (1 << 2))
+		return 1;
+	else
+		return 0;
+}
+
+static void omap2_init_memory_params(u32 force_lock_to_unlock_mode)
+{
+	unsigned long dll_cnt;
+	u32 fast_dll = 0;
+
+	mem_timings.m_type = !((SDRC_MR_0 & 0x3) == 0x1); /* DDR = 1, SDR = 0 */
+
+	/* 2422 es2.05 and beyond has a single SIP DDR instead of 2 like others.
+	 * In the case of 2422, it's OK to use CS1 instead of CS0.
+	 */
+
+#if 0	/* FIXME: Enable after 24xx cpu detection works */
+	ctype = get_cpu_type();
+	if (cpu_is_omap2422())
+		mem_timings.base_cs = 1;
+	else
+#endif
+		mem_timings.base_cs = 0;
+
+	if (mem_timings.m_type != M_DDR)
+		return;
+
+	/* With DDR we need to determine the low frequency DLL value */
+	if (((mem_timings.fast_dll_ctrl & (1 << 2)) == M_LOCK_CTRL))
+		mem_timings.dll_mode = M_UNLOCK;
+	else
+		mem_timings.dll_mode = M_LOCK;
+
+	if (mem_timings.base_cs == 0) {
+		fast_dll = SDRC_DLLA_CTRL;
+		dll_cnt = SDRC_DLLA_STATUS & 0xff00;
+	} else {
+		fast_dll = SDRC_DLLB_CTRL;
+		dll_cnt = SDRC_DLLB_STATUS & 0xff00;
+	}
+	if (force_lock_to_unlock_mode) {
+		fast_dll &= ~0xff00;
+		fast_dll |= dll_cnt;		/* Current lock mode */
+	}
+	mem_timings.fast_dll_ctrl = fast_dll;
+
+	/* No disruptions, DDR will be offline & C-ABI not followed */
+	omap2_sram_ddr_init(&mem_timings.slow_dll_ctrl,
+			    mem_timings.fast_dll_ctrl,
+			    mem_timings.base_cs,
+			    force_lock_to_unlock_mode);
+	mem_timings.slow_dll_ctrl &= 0xff00;	/* Keep lock value */
+
+	/* Turn status into unlock ctrl */
+	mem_timings.slow_dll_ctrl |=
+		((mem_timings.fast_dll_ctrl & 0xF) | (1 << 2));
+
+	/* 90 degree phase for anything below 133Mhz */
+	mem_timings.slow_dll_ctrl |= (1 << 1);
+}
+
+static u32 omap2_reprogram_sdrc(u32 level, u32 force)
+{
+	u32 prev = curr_perf_level, flags;
+
+	if ((curr_perf_level == level) && !force)
+		return prev;
+
+	if (level == PRCM_HALF_SPEED) {
+		local_irq_save(flags);
+		PRCM_VOLTSETUP = 0xffff;
+		omap2_sram_reprogram_sdrc(PRCM_HALF_SPEED,
+					  mem_timings.slow_dll_ctrl,
+					  mem_timings.m_type);
+		curr_perf_level = PRCM_HALF_SPEED;
+		local_irq_restore(flags);
+	}
+	if (level == PRCM_FULL_SPEED) {
+		local_irq_save(flags);
+		PRCM_VOLTSETUP = 0xffff;
+		omap2_sram_reprogram_sdrc(PRCM_FULL_SPEED,
+					  mem_timings.fast_dll_ctrl,
+					  mem_timings.m_type);
+		curr_perf_level = PRCM_FULL_SPEED;
+		local_irq_restore(flags);
+	}
+
+	return prev;
+}
+
+static int omap2_reprogram_dpll(struct clk * clk, unsigned long rate)
+{
+	u32 flags, cur_rate, low, mult, div, valid_rate, done_rate;
+	u32 bypass = 0;
+	struct prcm_config tmpset;
+	int ret = -EINVAL;
+
+	local_irq_save(flags);
+	cur_rate = omap2_get_dpll_rate(&dpll_ck);
+	mult = CM_CLKSEL2_PLL & 0x3;
+
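+	/* Simple halving/doubling of the current rate only needs an SDRC change */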
+	if ((rate == (cur_rate / 2)) && (mult == 2)) {
+		omap2_reprogram_sdrc(PRCM_HALF_SPEED, 1);
+	} else if ((rate == (cur_rate * 2)) && (mult == 1)) {
+		omap2_reprogram_sdrc(PRCM_FULL_SPEED, 1);
+	} else if (rate != cur_rate) {
+		valid_rate = omap2_dpll_round_rate(rate);
+		if (valid_rate != rate)
+			goto dpll_exit;
+
+		if ((CM_CLKSEL2_PLL & 0x3) == 1)
+			low = curr_prcm_set->dpll_speed;
+		else
+			low = curr_prcm_set->dpll_speed / 2;
+
+		tmpset.cm_clksel1_pll = CM_CLKSEL1_PLL;
+		tmpset.cm_clksel1_pll &= ~(0x3FFF << 8);
+		div = ((curr_prcm_set->xtal_speed / 1000000) - 1);
+		tmpset.cm_clksel2_pll = CM_CLKSEL2_PLL;
+		tmpset.cm_clksel2_pll &= ~0x3;
+		if (rate > low) {
+			tmpset.cm_clksel2_pll |= 0x2;
+			mult = ((rate / 2) / 1000000);
+			done_rate = PRCM_FULL_SPEED;
+		} else {
+			tmpset.cm_clksel2_pll |= 0x1;
+			mult = (rate / 1000000);
+			done_rate = PRCM_HALF_SPEED;
+		}
+		tmpset.cm_clksel1_pll |= ((div << 8) | (mult << 12));
+
+		/* Worst case */
+		tmpset.base_sdrc_rfr = V24XX_SDRC_RFR_CTRL_BYPASS;
+
+		if (rate == curr_prcm_set->xtal_speed)	/* If asking for 1-1 */
+			bypass = 1;
+
+		omap2_reprogram_sdrc(PRCM_FULL_SPEED, 1); /* For init_mem */
+
+		/* Force dll lock mode */
+		omap2_set_prcm(tmpset.cm_clksel1_pll, tmpset.base_sdrc_rfr,
+			       bypass);
+
+		/* Errata: ret dll entry state */
+		omap2_init_memory_params(omap2_dll_force_needed());
+		omap2_reprogram_sdrc(done_rate, 0);
+	}
+	omap2_clksel_recalc(&dpll_ck);
+	ret = 0;
+
+dpll_exit:
+	local_irq_restore(flags);
+	return(ret);
+}
+
+/* Just return the MPU speed */
+static void omap2_mpu_recalc(struct clk * clk)
+{
+	clk->rate = curr_prcm_set->mpu_speed;
+}
+
+/*
+ * Look for a rate equal to or less than the target rate, given a configuration set.
+ *
+ * What's not entirely clear is "which" field represents the key field.
+ * Some might argue L3-DDR, others ARM, others IVA. This code is simple and
+ * just uses the ARM rates.
+ */
+static long omap2_round_to_table_rate(struct clk * clk, unsigned long rate)
+{
+	struct prcm_config * ptr;
+	long highest_rate;
+
+	if (clk != &virt_prcm_set)
+		return -EINVAL;
+
+	highest_rate = -EINVAL;
+
+	for (ptr = rate_table; ptr->mpu_speed; ptr++) {
+		if (ptr->xtal_speed != sys_ck.rate)
+			continue;
+
+		highest_rate = ptr->mpu_speed;
+
+		/* Can check only after xtal frequency check */
+		if (ptr->mpu_speed <= rate)
+			break;
+	}
+	return highest_rate;
+}
+
+/*
+ * omap2_convert_field_to_div() - turn field value into integer divider
+ */
+static u32 omap2_clksel_to_divisor(u32 div_sel, u32 field_val)
+{
+	u32 i;
+	u32 clkout_array[] = {1, 2, 4, 8, 16};
+
+	if ((div_sel & SRC_RATE_SEL_MASK) == CM_SYSCLKOUT_SEL1) {
+		for (i = 0; i < 5; i++) {
+			if (field_val == i)
+				return clkout_array[i];
+		}
+		return ~0;
+	} else
+		return field_val;
+}
+
+/*
+ * Returns the CLKSEL divider register value
+ * REVISIT: This should be cleaned up to work nicely with void __iomem *
+ */
+static u32 omap2_get_clksel(u32 *div_sel, u32 *field_mask,
+			    struct clk *clk)
+{
+	int ret = ~0;
+	u32 reg_val, div_off;
+	u32 div_addr = 0;
+	u32 mask = ~0;
+
+	div_off = clk->rate_offset;
+
+	switch ((*div_sel & SRC_RATE_SEL_MASK)) {
+	case CM_MPU_SEL1:
+		div_addr = (u32)&CM_CLKSEL_MPU;
+		mask = 0x1f;
+		break;
+	case CM_DSP_SEL1:
+		div_addr = (u32)&CM_CLKSEL_DSP;
+		if (cpu_is_omap2420()) {
+			if ((div_off == 0) || (div_off == 8))
+				mask = 0x1f;
+			else if (div_off == 5)
+				mask = 0x3;
+		} else if (cpu_is_omap2430()) {
+			if (div_off == 0)
+				mask = 0x1f;
+			else if (div_off == 5)
+				mask = 0x3;
+		}
+		break;
+	case CM_GFX_SEL1:
+		div_addr = (u32)&CM_CLKSEL_GFX;
+		if (div_off == 0)
+			mask = 0x7;
+		break;
+	case CM_MODEM_SEL1:
+		div_addr = (u32)&CM_CLKSEL_MDM;
+		if (div_off == 0)
+			mask = 0xf;
+		break;
+	case CM_SYSCLKOUT_SEL1:
+		div_addr = (u32)&PRCM_CLKOUT_CTRL;
+		if ((div_off == 3) || (div_off == 11))
+			mask = 0x3;
+		break;
+	case CM_CORE_SEL1:
+		div_addr = (u32)&CM_CLKSEL1_CORE;
+		switch (div_off) {
+		case 0:					/* l3 */
+		case 8:					/* dss1 */
+		case 15:				/* vylnc-2420 */
+		case 20:				/* ssi */
+			mask = 0x1f; break;
+		case 5:					/* l4 */
+			mask = 0x3; break;
+		case 13:				/* dss2 */
+			mask = 0x1; break;
+		case 25:				/* usb */
+			mask = 0xf; break;
+		}
+	}
+
+	*field_mask = mask;
+
+	if (unlikely(mask == ~0))
+		div_addr = 0;
+
+	*div_sel = div_addr;
+
+	if (unlikely(div_addr == 0))
+		return ret;
+
+	/* Isolate field */
+	reg_val = __raw_readl((void __iomem *)div_addr) & (mask << div_off);
+
+	/* Normalize back to divider value */
+	reg_val >>= div_off;
+
+	return reg_val;
+}
+
+/*
+ * Return divider to be applied to parent clock.
+ * Return 0 on error.
+ */
+static u32 omap2_clksel_get_divisor(struct clk *clk)
+{
+	int ret = 0;
+	u32 div, div_sel, div_off, field_mask, field_val;
+
+	/* isolate control register */
+	div_sel = (SRC_RATE_SEL_MASK & clk->flags);
+
+	div_off = clk->rate_offset;
+	field_val = omap2_get_clksel(&div_sel, &field_mask, clk);
+	if (div_sel == 0)
+		return ret;
+
+	div_sel = (SRC_RATE_SEL_MASK & clk->flags);
+	div = omap2_clksel_to_divisor(div_sel, field_val);
+
+	return div;
+}
+
+/* Set the clock rate for a clock source */
+static int omap2_clk_set_rate(struct clk *clk, unsigned long rate)
+
+{
+	int ret = -EINVAL;
+	void __iomem * reg;
+	u32 div_sel, div_off, field_mask, field_val, reg_val, validrate;
+	u32 new_div = 0;
+
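+	/* CLKSEL-based clocks outside CONFIG_PARTICIPANT can be set directly */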
+	if (!(clk->flags & CONFIG_PARTICIPANT) && (clk->flags & RATE_CKCTL)) {
+		if (clk == &dpll_ck)
+			return omap2_reprogram_dpll(clk, rate);
+
+		/* Isolate control register */
+		div_sel = (SRC_RATE_SEL_MASK & clk->flags);
+		div_off = clk->src_offset;
+
+		validrate = omap2_clksel_round_rate(clk, rate, &new_div);
+		if(validrate != rate)
+			return(ret);
+
+		field_val = omap2_get_clksel(&div_sel, &field_mask, clk);
+		if (div_sel == 0)
+			return ret;
+
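+		/* SYSCLKOUT encodes its divider as a power-of-two exponent */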
+		if(clk->flags & CM_SYSCLKOUT_SEL1){
+			switch(new_div){
+			case 16: field_val = 4; break;
+			case 8:  field_val = 3; break;
+			case 4:  field_val = 2; break;
+			case 2:  field_val = 1; break;
+			case 1:  field_val = 0; break;
+			}
+		}
+		else
+			field_val = new_div;
+
+		reg = (void __iomem *)div_sel;
+
+		reg_val = __raw_readl(reg);
+		reg_val &= ~(field_mask << div_off);
+		reg_val |= (field_val << div_off);
+
+		__raw_writel(reg_val, reg);
+		clk->rate = clk->parent->rate / field_val;
+
+		if (clk->flags & DELAYED_APP)
+			__raw_writel(0x1, (void __iomem *)&PRCM_CLKCFG_CTRL);
+		ret = 0;
+	} else if (clk->set_rate != 0)
+		ret = clk->set_rate(clk, rate);
+
+	if (unlikely(ret == 0 && (clk->flags & RATE_PROPAGATES)))
+		propagate_rate(clk);
+
+	return ret;
+}
+
+/* Converts encoded control register address into a full address */
+static u32 omap2_get_src_field(u32 *type_to_addr, u32 reg_offset,
+			       struct clk *src_clk, u32 *field_mask)
+{
+	u32 val = ~0, src_reg_addr = 0, mask = 0;
+
+	/* Find target control register.*/
+	switch ((*type_to_addr & SRC_RATE_SEL_MASK)) {
+	case CM_CORE_SEL1:
+		src_reg_addr = (u32)&CM_CLKSEL1_CORE;
+		if (reg_offset == 13) {			/* DSS2_fclk */
+			mask = 0x1;
+			if (src_clk == &sys_ck)
+				val = 0;
+			if (src_clk == &func_48m_ck)
+				val = 1;
+		} else if (reg_offset == 8) {		/* DSS1_fclk */
+			mask = 0x1f;
+			if (src_clk == &sys_ck)
+				val = 0;
+			else if (src_clk == &core_ck)	/* divided clock */
+				val = 0x10;		/* rate needs fixing */
+		} else if ((reg_offset == 15) && cpu_is_omap2420()){ /*vlnyq*/
+			mask = 0x1F;
+			if(src_clk == &func_96m_ck)
+				val = 0;
+			else if (src_clk == &core_ck)
+				val = 0x10;
+		}
+		break;
+	case CM_CORE_SEL2:
+		src_reg_addr = (u32)&CM_CLKSEL2_CORE;
+		mask = 0x3;
+		if (src_clk == &func_32k_ck)
+			val = 0x0;
+		if (src_clk == &sys_ck)
+			val = 0x1;
+		if (src_clk == &alt_ck)
+			val = 0x2;
+		break;
+	case CM_WKUP_SEL1:
+		src_reg_addr = (u32)&CM_CLKSEL2_CORE;
+		mask = 0x3;
+		if (src_clk == &func_32k_ck)
+			val = 0x0;
+		if (src_clk == &sys_ck)
+			val = 0x1;
+		if (src_clk == &alt_ck)
+			val = 0x2;
+		break;
+	case CM_PLL_SEL1:
+		src_reg_addr = (u32)&CM_CLKSEL1_PLL;
+		mask = 0x1;
+		if (reg_offset == 0x3) {
+			if (src_clk == &apll96_ck)
+				val = 0;
+			if (src_clk == &alt_ck)
+				val = 1;
+		}
+		else if (reg_offset == 0x5) {
+			if (src_clk == &apll54_ck)
+				val = 0;
+			if (src_clk == &alt_ck)
+				val = 1;
+		}
+		break;
+	case CM_PLL_SEL2:
+		src_reg_addr = (u32)&CM_CLKSEL2_PLL;
+		mask = 0x3;
+		if (src_clk == &func_32k_ck)
+			val = 0x0;
+		if (src_clk == &dpll_ck)
+			val = 0x2;
+		break;
+	case CM_SYSCLKOUT_SEL1:
+		src_reg_addr = (u32)&PRCM_CLKOUT_CTRL;
+		mask = 0x3;
+		if (src_clk == &dpll_ck)
+			val = 0;
+		if (src_clk == &sys_ck)
+			val = 1;
+		if (src_clk == &func_54m_ck)
+			val = 2;
+		if (src_clk == &func_96m_ck)
+			val = 3;
+		break;
+	}
+
+	if (val == ~0)			/* Catch errors in offset */
+		*type_to_addr = 0;
+	else
+		*type_to_addr = src_reg_addr;
+	*field_mask = mask;
+
+	return val;
+}
+
+static int omap2_clk_set_parent(struct clk *clk, struct clk *new_parent)
+{
+	void __iomem * reg;
+	u32 src_sel, src_off, field_val, field_mask, reg_val, rate;
+	int ret = -EINVAL;
+
+	if (unlikely(clk->flags & CONFIG_PARTICIPANT))
+		return ret;
+
+	if (clk->flags & SRC_SEL_MASK) {	/* On-chip SEL collection */
+		src_sel = (SRC_RATE_SEL_MASK & clk->flags);
+		src_off = clk->src_offset;
+
+		if (src_sel == 0)
+			goto set_parent_error;
+
+		field_val = omap2_get_src_field(&src_sel, src_off, new_parent,
+						&field_mask);
+
+		reg = (void __iomem *)src_sel;
+
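+		/* Temporarily disable an in-use clock while its source is switched */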
+		if (clk->usecount > 0)
+			omap2_clk_disable(clk);
+
+		/* Set new source value (previous dividers if any in effect) */
+		reg_val = __raw_readl(reg) & ~(field_mask << src_off);
+		reg_val |= (field_val << src_off);
+		__raw_writel(reg_val, reg);
+
+		if (clk->flags & DELAYED_APP)
+			__raw_writel(0x1, (void __iomem *)&PRCM_CLKCFG_CTRL);
+
+		if (clk->usecount > 0)
+			omap2_clk_enable(clk);
+
+		clk->parent = new_parent;
+
+		/* SRC_RATE_SEL_MASK clocks follow their parents rates.*/
+		if ((new_parent == &core_ck) && (clk == &dss1_fck))
+			clk->rate = new_parent->rate / 0x10;
+		else
+			clk->rate = new_parent->rate;
+
+		if (unlikely(clk->flags & RATE_PROPAGATES))
+			propagate_rate(clk);
+
+		return 0;
+	} else {
+		clk->parent = new_parent;
+		rate = new_parent->rate;
+		omap2_clk_set_rate(clk, rate);
+		ret = 0;
+	}
+
+ set_parent_error:
+	return ret;
+}
+
+/* Sets basic clocks based on the specified rate */
+static int omap2_select_table_rate(struct clk * clk, unsigned long rate)
+{
+	u32 flags, cur_rate, done_rate, bypass = 0;
+	u8 cpu_mask = 0;
+	struct prcm_config *prcm;
+	unsigned long found_speed = 0;
+
+	if (clk != &virt_prcm_set)
+		return -EINVAL;
+
+	/* FIXME: Change cpu_is_omap2420() to cpu_is_omap242x() */
+	if (cpu_is_omap2420())
+		cpu_mask = RATE_IN_242X;
+	else if (cpu_is_omap2430())
+		cpu_mask = RATE_IN_243X;
+
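+	/* Pick the fastest table entry at or below the requested MPU rate */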
+	for (prcm = rate_table; prcm->mpu_speed; prcm++) {
+		if (!(prcm->flags & cpu_mask))
+			continue;
+
+		if (prcm->xtal_speed != sys_ck.rate)
+			continue;
+
+		if (prcm->mpu_speed <= rate) {
+			found_speed = prcm->mpu_speed;
+			break;
+		}
+	}
+
+	if (!found_speed) {
+		printk(KERN_INFO "Could not set MPU rate to %luMHz\n",
+	 rate / 1000000);
+		return -EINVAL;
+	}
+
+	curr_prcm_set = prcm;
+	cur_rate = omap2_get_dpll_rate(&dpll_ck);
+
+	if (prcm->dpll_speed == cur_rate / 2) {
+		omap2_reprogram_sdrc(PRCM_HALF_SPEED, 1);
+	} else if (prcm->dpll_speed == cur_rate * 2) {
+		omap2_reprogram_sdrc(PRCM_FULL_SPEED, 1);
+	} else if (prcm->dpll_speed != cur_rate) {
+		local_irq_save(flags);
+
+		if (prcm->dpll_speed == prcm->xtal_speed)
+			bypass = 1;
+
+		if ((prcm->cm_clksel2_pll & 0x3) == 2)
+			done_rate = PRCM_FULL_SPEED;
+		else
+			done_rate = PRCM_HALF_SPEED;
+
+		/* MPU divider */
+		CM_CLKSEL_MPU = prcm->cm_clksel_mpu;
+
+		/* dsp + iva1 div(2420), iva2.1(2430) */
+		CM_CLKSEL_DSP = prcm->cm_clksel_dsp;
+
+		CM_CLKSEL_GFX = prcm->cm_clksel_gfx;
+
+		/* Major subsystem dividers */
+		CM_CLKSEL1_CORE = prcm->cm_clksel1_core;
+		if (cpu_is_omap2430())
+			CM_CLKSEL_MDM = prcm->cm_clksel_mdm;
+
+		/* x2 to enter init_mem */
+		omap2_reprogram_sdrc(PRCM_FULL_SPEED, 1);
+
+		omap2_set_prcm(prcm->cm_clksel1_pll, prcm->base_sdrc_rfr,
+			       bypass);
+
+		omap2_init_memory_params(omap2_dll_force_needed());
+		omap2_reprogram_sdrc(done_rate, 0);
+
+		local_irq_restore(flags);
+	}
+	omap2_clksel_recalc(&dpll_ck);
+
+	return 0;
+}
+
+/*-------------------------------------------------------------------------
+ * Omap2 clock reset and init functions
+ *-------------------------------------------------------------------------*/
+
+static struct clk_functions omap2_clk_functions = {
+	.clk_enable		= omap2_clk_enable,
+	.clk_disable		= omap2_clk_disable,
+	.clk_use		= omap2_clk_use,
+	.clk_unuse		= omap2_clk_unuse,
+	.clk_round_rate		= omap2_clk_round_rate,
+	.clk_set_rate		= omap2_clk_set_rate,
+	.clk_set_parent		= omap2_clk_set_parent,
+};
+
+static void __init omap2_get_crystal_rate(struct clk *osc, struct clk *sys)
+{
+	u32 div, aplls, sclk = 13000000;
+
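+	/* Decode the APLL input clock selection to find the crystal rate */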
+	aplls = CM_CLKSEL1_PLL;
+	aplls &= ((1 << 23) | (1 << 24) | (1 << 25));
+	aplls >>= 23;			/* Isolate field, 0,2,3 */
+
+	if (aplls == 0)
+		sclk = 19200000;
+	else if (aplls == 2)
+		sclk = 13000000;
+	else if (aplls == 3)
+		sclk = 12000000;
+
+	div = PRCM_CLKSRC_CTRL;
+	div &= ((1 << 7) | (1 << 6));
+	div >>= sys->rate_offset;
+
+	osc->rate = sclk * div;
+	sys->rate = sclk;
+}
+
+#ifdef CONFIG_OMAP_RESET_CLOCKS
+static void __init omap2_disable_unused_clocks(void)
+{
+	struct clk *ck;
+	u32 regval32;
+
+	list_for_each_entry(ck, &clocks, node) {
+		if (ck->usecount > 0 || (ck->flags & ALWAYS_ENABLED) ||
+			ck->enable_reg == 0)
+			continue;
+
+		regval32 = __raw_readl(ck->enable_reg);
+		if ((regval32 & (1 << ck->enable_bit)) == 0)
+			continue;
+
+		printk(KERN_INFO "Disabling unused clock \"%s\"\n", ck->name);
+		omap2_clk_disable(ck);
+	}
+}
+late_initcall(omap2_disable_unused_clocks);
+#endif
+
+/*
+ * Switch the MPU rate if specified on cmdline.
+ * We cannot do this early until cmdline is parsed.
+ */
+static int __init omap2_clk_arch_init(void)
+{
+	if (!mpurate)
+		return -EINVAL;
+
+	if (omap2_select_table_rate(&virt_prcm_set, mpurate))
+		printk(KERN_ERR "Could not find matching MPU rate\n");
+
+	propagate_rate(&osc_ck);		/* update main root fast */
+	propagate_rate(&func_32k_ck);		/* update main root slow */
+
+	printk(KERN_INFO "Switched to new clocking rate (Crystal/DPLL/MPU): "
+	       "%ld.%01ld/%ld/%ld MHz\n",
+	       (sys_ck.rate / 1000000), (sys_ck.rate / 100000) % 10,
+	       (dpll_ck.rate / 1000000), (mpu_ck.rate / 1000000)) ;
+
+	return 0;
+}
+arch_initcall(omap2_clk_arch_init);
+
+int __init omap2_clk_init(void)
+{
+	struct prcm_config *prcm;
+	struct clk ** clkp;
+	u32 clkrate;
+
+	clk_init(&omap2_clk_functions);
+	omap2_get_crystal_rate(&osc_ck, &sys_ck);
+
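+	/* Register only the clocks valid for the detected chip type */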
+	for (clkp = onchip_clks; clkp < onchip_clks + ARRAY_SIZE(onchip_clks);
+	     clkp++) {
+
+		if ((*clkp)->flags & CLOCK_IN_OMAP242X && cpu_is_omap2420()) {
+			clk_register(*clkp);
+			continue;
+		}
+
+		if ((*clkp)->flags & CLOCK_IN_OMAP243X && cpu_is_omap2430()) {
+			clk_register(*clkp);
+			continue;
+		}
+	}
+
+	/* Check the MPU rate set by bootloader */
+	clkrate = omap2_get_dpll_rate(&dpll_ck);
+	for (prcm = rate_table; prcm->mpu_speed; prcm++) {
+		if (prcm->xtal_speed != sys_ck.rate)
+			continue;
+		if (prcm->dpll_speed <= clkrate)
+			 break;
+	}
+	curr_prcm_set = prcm;
+
+	propagate_rate(&osc_ck);		/* update main root fast */
+	propagate_rate(&func_32k_ck);		/* update main root slow */
+
+	printk(KERN_INFO "Clocking rate (Crystal/DPLL/MPU): "
+	       "%ld.%01ld/%ld/%ld MHz\n",
+	       (sys_ck.rate / 1000000), (sys_ck.rate / 100000) % 10,
+	       (dpll_ck.rate / 1000000), (mpu_ck.rate / 1000000)) ;
+
+	/*
+	 * Only enable those clocks we will need, let the drivers
+	 * enable other clocks as necessary
+	 */
+	clk_use(&sync_32k_ick);
+	clk_use(&omapctrl_ick);
+	if (cpu_is_omap2430())
+		clk_use(&sdrc_ick);
+
+	return 0;
+}
diff --git a/arch/arm/mach-omap2/clock.h b/arch/arm/mach-omap2/clock.h
new file mode 100644
index 0000000..4aeab55
--- /dev/null
+++ b/arch/arm/mach-omap2/clock.h
@@ -0,0 +1,2103 @@
+/*
+ *  linux/arch/arm/mach-omap2/clock.h
+ *
+ *  Copyright (C) 2005 Texas Instruments Inc.
+ *  Richard Woodruff <r-woodruff2@ti.com>
+ *  Created for OMAP2.
+ *
+ *  Copyright (C) 2004 Nokia corporation
+ *  Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
+ *  Based on clocks.h by Tony Lindgren, Gordon McNutt and RidgeRun, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP2_CLOCK_H
+#define __ARCH_ARM_MACH_OMAP2_CLOCK_H
+
+static void omap2_sys_clk_recalc(struct clk * clk);
+static void omap2_clksel_recalc(struct clk * clk);
+static void omap2_followparent_recalc(struct clk * clk);
+static void omap2_propagate_rate(struct clk * clk);
+static void omap2_mpu_recalc(struct clk * clk);
+static int omap2_select_table_rate(struct clk * clk, unsigned long rate);
+static long omap2_round_to_table_rate(struct clk * clk, unsigned long rate);
+static void omap2_clk_unuse(struct clk *clk);
+static u32 omap2_clksel_to_divisor(u32 div_sel, u32 field_val);
+static u32 omap2_clksel_get_divisor(struct clk *clk);
+
+
+#define RATE_IN_242X	(1 << 0)
+#define RATE_IN_243X	(1 << 1)
+
+/* Memory timings */
+#define M_DDR		1
+#define M_LOCK_CTRL	(1 << 2)
+#define M_UNLOCK	0
+#define M_LOCK		1
+
+struct memory_timings {
+	u32 m_type;		/* ddr = 1, sdr = 0 */
+	u32 dll_mode;		/* use lock mode = 1, unlock mode = 0 */
+	u32 slow_dll_ctrl;	/* unlock mode, dll value for slow speed */
+	u32 fast_dll_ctrl;	/* unlock mode, dll value for fast speed */
+	u32 base_cs;		/* base chip select to use for calculations */
+};
+
+/* Key dividers which make up a PRCM set. Ratios for a PRCM are mandated.
+ * xtal_speed, dpll_speed, mpu_speed, CM_CLKSEL_MPU,CM_CLKSEL_DSP
+ * CM_CLKSEL_GFX, CM_CLKSEL1_CORE, CM_CLKSEL1_PLL CM_CLKSEL2_PLL, CM_CLKSEL_MDM
+ */
+struct prcm_config {
+	unsigned long xtal_speed;	/* crystal rate */
+	unsigned long dpll_speed;	/* dpll: out*xtal*M/(N-1)table_recalc */
+	unsigned long mpu_speed;	/* speed of MPU */
+	unsigned long cm_clksel_mpu;	/* mpu divider */
+	unsigned long cm_clksel_dsp;	/* dsp+iva1 div(2420), iva2.1(2430) */
+	unsigned long cm_clksel_gfx;	/* gfx dividers */
+	unsigned long cm_clksel1_core;	/* major subsystem dividers */
+	unsigned long cm_clksel1_pll;	/* m,n */
+	unsigned long cm_clksel2_pll;	/* dpllx1 or x2 out */
+	unsigned long cm_clksel_mdm;	/* modem dividers 2430 only */
+	unsigned long base_sdrc_rfr;	/* base refresh timing for a set */
+	unsigned char flags;
+};
+
+/* Mask for clksels which support parent setting in set_rate */
+#define SRC_SEL_MASK (CM_CORE_SEL1 | CM_CORE_SEL2 | CM_WKUP_SEL1 | \
+			CM_PLL_SEL1 | CM_PLL_SEL2 | CM_SYSCLKOUT_SEL1)
+
+/* Mask for clksel regs which support rate operations */
+#define SRC_RATE_SEL_MASK (CM_MPU_SEL1 | CM_DSP_SEL1 | CM_GFX_SEL1 | \
+			CM_MODEM_SEL1 | CM_CORE_SEL1 | CM_CORE_SEL2 | \
+			CM_WKUP_SEL1 | CM_PLL_SEL1 | CM_PLL_SEL2 | \
+			CM_SYSCLKOUT_SEL1)
+
+/*
+ * The OMAP2 processor can be run at several discrete 'PRCM configurations'.
+ * These configurations are characterized by voltage and speed for clocks.
+ * The device is only validated for certain combinations. One way to express
+ * these combinations is via the 'ratios' at which the clocks operate with
+ * respect to each other. These ratio sets are for a given voltage/DPLL
+ * setting. All configurations can be described by a DPLL setting and a ratio.
+ * There are 3 ratio sets for the 2430 and X ratio sets for 2420.
+ *
+ * 2430 differs from 2420 in that there are no more phase synchronizers used.
+ * They each have a slightly different clock domain setup: 2420 (iva1, dsp) vs
+ * 2430 (iva2.1, no dsp, mdm).
+ */
+
+/* Core fields for cm_clksel, not ratio governed */
+#define RX_CLKSEL_DSS1			(0x10 << 8)
+#define RX_CLKSEL_DSS2			(0x0 << 13)
+#define RX_CLKSEL_SSI			(0x5 << 20)
+
+/*-------------------------------------------------------------------------
+ * Voltage/DPLL ratios
+ *-------------------------------------------------------------------------*/
+
+/* 2430 Ratio's, 2430-Ratio Config 1 */
+#define R1_CLKSEL_L3			(4 << 0)
+#define R1_CLKSEL_L4			(2 << 5)
+#define R1_CLKSEL_USB			(4 << 25)
+#define R1_CM_CLKSEL1_CORE_VAL		R1_CLKSEL_USB | RX_CLKSEL_SSI | \
+					RX_CLKSEL_DSS2 | RX_CLKSEL_DSS1 | \
+					R1_CLKSEL_L4 | R1_CLKSEL_L3
+#define R1_CLKSEL_MPU			(2 << 0)
+#define R1_CM_CLKSEL_MPU_VAL		R1_CLKSEL_MPU
+#define R1_CLKSEL_DSP			(2 << 0)
+#define R1_CLKSEL_DSP_IF		(2 << 5)
+#define R1_CM_CLKSEL_DSP_VAL		R1_CLKSEL_DSP | R1_CLKSEL_DSP_IF
+#define R1_CLKSEL_GFX			(2 << 0)
+#define R1_CM_CLKSEL_GFX_VAL		R1_CLKSEL_GFX
+#define R1_CLKSEL_MDM			(4 << 0)
+#define R1_CM_CLKSEL_MDM_VAL		R1_CLKSEL_MDM
+
+/* 2430-Ratio Config 2 */
+#define R2_CLKSEL_L3			(6 << 0)
+#define R2_CLKSEL_L4			(2 << 5)
+#define R2_CLKSEL_USB			(2 << 25)
+#define R2_CM_CLKSEL1_CORE_VAL		R2_CLKSEL_USB | RX_CLKSEL_SSI | \
+					RX_CLKSEL_DSS2 | RX_CLKSEL_DSS1 | \
+					R2_CLKSEL_L4 | R2_CLKSEL_L3
+#define R2_CLKSEL_MPU			(2 << 0)
+#define R2_CM_CLKSEL_MPU_VAL		R2_CLKSEL_MPU
+#define R2_CLKSEL_DSP			(2 << 0)
+#define R2_CLKSEL_DSP_IF		(3 << 5)
+#define R2_CM_CLKSEL_DSP_VAL		R2_CLKSEL_DSP | R2_CLKSEL_DSP_IF
+#define R2_CLKSEL_GFX			(2 << 0)
+#define R2_CM_CLKSEL_GFX_VAL		R2_CLKSEL_GFX
+#define R2_CLKSEL_MDM			(6 << 0)
+#define R2_CM_CLKSEL_MDM_VAL		R2_CLKSEL_MDM
+
+/* 2430-Ratio Boot (BYPASS) */
+#define RB_CLKSEL_L3			(1 << 0)
+#define RB_CLKSEL_L4			(1 << 5)
+#define RB_CLKSEL_USB			(1 << 25)
+#define RB_CM_CLKSEL1_CORE_VAL		RB_CLKSEL_USB | RX_CLKSEL_SSI | \
+					RX_CLKSEL_DSS2 | RX_CLKSEL_DSS1 | \
+					RB_CLKSEL_L4 | RB_CLKSEL_L3
+#define RB_CLKSEL_MPU			(1 << 0)
+#define RB_CM_CLKSEL_MPU_VAL		RB_CLKSEL_MPU
+#define RB_CLKSEL_DSP			(1 << 0)
+#define RB_CLKSEL_DSP_IF		(1 << 5)
+#define RB_CM_CLKSEL_DSP_VAL		RB_CLKSEL_DSP | RB_CLKSEL_DSP_IF
+#define RB_CLKSEL_GFX			(1 << 0)
+#define RB_CM_CLKSEL_GFX_VAL		RB_CLKSEL_GFX
+#define RB_CLKSEL_MDM			(1 << 0)
+#define RB_CM_CLKSEL_MDM_VAL		RB_CLKSEL_MDM
+
+/* 2420 Ratio Equivalents */
+#define RXX_CLKSEL_VLYNQ		(0x12 << 15)
+#define RXX_CLKSEL_SSI			(0x8 << 20)
+
+/* 2420-PRCM III 532MHz core */
+#define RIII_CLKSEL_L3			(4 << 0)	/* 133MHz */
+#define RIII_CLKSEL_L4			(2 << 5)	/* 66.5MHz */
+#define RIII_CLKSEL_USB			(4 << 25)	/* 33.25MHz */
+#define RIII_CM_CLKSEL1_CORE_VAL	RIII_CLKSEL_USB | RXX_CLKSEL_SSI | \
+					RXX_CLKSEL_VLYNQ | RX_CLKSEL_DSS2 | \
+					RX_CLKSEL_DSS1 | RIII_CLKSEL_L4 | \
+					RIII_CLKSEL_L3
+#define RIII_CLKSEL_MPU			(2 << 0)	/* 266MHz */
+#define RIII_CM_CLKSEL_MPU_VAL		RIII_CLKSEL_MPU
+#define RIII_CLKSEL_DSP			(3 << 0)	/* c5x - 177.3MHz */
+#define RIII_CLKSEL_DSP_IF		(2 << 5)	/* c5x - 88.67MHz */
+#define RIII_SYNC_DSP			(1 << 7)	/* Enable sync */
+#define RIII_CLKSEL_IVA			(6 << 8)	/* iva1 - 88.67MHz */
+#define RIII_SYNC_IVA			(1 << 13)	/* Enable sync */
+#define RIII_CM_CLKSEL_DSP_VAL		RIII_SYNC_IVA | RIII_CLKSEL_IVA | \
+					RIII_SYNC_DSP | RIII_CLKSEL_DSP_IF | \
+					RIII_CLKSEL_DSP
+#define RIII_CLKSEL_GFX			(2 << 0)	/* 66.5MHz */
+#define RIII_CM_CLKSEL_GFX_VAL		RIII_CLKSEL_GFX
+
+/* 2420-PRCM II 600MHz core */
+#define RII_CLKSEL_L3			(6 << 0)	/* 100MHz */
+#define RII_CLKSEL_L4			(2 << 5)	/* 50MHz */
+#define RII_CLKSEL_USB			(2 << 25)	/* 50MHz */
+#define RII_CM_CLKSEL1_CORE_VAL		RII_CLKSEL_USB | \
+					RXX_CLKSEL_SSI | RXX_CLKSEL_VLYNQ | \
+					RX_CLKSEL_DSS2 | RX_CLKSEL_DSS1 | \
+					RII_CLKSEL_L4 | RII_CLKSEL_L3
+#define RII_CLKSEL_MPU			(2 << 0)	/* 300MHz */
+#define RII_CM_CLKSEL_MPU_VAL		RII_CLKSEL_MPU
+#define RII_CLKSEL_DSP			(3 << 0)	/* c5x - 200MHz */
+#define RII_CLKSEL_DSP_IF		(2 << 5)	/* c5x - 100MHz */
+#define RII_SYNC_DSP			(0 << 7)	/* Bypass sync */
+#define RII_CLKSEL_IVA			(6 << 8)	/* iva1 - 200MHz */
+#define RII_SYNC_IVA			(0 << 13)	/* Bypass sync */
+#define RII_CM_CLKSEL_DSP_VAL		RII_SYNC_IVA | RII_CLKSEL_IVA | \
+					RII_SYNC_DSP | RII_CLKSEL_DSP_IF | \
+					RII_CLKSEL_DSP
+#define RII_CLKSEL_GFX			(2 << 0)	/* 50MHz */
+#define RII_CM_CLKSEL_GFX_VAL		RII_CLKSEL_GFX
+
+/* 2420-PRCM VII (boot) */
+#define RVII_CLKSEL_L3			(1 << 0)
+#define RVII_CLKSEL_L4			(1 << 5)
+#define RVII_CLKSEL_DSS1		(1 << 8)
+#define RVII_CLKSEL_DSS2		(0 << 13)
+#define RVII_CLKSEL_VLYNQ		(1 << 15)
+#define RVII_CLKSEL_SSI			(1 << 20)
+#define RVII_CLKSEL_USB			(1 << 25)
+
+#define RVII_CM_CLKSEL1_CORE_VAL	RVII_CLKSEL_USB | RVII_CLKSEL_SSI | \
+					RVII_CLKSEL_VLYNQ | RVII_CLKSEL_DSS2 | \
+					RVII_CLKSEL_DSS1 | RVII_CLKSEL_L4 | RVII_CLKSEL_L3
+
+#define RVII_CLKSEL_MPU			(1 << 0) /* all divide by 1 */
+#define RVII_CM_CLKSEL_MPU_VAL		RVII_CLKSEL_MPU
+
+#define RVII_CLKSEL_DSP			(1 << 0)
+#define RVII_CLKSEL_DSP_IF		(1 << 5)
+#define RVII_SYNC_DSP			(0 << 7)
+#define RVII_CLKSEL_IVA			(1 << 8)
+#define RVII_SYNC_IVA			(0 << 13)
+#define RVII_CM_CLKSEL_DSP_VAL		RVII_SYNC_IVA | RVII_CLKSEL_IVA | RVII_SYNC_DSP | \
+					RVII_CLKSEL_DSP_IF | RVII_CLKSEL_DSP
+
+#define RVII_CLKSEL_GFX			(1 << 0)
+#define RVII_CM_CLKSEL_GFX_VAL		RVII_CLKSEL_GFX
+
+/*-------------------------------------------------------------------------
+ * 2430 Target modes: Along with each configuration the CPU has several
+ * modes which go along with them. Modes mainly describe the DPLL
+ * combinations to go along with a ratio.
+ *-------------------------------------------------------------------------*/
+
+/* Hardware governed */
+#define MX_48M_SRC			(0 << 3)
+#define MX_54M_SRC			(0 << 5)
+#define MX_APLLS_CLIKIN_12		(3 << 23)
+#define MX_APLLS_CLIKIN_13		(2 << 23)
+#define MX_APLLS_CLIKIN_19_2		(0 << 23)
+
+/*
+ * 2430 - standalone, 2*ref*M/(n+1), M/N is for exactness not relock speed
+ * #2	(ratio1) baseport-target
+ * #5a	(ratio1) baseport-target, target DPLL = 266*2 = 532MHz
+ */
+#define M5A_DPLL_MULT_12		(133 << 12)
+#define M5A_DPLL_DIV_12			(5 << 8)
+#define M5A_CM_CLKSEL1_PLL_12_VAL	MX_48M_SRC | MX_54M_SRC | \
+					M5A_DPLL_DIV_12 | M5A_DPLL_MULT_12 | \
+					MX_APLLS_CLIKIN_12
+#define M5A_DPLL_MULT_13		(266 << 12)
+#define M5A_DPLL_DIV_13			(12 << 8)
+#define M5A_CM_CLKSEL1_PLL_13_VAL	MX_48M_SRC | MX_54M_SRC | \
+					M5A_DPLL_DIV_13 | M5A_DPLL_MULT_13 | \
+					MX_APLLS_CLIKIN_13
+#define M5A_DPLL_MULT_19		(180 << 12)
+#define M5A_DPLL_DIV_19			(12 << 8)
+#define M5A_CM_CLKSEL1_PLL_19_VAL	MX_48M_SRC | MX_54M_SRC | \
+					M5A_DPLL_DIV_19 | M5A_DPLL_MULT_19 | \
+					MX_APLLS_CLIKIN_19_2
+/* #5b	(ratio1) target DPLL = 200*2 = 400MHz */
+#define M5B_DPLL_MULT_12		(50 << 12)
+#define M5B_DPLL_DIV_12			(2 << 8)
+#define M5B_CM_CLKSEL1_PLL_12_VAL	MX_48M_SRC | MX_54M_SRC | \
+					M5B_DPLL_DIV_12 | M5B_DPLL_MULT_12 | \
+					MX_APLLS_CLIKIN_12
+#define M5B_DPLL_MULT_13		(200 << 12)
+#define M5B_DPLL_DIV_13			(12 << 8)
+
+#define M5B_CM_CLKSEL1_PLL_13_VAL	MX_48M_SRC | MX_54M_SRC | \
+					M5B_DPLL_DIV_13 | M5B_DPLL_MULT_13 | \
+					MX_APLLS_CLIKIN_13
+#define M5B_DPLL_MULT_19		(125 << 12)
+#define M5B_DPLL_DIV_19			(31 << 8)
+#define M5B_CM_CLKSEL1_PLL_19_VAL	MX_48M_SRC | MX_54M_SRC | \
+					M5B_DPLL_DIV_19 | M5B_DPLL_MULT_19 | \
+					MX_APLLS_CLIKIN_19_2
+/*
+ * #4	(ratio2)
+ * #3	(ratio2) baseport-target, target DPLL = 330*2 = 660MHz
+ */
+#define M3_DPLL_MULT_12			(55 << 12)
+#define M3_DPLL_DIV_12			(1 << 8)
+#define M3_CM_CLKSEL1_PLL_12_VAL	MX_48M_SRC | MX_54M_SRC | \
+					M3_DPLL_DIV_12 | M3_DPLL_MULT_12 | \
+					MX_APLLS_CLIKIN_12
+#define M3_DPLL_MULT_13			(330 << 12)
+#define M3_DPLL_DIV_13			(12 << 8)
+#define M3_CM_CLKSEL1_PLL_13_VAL	MX_48M_SRC | MX_54M_SRC | \
+					M3_DPLL_DIV_13 | M3_DPLL_MULT_13 | \
+					MX_APLLS_CLIKIN_13
+#define M3_DPLL_MULT_19			(275 << 12)
+#define M3_DPLL_DIV_19			(15 << 8)
+#define M3_CM_CLKSEL1_PLL_19_VAL	MX_48M_SRC | MX_54M_SRC | \
+					M3_DPLL_DIV_19 | M3_DPLL_MULT_19 | \
+					MX_APLLS_CLIKIN_19_2
+/* boot (boot) */
+#define MB_DPLL_MULT			(1 << 12)
+#define MB_DPLL_DIV			(0 << 8)
+#define MB_CM_CLKSEL1_PLL_12_VAL	MX_48M_SRC | MX_54M_SRC | MB_DPLL_DIV |\
+					MB_DPLL_MULT | MX_APLLS_CLIKIN_12
+
+#define MB_CM_CLKSEL1_PLL_13_VAL	MX_48M_SRC | MX_54M_SRC | MB_DPLL_DIV |\
+					MB_DPLL_MULT | MX_APLLS_CLIKIN_13
+
+#define MB_CM_CLKSEL1_PLL_19_VAL	MX_48M_SRC | MX_54M_SRC | MB_DPLL_DIV |\
+					MB_DPLL_MULT | MX_APLLS_CLIKIN_19_2
+
+/*
+ * 2430 - chassis (sedna)
+ * 165 (ratio1) same as above #2
+ * 150 (ratio1)
+ * 133 (ratio2) same as above #4
+ * 110 (ratio2) same as above #3
+ * 104 (ratio2)
+ * boot (boot)
+ */
+
+/*
+ * 2420 Equivalent - mode registers
+ * PRCM II , target DPLL = 2*300MHz = 600MHz
+ */
+#define MII_DPLL_MULT_12		(50 << 12)
+#define MII_DPLL_DIV_12			(1 << 8)
+#define MII_CM_CLKSEL1_PLL_12_VAL	MX_48M_SRC | MX_54M_SRC | \
+					MII_DPLL_DIV_12 | MII_DPLL_MULT_12 | \
+					MX_APLLS_CLIKIN_12
+#define MII_DPLL_MULT_13		(300 << 12)
+#define MII_DPLL_DIV_13			(12 << 8)
+#define MII_CM_CLKSEL1_PLL_13_VAL	MX_48M_SRC | MX_54M_SRC | \
+					MII_DPLL_DIV_13 | MII_DPLL_MULT_13 | \
+					MX_APLLS_CLIKIN_13
+
+/* PRCM III target DPLL = 2*266 = 532MHz*/
+#define MIII_DPLL_MULT_12		(133 << 12)
+#define MIII_DPLL_DIV_12		(5 << 8)
+#define MIII_CM_CLKSEL1_PLL_12_VAL	MX_48M_SRC | MX_54M_SRC | \
+					MIII_DPLL_DIV_12 | MIII_DPLL_MULT_12 | \
+					MX_APLLS_CLIKIN_12
+#define MIII_DPLL_MULT_13		(266 << 12)
+#define MIII_DPLL_DIV_13		(12 << 8)
+#define MIII_CM_CLKSEL1_PLL_13_VAL	MX_48M_SRC | MX_54M_SRC | \
+					MIII_DPLL_DIV_13 | MIII_DPLL_MULT_13 | \
+					MX_APLLS_CLIKIN_13
+
+/* PRCM VII (boot bypass) */
+#define MVII_CM_CLKSEL1_PLL_12_VAL	MB_CM_CLKSEL1_PLL_12_VAL
+#define MVII_CM_CLKSEL1_PLL_13_VAL	MB_CM_CLKSEL1_PLL_13_VAL
+
+/* High and low operation value */
+#define MX_CLKSEL2_PLL_2x_VAL		(2 << 0)
+#define MX_CLKSEL2_PLL_1x_VAL		(1 << 0)
+
+/*
+ * These represent optimal values for common parts; they won't work for all.
+ * As long as you scale down, most parameters still work, they just
+ * become sub-optimal. The RFR value goes in the opposite direction. If you
+ * don't adjust it down as your clock period increases, the refresh interval
+ * will not be met. Setting all parameters for complete worst case may work,
+ * but may cut memory performance by 2x. Due to errata the DLLs need to be
+ * unlocked and their value needs run time calibration.	A dynamic call is
+ * needed for that as no single right value exists across production samples.
+ *
+ * Only the FULL speed values are given. Current code is such that rate
+ * changes must be made at DPLLoutx2. The actual value adjustment for low
+ * frequency operation will be handled by omap_set_performance()
+ *
+ * Having the boot loader boot up at the fastest L4 speed available will
+ * likely result in something which you can switch between.
+ */
+#define V24XX_SDRC_RFR_CTRL_133MHz	(0x0003de00 | 1)
+#define V24XX_SDRC_RFR_CTRL_100MHz	(0x0002da01 | 1)
+#define V24XX_SDRC_RFR_CTRL_110MHz	(0x0002da01 | 1) /* Need to calc */
+#define V24XX_SDRC_RFR_CTRL_BYPASS	(0x00005000 | 1) /* Need to calc */
+
+/* MPU speed defines */
+#define S12M	12000000
+#define S13M	13000000
+#define S19M	19200000
+#define S26M	26000000
+#define S100M	100000000
+#define S133M	133000000
+#define S150M	150000000
+#define S165M	165000000
+#define S200M	200000000
+#define S266M	266000000
+#define S300M	300000000
+#define S330M	330000000
+#define S400M	400000000
+#define S532M	532000000
+#define S600M	600000000
+#define S660M	660000000
+
+/*-------------------------------------------------------------------------
+ * Key dividers which make up a PRCM set. Ratios for a PRCM are mandated.
+ * xtal_speed, dpll_speed, mpu_speed, CM_CLKSEL_MPU,
+ * CM_CLKSEL_DSP, CM_CLKSEL_GFX, CM_CLKSEL1_CORE, CM_CLKSEL1_PLL,
+ * CM_CLKSEL2_PLL, CM_CLKSEL_MDM
+ *
+ * The table is filled in based on the H4 boards and 2430-SDP variants available.
+ * There are quite a few more rate combinations which could be defined.
+ *
+ * When multiple values are defined, startup will try to choose the
+ * fastest one. If a 'fast' value is defined, then automatically the /2
+ * one should be included as it can be used.	Generally having more than
+ * one fast set does not make sense, as static timings need to be changed
+ * to change the set.	 The exception is the bypass setting, which is
+ * available for low power bypass.
+ *
+ * Note: This table needs to be sorted, fastest to slowest.
+ *-------------------------------------------------------------------------*/
+static struct prcm_config rate_table[] = {
+	/* PRCM II - FAST */
+	{S12M, S600M, S300M, RII_CM_CLKSEL_MPU_VAL,		/* 300MHz ARM */
+		RII_CM_CLKSEL_DSP_VAL, RII_CM_CLKSEL_GFX_VAL,
+		RII_CM_CLKSEL1_CORE_VAL, MII_CM_CLKSEL1_PLL_12_VAL,
+		MX_CLKSEL2_PLL_2x_VAL, 0, V24XX_SDRC_RFR_CTRL_100MHz,
+		RATE_IN_242X},
+
+	{S13M, S600M, S300M, RII_CM_CLKSEL_MPU_VAL,		/* 300MHz ARM */
+		RII_CM_CLKSEL_DSP_VAL, RII_CM_CLKSEL_GFX_VAL,
+		RII_CM_CLKSEL1_CORE_VAL, MII_CM_CLKSEL1_PLL_13_VAL,
+		MX_CLKSEL2_PLL_2x_VAL, 0, V24XX_SDRC_RFR_CTRL_100MHz,
+		RATE_IN_242X},
+
+	/* PRCM III - FAST */
+	{S12M, S532M, S266M, RIII_CM_CLKSEL_MPU_VAL,		/* 266MHz ARM */
+		RIII_CM_CLKSEL_DSP_VAL, RIII_CM_CLKSEL_GFX_VAL,
+		RIII_CM_CLKSEL1_CORE_VAL, MIII_CM_CLKSEL1_PLL_12_VAL,
+		MX_CLKSEL2_PLL_2x_VAL, 0, V24XX_SDRC_RFR_CTRL_133MHz,
+		RATE_IN_242X},
+
+	{S13M, S532M, S266M, RIII_CM_CLKSEL_MPU_VAL,		/* 266MHz ARM */
+		RIII_CM_CLKSEL_DSP_VAL, RIII_CM_CLKSEL_GFX_VAL,
+		RIII_CM_CLKSEL1_CORE_VAL, MIII_CM_CLKSEL1_PLL_13_VAL,
+		MX_CLKSEL2_PLL_2x_VAL, 0, V24XX_SDRC_RFR_CTRL_133MHz,
+		RATE_IN_242X},
+
+	/* PRCM II - SLOW */
+	{S12M, S300M, S150M, RII_CM_CLKSEL_MPU_VAL,		/* 150MHz ARM */
+		RII_CM_CLKSEL_DSP_VAL, RII_CM_CLKSEL_GFX_VAL,
+		RII_CM_CLKSEL1_CORE_VAL, MII_CM_CLKSEL1_PLL_12_VAL,
+		MX_CLKSEL2_PLL_2x_VAL, 0, V24XX_SDRC_RFR_CTRL_100MHz,
+		RATE_IN_242X},
+
+	{S13M, S300M, S150M, RII_CM_CLKSEL_MPU_VAL,		/* 150MHz ARM */
+		RII_CM_CLKSEL_DSP_VAL, RII_CM_CLKSEL_GFX_VAL,
+		RII_CM_CLKSEL1_CORE_VAL, MII_CM_CLKSEL1_PLL_13_VAL,
+		MX_CLKSEL2_PLL_2x_VAL, 0, V24XX_SDRC_RFR_CTRL_100MHz,
+		RATE_IN_242X},
+
+	/* PRCM III - SLOW */
+	{S12M, S266M, S133M, RIII_CM_CLKSEL_MPU_VAL,		/* 133MHz ARM */
+		RIII_CM_CLKSEL_DSP_VAL, RIII_CM_CLKSEL_GFX_VAL,
+		RIII_CM_CLKSEL1_CORE_VAL, MIII_CM_CLKSEL1_PLL_12_VAL,
+		MX_CLKSEL2_PLL_2x_VAL, 0, V24XX_SDRC_RFR_CTRL_133MHz,
+		RATE_IN_242X},
+
+	{S13M, S266M, S133M, RIII_CM_CLKSEL_MPU_VAL,		/* 133MHz ARM */
+		RIII_CM_CLKSEL_DSP_VAL, RIII_CM_CLKSEL_GFX_VAL,
+		RIII_CM_CLKSEL1_CORE_VAL, MIII_CM_CLKSEL1_PLL_13_VAL,
+		MX_CLKSEL2_PLL_2x_VAL, 0, V24XX_SDRC_RFR_CTRL_133MHz,
+		RATE_IN_242X},
+
+	/* PRCM-VII (boot-bypass) */
+	{S12M, S12M, S12M, RVII_CM_CLKSEL_MPU_VAL,		/* 12MHz ARM*/
+		RVII_CM_CLKSEL_DSP_VAL, RVII_CM_CLKSEL_GFX_VAL,
+		RVII_CM_CLKSEL1_CORE_VAL, MVII_CM_CLKSEL1_PLL_12_VAL,
+		MX_CLKSEL2_PLL_2x_VAL, 0, V24XX_SDRC_RFR_CTRL_BYPASS,
+		RATE_IN_242X},
+
+	/* PRCM-VII (boot-bypass) */
+	{S13M, S13M, S13M, RVII_CM_CLKSEL_MPU_VAL,		/* 13MHz ARM */
+		RVII_CM_CLKSEL_DSP_VAL, RVII_CM_CLKSEL_GFX_VAL,
+		RVII_CM_CLKSEL1_CORE_VAL, MVII_CM_CLKSEL1_PLL_13_VAL,
+		MX_CLKSEL2_PLL_2x_VAL, 0, V24XX_SDRC_RFR_CTRL_BYPASS,
+		RATE_IN_242X},
+
+	/* PRCM #3 - ratio2 (ES2) - FAST */
+	{S13M, S660M, S330M, R2_CM_CLKSEL_MPU_VAL,		/* 330MHz ARM */
+		R2_CM_CLKSEL_DSP_VAL, R2_CM_CLKSEL_GFX_VAL,
+		R2_CM_CLKSEL1_CORE_VAL, M3_CM_CLKSEL1_PLL_13_VAL,
+		MX_CLKSEL2_PLL_2x_VAL, R2_CM_CLKSEL_MDM_VAL,
+		V24XX_SDRC_RFR_CTRL_110MHz,
+		RATE_IN_243X},
+
+	/* PRCM #5a - ratio1 - FAST */
+	{S13M, S532M, S266M, R1_CM_CLKSEL_MPU_VAL,		/* 266MHz ARM */
+		R1_CM_CLKSEL_DSP_VAL, R1_CM_CLKSEL_GFX_VAL,
+		R1_CM_CLKSEL1_CORE_VAL, M5A_CM_CLKSEL1_PLL_13_VAL,
+		MX_CLKSEL2_PLL_2x_VAL, R1_CM_CLKSEL_MDM_VAL,
+		V24XX_SDRC_RFR_CTRL_133MHz,
+		RATE_IN_243X},
+
+	/* PRCM #5b - ratio1 - FAST */
+	{S13M, S400M, S200M, R1_CM_CLKSEL_MPU_VAL,		/* 200MHz ARM */
+		R1_CM_CLKSEL_DSP_VAL, R1_CM_CLKSEL_GFX_VAL,
+		R1_CM_CLKSEL1_CORE_VAL, M5B_CM_CLKSEL1_PLL_13_VAL,
+		MX_CLKSEL2_PLL_2x_VAL, R1_CM_CLKSEL_MDM_VAL,
+		V24XX_SDRC_RFR_CTRL_100MHz,
+		RATE_IN_243X},
+
+	/* PRCM #3 - ratio2 (ES2) - SLOW */
+	{S13M, S330M, S165M, R2_CM_CLKSEL_MPU_VAL,		/* 165MHz ARM */
+		R2_CM_CLKSEL_DSP_VAL, R2_CM_CLKSEL_GFX_VAL,
+		R2_CM_CLKSEL1_CORE_VAL, M3_CM_CLKSEL1_PLL_13_VAL,
+		MX_CLKSEL2_PLL_1x_VAL, R2_CM_CLKSEL_MDM_VAL,
+		V24XX_SDRC_RFR_CTRL_110MHz,
+		RATE_IN_243X},
+
+	/* PRCM #5a - ratio1 - SLOW */
+	{S13M, S266M, S133M, R1_CM_CLKSEL_MPU_VAL,		/* 133MHz ARM */
+		R1_CM_CLKSEL_DSP_VAL, R1_CM_CLKSEL_GFX_VAL,
+		R1_CM_CLKSEL1_CORE_VAL, M5A_CM_CLKSEL1_PLL_13_VAL,
+		MX_CLKSEL2_PLL_1x_VAL, R1_CM_CLKSEL_MDM_VAL,
+		V24XX_SDRC_RFR_CTRL_133MHz,
+		RATE_IN_243X},
+
+	/* PRCM #5b - ratio1 - SLOW */
+	{S13M, S200M, S100M, R1_CM_CLKSEL_MPU_VAL,		/* 100MHz ARM */
+		R1_CM_CLKSEL_DSP_VAL, R1_CM_CLKSEL_GFX_VAL,
+		R1_CM_CLKSEL1_CORE_VAL, M5B_CM_CLKSEL1_PLL_13_VAL,
+		MX_CLKSEL2_PLL_1x_VAL, R1_CM_CLKSEL_MDM_VAL,
+		V24XX_SDRC_RFR_CTRL_100MHz,
+		RATE_IN_243X},
+
+	/* PRCM-boot/bypass */
+	{S13M, S13M, S13M, RB_CM_CLKSEL_MPU_VAL,		/* 13MHz */
+		RB_CM_CLKSEL_DSP_VAL, RB_CM_CLKSEL_GFX_VAL,
+		RB_CM_CLKSEL1_CORE_VAL, MB_CM_CLKSEL1_PLL_13_VAL,
+		MX_CLKSEL2_PLL_2x_VAL, RB_CM_CLKSEL_MDM_VAL,
+		V24XX_SDRC_RFR_CTRL_BYPASS,
+		RATE_IN_243X},
+
+	/* PRCM-boot/bypass */
+	{S12M, S12M, S12M, RB_CM_CLKSEL_MPU_VAL,		/* 12MHz */
+		RB_CM_CLKSEL_DSP_VAL, RB_CM_CLKSEL_GFX_VAL,
+		RB_CM_CLKSEL1_CORE_VAL, MB_CM_CLKSEL1_PLL_12_VAL,
+		MX_CLKSEL2_PLL_2x_VAL, RB_CM_CLKSEL_MDM_VAL,
+		V24XX_SDRC_RFR_CTRL_BYPASS,
+		RATE_IN_243X},
+
+	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+};
+
+/*-------------------------------------------------------------------------
+ * 24xx clock tree.
+ *
+ * NOTE: In many cases here we are assigning a 'default' parent. In many
+ *	cases the parent is selectable. The get/set parent calls will also
+ *	switch sources.
+ *
+ *	Many clocks say always_enabled, but they can be auto idled for
+ *	power savings. They will always be available upon clock request.
+ *
+ *	Several sources are given initial rates which may be wrong; these
+ *	are fixed up in the init function.
+ *
+ *	Things are broadly separated below by clock domains. It is
+ *	noteworthy that most peripherals have dependencies on multiple clock
+ *	domains. Many get their interface clocks from the L4 domain, but get
+ *	functional clocks from fixed sources or other core domain derived
+ *	clocks.
+ *-------------------------------------------------------------------------*/
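For orientation, the structs below are consumed through the generic clk API once they are registered at init time. A minimal, illustrative sketch of a driver-side user follows; the wrapper function and device handle are made up for this sketch, and only the clock name comes from the table below:

	#include <linux/kernel.h>
	#include <linux/device.h>
	#include <linux/clk.h>
	#include <linux/err.h>

	/* Illustrative only: look up an interface clock defined below,
	 * enable it (setting its CM_ICLKEN enable bit), and read back the
	 * rate its recalc hook computed. */
	static int example_use_iclk(struct device *dev)
	{
		struct clk *ick;

		ick = clk_get(dev, "uart1_ick");
		if (IS_ERR(ick))
			return PTR_ERR(ick);

		clk_enable(ick);
		pr_debug("uart1_ick rate: %lu Hz\n", clk_get_rate(ick));

		clk_disable(ick);
		clk_put(ick);
		return 0;
	}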
+
+/* Base external input clocks */
+static struct clk func_32k_ck = {
+	.name		= "func_32k_ck",
+	.rate		= 32000,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+				RATE_FIXED | ALWAYS_ENABLED,
+};
+
+/* Typical 12/13MHz in standalone mode, will be 26MHz in chassis mode */
+static struct clk osc_ck = {		/* (*12, *13, 19.2, *26, 38.4)MHz */
+	.name		= "osc_ck",
+	.rate		= 26000000,		/* fixed up in clock init */
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+				RATE_FIXED | ALWAYS_ENABLED | RATE_PROPAGATES,
+};
+
+/* Without a modem likely 12MHz, with a modem likely 13MHz */
+static struct clk sys_ck = {		/* (*12, *13, 19.2, 26, 38.4)MHz */
+	.name		= "sys_ck",		/* ~ ref_clk also */
+	.parent		= &osc_ck,
+	.rate		= 13000000,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+				RATE_FIXED | ALWAYS_ENABLED | RATE_PROPAGATES,
+	.rate_offset	= 6, /* sysclkdiv 1 or 2, already handled or no boot */
+	.recalc		= &omap2_sys_clk_recalc,
+};
+
+static struct clk alt_ck = {		/* Typical 54M or 48M, may not exist */
+	.name		= "alt_ck",
+	.rate		= 54000000,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+				RATE_FIXED | ALWAYS_ENABLED | RATE_PROPAGATES,
+	.recalc		= &omap2_propagate_rate,
+};
+
+/*
+ * Analog domain root source clocks
+ */
+
+/* dpll_ck is broken out into special cases through clksel */
+static struct clk dpll_ck = {
+	.name		= "dpll_ck",
+	.parent		= &sys_ck,		/* Can be func_32k also */
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+				RATE_PROPAGATES | RATE_CKCTL | CM_PLL_SEL1,
+	.recalc		= &omap2_clksel_recalc,
+};
+
+static struct clk apll96_ck = {
+	.name		= "apll96_ck",
+	.parent		= &sys_ck,
+	.rate		= 96000000,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+				RATE_FIXED | RATE_PROPAGATES,
+	.enable_reg	= (void __iomem *)&CM_CLKEN_PLL,
+	.enable_bit	= 0x2,
+	.recalc		= &omap2_propagate_rate,
+};
+
+static struct clk apll54_ck = {
+	.name		= "apll54_ck",
+	.parent		= &sys_ck,
+	.rate		= 54000000,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+				RATE_FIXED | RATE_PROPAGATES,
+	.enable_reg	= (void __iomem *)&CM_CLKEN_PLL,
+	.enable_bit	= 0x6,
+	.recalc		= &omap2_propagate_rate,
+};
+
+/*
+ * PRCM digital base sources
+ */
+static struct clk func_54m_ck = {
+	.name		= "func_54m_ck",
+	.parent		= &apll54_ck,	/* can also be alt_clk */
+	.rate		= 54000000,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+				RATE_FIXED | CM_PLL_SEL1 | RATE_PROPAGATES,
+	.src_offset	= 5,
+	.enable_reg	= (void __iomem *)&CM_CLKEN_PLL,
+	.enable_bit	= 0xff,
+	.recalc		= &omap2_propagate_rate,
+};
+
+static struct clk core_ck = {
+	.name		= "core_ck",
+	.parent		= &dpll_ck,		/* can also be 32k */
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+				ALWAYS_ENABLED | RATE_PROPAGATES,
+	.recalc		= &omap2_propagate_rate,
+};
+
+static struct clk sleep_ck = {		/* sys_clk or 32k */
+	.name		= "sleep_ck",
+	.parent		= &func_32k_ck,
+	.rate		= 32000,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.recalc		= &omap2_propagate_rate,
+};
+
+static struct clk func_96m_ck = {
+	.name		= "func_96m_ck",
+	.parent		= &apll96_ck,
+	.rate		= 96000000,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+				RATE_FIXED | RATE_PROPAGATES,
+	.enable_reg	= (void __iomem *)&CM_CLKEN_PLL,
+	.enable_bit	= 0xff,
+	.recalc		= &omap2_propagate_rate,
+};
+
+static struct clk func_48m_ck = {
+	.name		= "func_48m_ck",
+	.parent		= &apll96_ck,	 /* 96M or Alt */
+	.rate		= 48000000,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+				RATE_FIXED | CM_PLL_SEL1 | RATE_PROPAGATES,
+	.src_offset	= 3,
+	.enable_reg	= (void __iomem *)&CM_CLKEN_PLL,
+	.enable_bit	= 0xff,
+	.recalc		= &omap2_propagate_rate,
+};
+
+static struct clk func_12m_ck = {
+	.name		= "func_12m_ck",
+	.parent		= &func_48m_ck,
+	.rate		= 12000000,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+				RATE_FIXED | RATE_PROPAGATES,
+	.recalc		= &omap2_propagate_rate,
+	.enable_reg	= (void __iomem *)&CM_CLKEN_PLL,
+	.enable_bit	= 0xff,
+};
+
+/* Secure timer, only available in secure mode */
+static struct clk wdt1_osc_ck = {
+	.name		= "ck_wdt1_osc",
+	.parent		= &osc_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk sys_clkout = {
+	.name		= "sys_clkout",
+	.parent		= &func_54m_ck,
+	.rate		= 54000000,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+				CM_SYSCLKOUT_SEL1 | RATE_CKCTL,
+	.src_offset	= 0,
+	.enable_reg	= (void __iomem *)&PRCM_CLKOUT_CTRL,
+	.enable_bit	= 7,
+	.rate_offset	= 3,
+	.recalc		= &omap2_clksel_recalc,
+};
+
+/* In 2430, new in 2420 ES2 */
+static struct clk sys_clkout2 = {
+	.name		= "sys_clkout2",
+	.parent		= &func_54m_ck,
+	.rate		= 54000000,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+				CM_SYSCLKOUT_SEL1 | RATE_CKCTL,
+	.src_offset	= 8,
+	.enable_reg	= (void __iomem *)&PRCM_CLKOUT_CTRL,
+	.enable_bit	= 15,
+	.rate_offset	= 11,
+	.recalc		= &omap2_clksel_recalc,
+};
+
+/*
+ * MPU clock domain
+ *	Clocks:
+ *		MPU_FCLK, MPU_ICLK
+ *		INT_M_FCLK, INT_M_I_CLK
+ *
+ * - Individual clocks are hardware managed.
+ * - Base divider comes from: CM_CLKSEL_MPU
+ *
+ */
+static struct clk mpu_ck = {	/* Control cpu */
+	.name		= "mpu_ck",
+	.parent		= &core_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X | RATE_CKCTL |
+				ALWAYS_ENABLED | CM_MPU_SEL1 | DELAYED_APP |
+				CONFIG_PARTICIPANT | RATE_PROPAGATES,
+	.rate_offset	= 0,	/* bits 0-4 */
+	.recalc		= &omap2_clksel_recalc,
+};
+
+/*
+ * DSP (2430-IVA2.1) (2420-UMA+IVA1) clock domain
+ * Clocks:
+ *	2430: IVA2.1_FCLK, IVA2.1_ICLK
+ *	2420: UMA_FCLK, UMA_ICLK, IVA_MPU, IVA_COP
+ */
+static struct clk iva2_1_fck = {
+	.name		= "iva2_1_fck",
+	.parent		= &core_ck,
+	.flags		= CLOCK_IN_OMAP243X | RATE_CKCTL | CM_DSP_SEL1 |
+				DELAYED_APP | RATE_PROPAGATES |
+				CONFIG_PARTICIPANT,
+	.rate_offset	= 0,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN_DSP,
+	.enable_bit	= 0,
+	.recalc		= &omap2_clksel_recalc,
+};
+
+static struct clk iva2_1_ick = {
+	.name		= "iva2_1_ick",
+	.parent		= &iva2_1_fck,
+	.flags		= CLOCK_IN_OMAP243X | RATE_CKCTL | CM_DSP_SEL1 |
+				DELAYED_APP | CONFIG_PARTICIPANT,
+	.rate_offset	= 5,
+	.recalc		= &omap2_clksel_recalc,
+};
+
+/*
+ * Won't be too specific here. The core clock comes into this block,
+ * is divided, then tee'd. One branch goes directly to the xyz enable
+ * controls. The other branch is further divided by 2 and then possibly
+ * routed into a synchronizer, coming out as the abc clocks.
+ */
+static struct clk dsp_fck = {
+	.name		= "dsp_fck",
+	.parent		= &core_ck,
+	.flags		= CLOCK_IN_OMAP242X | RATE_CKCTL | CM_DSP_SEL1 |
+			DELAYED_APP | CONFIG_PARTICIPANT | RATE_PROPAGATES,
+	.rate_offset	= 0,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN_DSP,
+	.enable_bit	= 0,
+	.recalc		= &omap2_clksel_recalc,
+};
+
+static struct clk dsp_ick = {
+	.name		= "dsp_ick",	 /* apparently ipi and isp */
+	.parent		= &dsp_fck,
+	.flags		= CLOCK_IN_OMAP242X | RATE_CKCTL | CM_DSP_SEL1 |
+				DELAYED_APP | CONFIG_PARTICIPANT,
+	.rate_offset = 5,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN_DSP,
+	.enable_bit	= 1,		/* for ipi */
+	.recalc		= &omap2_clksel_recalc,
+};
+
+static struct clk iva1_ifck = {
+	.name		= "iva1_ifck",
+	.parent		= &core_ck,
+	.flags		= CLOCK_IN_OMAP242X | CM_DSP_SEL1 | RATE_CKCTL |
+			CONFIG_PARTICIPANT | RATE_PROPAGATES | DELAYED_APP,
+	.rate_offset	= 8,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN_DSP,
+	.enable_bit	= 10,
+	.recalc		= &omap2_clksel_recalc,
+};
+
+/* IVA1 mpu/int/i/f clocks are /2 of parent */
+static struct clk iva1_mpu_int_ifck = {
+	.name		= "iva1_mpu_int_ifck",
+	.parent		= &iva1_ifck,
+	.flags		= CLOCK_IN_OMAP242X | RATE_CKCTL | CM_DSP_SEL1,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN_DSP,
+	.enable_bit	= 8,
+	.recalc		= &omap2_clksel_recalc,
+};
+
+/*
+ * L3 clock domain
+ * L3 clocks are used for both interface and functional clocks to
+ * multiple entities. Some of these clocks are completely managed
+ * by hardware, and some others allow software control. Hardware-
+ * managed ones are generally based directly on CLK_REQ signals and
+ * various auto-idle settings. The functional spec sets many of these
+ * as 'tie-high' for their enables.
+ *
+ * I-CLOCKS:
+ *	L3-Interconnect, SMS, GPMC, SDRC, OCM_RAM, OCM_ROM, SDMA
+ *	CAM, HS-USB.
+ * F-CLOCK
+ *	SSI.
+ *
+ * GPMC memories and SDRC have timing and clock sensitive registers which
+ * may very well need notification when the clock changes. Currently for low
+ * operating points, these are taken care of in sleep.S.
+ */
+static struct clk core_l3_ck = {	/* Used for ick and fck, interconnect */
+	.name		= "core_l3_ck",
+	.parent		= &core_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+				RATE_CKCTL | ALWAYS_ENABLED | CM_CORE_SEL1 |
+				DELAYED_APP | CONFIG_PARTICIPANT |
+				RATE_PROPAGATES,
+	.rate_offset	= 0,
+	.recalc		= &omap2_clksel_recalc,
+};
+
+static struct clk usb_l4_ick = {	/* FS-USB interface clock */
+	.name		= "usb_l4_ick",
+	.parent		= &core_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+				RATE_CKCTL | CM_CORE_SEL1 | DELAYED_APP |
+				CONFIG_PARTICIPANT,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN2_CORE,
+	.enable_bit	= 0,
+	.rate_offset = 25,
+	.recalc		= &omap2_clksel_recalc,
+};
+
+/*
+ * SSI is in the L3 management domain; its direct parent is core, not L3.
+ * Many core power domain entities are grouped into the L3 clock
+ * domain.
+ * Clocks: SSI_SSR_FCLK, SSI_SST_FCLK, SSI_L4_ICLK
+ *
+ * ssr = core/1/2/3/4/5, sst = 1/2 ssr.
+ */
+static struct clk ssi_ssr_sst_fck = {
+	.name		= "ssi_fck",
+	.parent		= &core_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+				RATE_CKCTL | CM_CORE_SEL1 | DELAYED_APP,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN2_CORE,	/* bit 1 */
+	.enable_bit	= 1,
+	.rate_offset = 20,
+	.recalc		= &omap2_clksel_recalc,
+};
+
+/*
+ * GFX clock domain
+ *	Clocks:
+ * GFX_FCLK, GFX_ICLK
+ * GFX_CG1(2d), GFX_CG2(3d)
+ *
+ * GFX_FCLK runs from L3, and is divided by (1,2,3,4).
+ * The 2d and 3d clocks run at a hardware-determined
+ * division of fclk.
+ *
+ */
+static struct clk gfx_3d_fck = {
+	.name		= "gfx_3d_fck",
+	.parent		= &core_l3_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+				RATE_CKCTL | CM_GFX_SEL1,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN_GFX,
+	.enable_bit	= 2,
+	.rate_offset	= 0,
+	.recalc		= &omap2_clksel_recalc,
+};
+
+static struct clk gfx_2d_fck = {
+	.name		= "gfx_2d_fck",
+	.parent		= &core_l3_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+				RATE_CKCTL | CM_GFX_SEL1,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN_GFX,
+	.enable_bit	= 1,
+	.rate_offset	= 0,
+	.recalc		= &omap2_clksel_recalc,
+};
+
+static struct clk gfx_ick = {
+	.name		= "gfx_ick",		/* From l3 */
+	.parent		= &core_l3_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+				RATE_CKCTL,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN_GFX,	/* bit 0 */
+	.enable_bit	= 0,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+/*
+ * Modem clock domain (2430)
+ *	CLOCKS:
+ *		MDM_OSC_CLK
+ *		MDM_ICLK
+ */
+static struct clk mdm_ick = {		/* used both as a ick and fck */
+	.name		= "mdm_ick",
+	.parent		= &core_ck,
+	.flags		= CLOCK_IN_OMAP243X | RATE_CKCTL | CM_MODEM_SEL1 |
+				DELAYED_APP | CONFIG_PARTICIPANT,
+	.rate_offset	= 0,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN_MDM,
+	.enable_bit	= 0,
+	.recalc		= &omap2_clksel_recalc,
+};
+
+static struct clk mdm_osc_ck = {
+	.name		= "mdm_osc_ck",
+	.rate		= 26000000,
+	.parent		= &osc_ck,
+	.flags		= CLOCK_IN_OMAP243X | RATE_FIXED,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN_MDM,
+	.enable_bit	= 1,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+/*
+ * L4 clock management domain
+ *
+ * This domain contains lots of interface clocks from the L4 interface, plus
+ * some functional clocks. Fixed APLL functional source clocks are managed in
+ * this domain.
+ */
+static struct clk l4_ck = {		/* used both as an ick and fck */
+	.name		= "l4_ck",
+	.parent		= &core_l3_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+				RATE_CKCTL | ALWAYS_ENABLED | CM_CORE_SEL1 |
+				DELAYED_APP | RATE_PROPAGATES,
+	.rate_offset	= 5,
+	.recalc		= &omap2_clksel_recalc,
+};
+
+static struct clk ssi_l4_ick = {
+	.name		= "ssi_l4_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X | RATE_CKCTL,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN2_CORE,	/* bit 1 */
+	.enable_bit	= 1,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+/*
+ * DSS clock domain
+ * CLOCKs:
+ * DSS_L4_ICLK, DSS_L3_ICLK,
+ * DSS_CLK1, DSS_CLK2, DSS_54MHz_CLK
+ *
+ * DSS is both initiator and target.
+ */
+static struct clk dss_ick = {		/* Enables both L3,L4 ICLK's */
+	.name		= "dss_ick",
+	.parent		= &l4_ck,	/* really both l3 and l4 */
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X | RATE_CKCTL,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN1_CORE,
+	.enable_bit	= 0,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk dss1_fck = {
+	.name		= "dss1_fck",
+	.parent		= &core_ck,		/* Core or sys */
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+				RATE_CKCTL | CM_CORE_SEL1 | DELAYED_APP,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN1_CORE,
+	.enable_bit	= 0,
+	.rate_offset	= 8,
+	.src_offset	= 8,
+	.recalc		= &omap2_clksel_recalc,
+};
+
+static struct clk dss2_fck = {		/* Alt clk used in power management */
+	.name		= "dss2_fck",
+	.parent		= &sys_ck,		/* fixed at sys_ck or 48MHz */
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+				RATE_CKCTL | CM_CORE_SEL1 | RATE_FIXED,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN1_CORE,
+	.enable_bit	= 1,
+	.src_offset	= 13,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk dss_54m_fck = {	/* Alt clk used in power management */
+	.name		= "dss_54m_fck",	/* 54m tv clk */
+	.parent		= &func_54m_ck,
+	.rate		= 54000000,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+				RATE_FIXED | RATE_PROPAGATES,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN1_CORE,
+	.enable_bit	= 2,
+	.recalc		= &omap2_propagate_rate,
+};
+
+/*
+ * CORE power domain ICLK & FCLK defines.
+ * Many of these can have more than one possible parent. Entries
+ * here will likely have an L4 interface parent, and may have multiple
+ * functional clock parents.
+ */
+static struct clk gpt1_ick = {
+	.name		= "gpt1_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN_WKUP,	/* Bit4 */
+	.enable_bit	= 0,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk gpt1_fck = {
+	.name		= "gpt1_fck",
+	.parent		= &func_32k_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+				CM_WKUP_SEL1,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN_WKUP,
+	.enable_bit	= 0,
+	.src_offset	= 0,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk gpt2_ick = {
+	.name		= "gpt2_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN1_CORE,	/* bit4 */
+	.enable_bit	= 0,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk gpt2_fck = {
+	.name		= "gpt2_fck",
+	.parent		= &func_32k_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+				CM_CORE_SEL2,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN1_CORE,
+	.enable_bit	= 4,
+	.src_offset	= 2,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk gpt3_ick = {
+	.name		= "gpt3_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN1_CORE,	/* Bit5 */
+	.enable_bit	= 5,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk gpt3_fck = {
+	.name		= "gpt3_fck",
+	.parent		= &func_32k_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+				CM_CORE_SEL2,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN1_CORE,
+	.enable_bit	= 5,
+	.src_offset	= 4,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk gpt4_ick = {
+	.name		= "gpt4_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN1_CORE,	/* Bit6 */
+	.enable_bit	= 6,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk gpt4_fck = {
+	.name		= "gpt4_fck",
+	.parent		= &func_32k_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+				CM_CORE_SEL2,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN1_CORE,
+	.enable_bit	= 6,
+	.src_offset	= 6,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk gpt5_ick = {
+	.name		= "gpt5_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN1_CORE,	 /* Bit7 */
+	.enable_bit	= 7,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk gpt5_fck = {
+	.name		= "gpt5_fck",
+	.parent		= &func_32k_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+				CM_CORE_SEL2,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN1_CORE,
+	.enable_bit	= 7,
+	.src_offset	= 8,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk gpt6_ick = {
+	.name		= "gpt6_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_bit	= 8,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN1_CORE,	 /* bit8 */
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk gpt6_fck = {
+	.name		= "gpt6_fck",
+	.parent		= &func_32k_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+				CM_CORE_SEL2,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN1_CORE,
+	.enable_bit	= 8,
+	.src_offset	= 10,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk gpt7_ick = {
+	.name		= "gpt7_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN1_CORE,	 /* bit9 */
+	.enable_bit	= 9,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk gpt7_fck = {
+	.name		= "gpt7_fck",
+	.parent		= &func_32k_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+				CM_CORE_SEL2,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN1_CORE,
+	.enable_bit	= 9,
+	.src_offset	= 12,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk gpt8_ick = {
+	.name		= "gpt8_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN1_CORE,	 /* bit10 */
+	.enable_bit	= 10,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk gpt8_fck = {
+	.name		= "gpt8_fck",
+	.parent		= &func_32k_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+				CM_CORE_SEL2,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN1_CORE,
+	.enable_bit	= 10,
+	.src_offset	= 14,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk gpt9_ick = {
+	.name		= "gpt9_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN1_CORE,
+	.enable_bit	= 11,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk gpt9_fck = {
+	.name		= "gpt9_fck",
+	.parent		= &func_32k_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+					CM_CORE_SEL2,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN1_CORE,
+	.enable_bit	= 11,
+	.src_offset	= 16,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk gpt10_ick = {
+	.name		= "gpt10_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN1_CORE,
+	.enable_bit	= 12,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk gpt10_fck = {
+	.name		= "gpt10_fck",
+	.parent		= &func_32k_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+					CM_CORE_SEL2,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN1_CORE,
+	.enable_bit	= 12,
+	.src_offset	= 18,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk gpt11_ick = {
+	.name		= "gpt11_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN1_CORE,
+	.enable_bit	= 13,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk gpt11_fck = {
+	.name		= "gpt11_fck",
+	.parent		= &func_32k_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+					CM_CORE_SEL2,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN1_CORE,
+	.enable_bit	= 13,
+	.src_offset	= 20,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk gpt12_ick = {
+	.name		= "gpt12_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN1_CORE,	 /* bit14 */
+	.enable_bit	= 14,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk gpt12_fck = {
+	.name		= "gpt12_fck",
+	.parent		= &func_32k_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+					CM_CORE_SEL2,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN1_CORE,
+	.enable_bit	= 14,
+	.src_offset	= 22,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk mcbsp1_ick = {
+	.name		= "mcbsp1_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_bit	= 15,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN1_CORE,	 /* bit16 */
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk mcbsp1_fck = {
+	.name		= "mcbsp1_fck",
+	.parent		= &func_96m_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_bit	= 15,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN1_CORE,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk mcbsp2_ick = {
+	.name		= "mcbsp2_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_bit	= 16,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN1_CORE,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk mcbsp2_fck = {
+	.name		= "mcbsp2_fck",
+	.parent		= &func_96m_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_bit	= 16,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN1_CORE,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk mcbsp3_ick = {
+	.name		= "mcbsp3_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN2_CORE,
+	.enable_bit	= 3,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk mcbsp3_fck = {
+	.name		= "mcbsp3_fck",
+	.parent		= &func_96m_ck,
+	.flags		= CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN2_CORE,
+	.enable_bit	= 3,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk mcbsp4_ick = {
+	.name		= "mcbsp4_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN2_CORE,
+	.enable_bit	= 4,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk mcbsp4_fck = {
+	.name		= "mcbsp4_fck",
+	.parent		= &func_96m_ck,
+	.flags		= CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN2_CORE,
+	.enable_bit	= 4,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk mcbsp5_ick = {
+	.name		= "mcbsp5_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN2_CORE,
+	.enable_bit	= 5,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk mcbsp5_fck = {
+	.name		= "mcbsp5_fck",
+	.parent		= &func_96m_ck,
+	.flags		= CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN2_CORE,
+	.enable_bit	= 5,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk mcspi1_ick = {
+	.name		= "mcspi1_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN1_CORE,
+	.enable_bit	= 17,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk mcspi1_fck = {
+	.name		= "mcspi1_fck",
+	.parent		= &func_48m_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN1_CORE,
+	.enable_bit	= 17,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk mcspi2_ick = {
+	.name		= "mcspi2_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN1_CORE,
+	.enable_bit	= 18,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk mcspi2_fck = {
+	.name		= "mcspi2_fck",
+	.parent		= &func_48m_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN1_CORE,
+	.enable_bit	= 18,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk mcspi3_ick = {
+	.name		= "mcspi3_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN2_CORE,
+	.enable_bit	= 9,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk mcspi3_fck = {
+	.name		= "mcspi3_fck",
+	.parent		= &func_48m_ck,
+	.flags		= CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN2_CORE,
+	.enable_bit	= 9,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk uart1_ick = {
+	.name		= "uart1_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN1_CORE,
+	.enable_bit	= 21,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk uart1_fck = {
+	.name		= "uart1_fck",
+	.parent		= &func_48m_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN1_CORE,
+	.enable_bit	= 21,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk uart2_ick = {
+	.name		= "uart2_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN1_CORE,
+	.enable_bit	= 22,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk uart2_fck = {
+	.name		= "uart2_fck",
+	.parent		= &func_48m_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN1_CORE,
+	.enable_bit	= 22,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk uart3_ick = {
+	.name		= "uart3_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN2_CORE,
+	.enable_bit	= 2,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk uart3_fck = {
+	.name		= "uart3_fck",
+	.parent		= &func_48m_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN2_CORE,
+	.enable_bit	= 2,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk gpios_ick = {
+	.name		= "gpios_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN_WKUP,
+	.enable_bit	= 2,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk gpios_fck = {
+	.name		= "gpios_fck",
+	.parent		= &func_32k_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN_WKUP,
+	.enable_bit	= 2,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk mpu_wdt_ick = {
+	.name		= "mpu_wdt_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN_WKUP,
+	.enable_bit	= 3,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk mpu_wdt_fck = {
+	.name		= "mpu_wdt_fck",
+	.parent		= &func_32k_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN_WKUP,
+	.enable_bit	= 3,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk sync_32k_ick = {
+	.name		= "sync_32k_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN_WKUP,
+	.enable_bit	= 1,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk wdt1_ick = {
+	.name		= "wdt1_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN_WKUP,
+	.enable_bit	= 4,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk omapctrl_ick = {
+	.name		= "omapctrl_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN_WKUP,
+	.enable_bit	= 5,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk icr_ick = {
+	.name		= "icr_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN_WKUP,
+	.enable_bit	= 6,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk cam_ick = {
+	.name		= "cam_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN1_CORE,
+	.enable_bit	= 31,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk cam_fck = {
+	.name		= "cam_fck",
+	.parent		= &func_96m_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN1_CORE,
+	.enable_bit	= 31,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk mailboxes_ick = {
+	.name		= "mailboxes_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN1_CORE,
+	.enable_bit	= 30,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk wdt4_ick = {
+	.name		= "wdt4_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN1_CORE,
+	.enable_bit	= 29,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk wdt4_fck = {
+	.name		= "wdt4_fck",
+	.parent		= &func_32k_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN1_CORE,
+	.enable_bit	= 29,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk wdt3_ick = {
+	.name		= "wdt3_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP242X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN1_CORE,
+	.enable_bit	= 28,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk wdt3_fck = {
+	.name		= "wdt3_fck",
+	.parent		= &func_32k_ck,
+	.flags		= CLOCK_IN_OMAP242X,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN1_CORE,
+	.enable_bit	= 28,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk mspro_ick = {
+	.name		= "mspro_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN1_CORE,
+	.enable_bit	= 27,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk mspro_fck = {
+	.name		= "mspro_fck",
+	.parent		= &func_96m_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN1_CORE,
+	.enable_bit	= 27,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk mmc_ick = {
+	.name		= "mmc_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP242X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN1_CORE,
+	.enable_bit	= 26,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk mmc_fck = {
+	.name		= "mmc_fck",
+	.parent		= &func_96m_ck,
+	.flags		= CLOCK_IN_OMAP242X,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN1_CORE,
+	.enable_bit	= 26,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk fac_ick = {
+	.name		= "fac_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN1_CORE,
+	.enable_bit	= 25,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk fac_fck = {
+	.name		= "fac_fck",
+	.parent		= &func_12m_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN1_CORE,
+	.enable_bit	= 25,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk eac_ick = {
+	.name		= "eac_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP242X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN1_CORE,
+	.enable_bit	= 24,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk eac_fck = {
+	.name		= "eac_fck",
+	.parent		= &func_96m_ck,
+	.flags		= CLOCK_IN_OMAP242X,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN1_CORE,
+	.enable_bit	= 24,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk hdq_ick = {
+	.name		= "hdq_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN1_CORE,
+	.enable_bit	= 23,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk hdq_fck = {
+	.name		= "hdq_fck",
+	.parent		= &func_12m_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN1_CORE,
+	.enable_bit	= 23,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk i2c2_ick = {
+	.name		= "i2c2_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN1_CORE,
+	.enable_bit	= 20,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk i2c2_fck = {
+	.name		= "i2c2_fck",
+	.parent		= &func_12m_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN1_CORE,
+	.enable_bit	= 20,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk i2chs2_fck = {
+	.name		= "i2chs2_fck",
+	.parent		= &func_96m_ck,
+	.flags		= CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN2_CORE,
+	.enable_bit	= 20,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk i2c1_ick = {
+	.name		= "i2c1_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN1_CORE,
+	.enable_bit	= 19,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk i2c1_fck = {
+	.name		= "i2c1_fck",
+	.parent		= &func_12m_ck,
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN1_CORE,
+	.enable_bit	= 19,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk i2chs1_fck = {
+	.name		= "i2chs1_fck",
+	.parent		= &func_96m_ck,
+	.flags		= CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN2_CORE,
+	.enable_bit	= 19,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk vlynq_ick = {
+	.name		= "vlynq_ick",
+	.parent		= &core_l3_ck,
+	.flags		= CLOCK_IN_OMAP242X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN1_CORE,
+	.enable_bit	= 3,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk vlynq_fck = {
+	.name		= "vlynq_fck",
+	.parent		= &func_96m_ck,
+	.flags		= CLOCK_IN_OMAP242X | RATE_CKCTL |
+				CM_CORE_SEL1 | DELAYED_APP,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN1_CORE,
+	.enable_bit	= 3,
+	.src_offset	= 15,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk sdrc_ick = {
+	.name		= "sdrc_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN3_CORE,
+	.enable_bit	= 2,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk des_ick = {
+	.name		= "des_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP243X | CLOCK_IN_OMAP242X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN4_CORE,
+	.enable_bit	= 0,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk sha_ick = {
+	.name		= "sha_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP243X | CLOCK_IN_OMAP242X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN4_CORE,
+	.enable_bit	= 1,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk rng_ick = {
+	.name		= "rng_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP243X | CLOCK_IN_OMAP242X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN4_CORE,
+	.enable_bit	= 2,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk aes_ick = {
+	.name		= "aes_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP243X | CLOCK_IN_OMAP242X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN4_CORE,
+	.enable_bit	= 3,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk pka_ick = {
+	.name		= "pka_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP243X | CLOCK_IN_OMAP242X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN4_CORE,
+	.enable_bit	= 4,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk usb_fck = {
+	.name		= "usb_fck",
+	.parent		= &func_48m_ck,
+	.flags		= CLOCK_IN_OMAP243X | CLOCK_IN_OMAP242X,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN2_CORE,
+	.enable_bit	= 0,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk usbhs_ick = {
+	.name		= "usbhs_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN2_CORE,
+	.enable_bit	= 6,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk mmchs1_ick = {
+	.name		= "mmchs1_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN2_CORE,
+	.enable_bit	= 7,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk mmchs1_fck = {
+	.name		= "mmchs1_fck",
+	.parent		= &func_96m_ck,
+	.flags		= CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN2_CORE,
+	.enable_bit	= 7,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk mmchs2_ick = {
+	.name		= "mmchs2_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN2_CORE,
+	.enable_bit	= 8,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk mmchs2_fck = {
+	.name		= "mmchs2_fck",
+	.parent		= &func_96m_ck,
+	.flags		= CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN2_CORE,
+	.enable_bit	= 8,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk gpio5_ick = {
+	.name		= "gpio5_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN2_CORE,
+	.enable_bit	= 10,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk gpio5_fck = {
+	.name		= "gpio5_fck",
+	.parent		= &func_32k_ck,
+	.flags		= CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN2_CORE,
+	.enable_bit	= 10,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk mdm_intc_ick = {
+	.name		= "mdm_intc_ick",
+	.parent		= &l4_ck,
+	.flags		= CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_ICLKEN2_CORE,
+	.enable_bit	= 11,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk mmchsdb1_fck = {
+	.name		= "mmchsdb1_fck",
+	.parent		= &func_32k_ck,
+	.flags		= CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN2_CORE,
+	.enable_bit	= 16,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+static struct clk mmchsdb2_fck = {
+	.name		= "mmchsdb2_fck",
+	.parent		= &func_32k_ck,
+	.flags		= CLOCK_IN_OMAP243X,
+	.enable_reg	= (void __iomem *)&CM_FCLKEN2_CORE,
+	.enable_bit	= 17,
+	.recalc		= &omap2_followparent_recalc,
+};
+
+/*
+ * This clock is a composite clock which does entire set changes and then
+ * forces a rebalance. It keys on the MPU speed, but it could really be
+ * keyed on any speed that is part of a set in the rate table.
+ *
+ * To really change a set, you need memory table sets which get changed
+ * in SRAM, plus pre-notifiers and post-notifiers; changing the top set
+ * without low-level display recalcs will not work. This is why DPM
+ * notifiers work: ISRs are off, and they walk a list of clocks that are
+ * already _off_, without messing with the bus.
+ *
+ * This clock should have no parent. It embodies the entire upper level
+ * active set. A parent will mess up some of the init also.
+ */
+static struct clk virt_prcm_set = {
+	.name		= "virt_prcm_set",
+	.flags		= CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+				VIRTUAL_CLOCK | ALWAYS_ENABLED | DELAYED_APP,
+	.parent		= &mpu_ck,	/* Indexed by mpu speed, no parent */
+	.recalc		= &omap2_mpu_recalc,	/* sets are keyed on mpu rate */
+	.set_rate	= &omap2_select_table_rate,
+	.round_rate	= &omap2_round_to_table_rate,
+};
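A hedged sketch of how this virtual clock is meant to be driven; the wrapper function and target rate are illustrative only, while the round/set behaviour follows from the hooks wired up above:

	#include <linux/clk.h>
	#include <linux/err.h>

	/* Illustrative only: request a new PRCM set keyed on the MPU rate.
	 * clk_round_rate() snaps the request to an entry in rate_table, and
	 * clk_set_rate() (via omap2_select_table_rate) applies the set. */
	static int example_change_opp(unsigned long target_mpu_rate)
	{
		struct clk *prcm;
		long rounded;

		prcm = clk_get(NULL, "virt_prcm_set");
		if (IS_ERR(prcm))
			return PTR_ERR(prcm);

		rounded = clk_round_rate(prcm, target_mpu_rate);
		if (rounded > 0)
			clk_set_rate(prcm, rounded);

		clk_put(prcm);
		return 0;
	}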
+
+static struct clk *onchip_clks[] = {
+	/* external root sources */
+	&func_32k_ck,
+	&osc_ck,
+	&sys_ck,
+	&alt_ck,
+	/* internal analog sources */
+	&dpll_ck,
+	&apll96_ck,
+	&apll54_ck,
+	/* internal prcm root sources */
+	&func_54m_ck,
+	&core_ck,
+	&sleep_ck,
+	&func_96m_ck,
+	&func_48m_ck,
+	&func_12m_ck,
+	&wdt1_osc_ck,
+	&sys_clkout,
+	&sys_clkout2,
+	/* mpu domain clocks */
+	&mpu_ck,
+	/* dsp domain clocks */
+	&iva2_1_fck,		/* 2430 */
+	&iva2_1_ick,
+	&dsp_ick,		/* 2420 */
+	&dsp_fck,
+	&iva1_ifck,
+	&iva1_mpu_int_ifck,
+	/* GFX domain clocks */
+	&gfx_3d_fck,
+	&gfx_2d_fck,
+	&gfx_ick,
+	/* Modem domain clocks */
+	&mdm_ick,
+	&mdm_osc_ck,
+	/* DSS domain clocks */
+	&dss_ick,
+	&dss1_fck,
+	&dss2_fck,
+	&dss_54m_fck,
+	/* L3 domain clocks */
+	&core_l3_ck,
+	&ssi_ssr_sst_fck,
+	&usb_l4_ick,
+	/* L4 domain clocks */
+	&l4_ck,			/* used as both core_l4 and wu_l4 */
+	&ssi_l4_ick,
+	/* virtual meta-group clock */
+	&virt_prcm_set,
+	/* general l4 interface ck, multi-parent functional clk */
+	&gpt1_ick,
+	&gpt1_fck,
+	&gpt2_ick,
+	&gpt2_fck,
+	&gpt3_ick,
+	&gpt3_fck,
+	&gpt4_ick,
+	&gpt4_fck,
+	&gpt5_ick,
+	&gpt5_fck,
+	&gpt6_ick,
+	&gpt6_fck,
+	&gpt7_ick,
+	&gpt7_fck,
+	&gpt8_ick,
+	&gpt8_fck,
+	&gpt9_ick,
+	&gpt9_fck,
+	&gpt10_ick,
+	&gpt10_fck,
+	&gpt11_ick,
+	&gpt11_fck,
+	&gpt12_ick,
+	&gpt12_fck,
+	&mcbsp1_ick,
+	&mcbsp1_fck,
+	&mcbsp2_ick,
+	&mcbsp2_fck,
+	&mcbsp3_ick,
+	&mcbsp3_fck,
+	&mcbsp4_ick,
+	&mcbsp4_fck,
+	&mcbsp5_ick,
+	&mcbsp5_fck,
+	&mcspi1_ick,
+	&mcspi1_fck,
+	&mcspi2_ick,
+	&mcspi2_fck,
+	&mcspi3_ick,
+	&mcspi3_fck,
+	&uart1_ick,
+	&uart1_fck,
+	&uart2_ick,
+	&uart2_fck,
+	&uart3_ick,
+	&uart3_fck,
+	&gpios_ick,
+	&gpios_fck,
+	&mpu_wdt_ick,
+	&mpu_wdt_fck,
+	&sync_32k_ick,
+	&wdt1_ick,
+	&omapctrl_ick,
+	&icr_ick,
+	&cam_fck,
+	&cam_ick,
+	&mailboxes_ick,
+	&wdt4_ick,
+	&wdt4_fck,
+	&wdt3_ick,
+	&wdt3_fck,
+	&mspro_ick,
+	&mspro_fck,
+	&mmc_ick,
+	&mmc_fck,
+	&fac_ick,
+	&fac_fck,
+	&eac_ick,
+	&eac_fck,
+	&hdq_ick,
+	&hdq_fck,
+	&i2c1_ick,
+	&i2c1_fck,
+	&i2chs1_fck,
+	&i2c2_ick,
+	&i2c2_fck,
+	&i2chs2_fck,
+	&vlynq_ick,
+	&vlynq_fck,
+	&sdrc_ick,
+	&des_ick,
+	&sha_ick,
+	&rng_ick,
+	&aes_ick,
+	&pka_ick,
+	&usb_fck,
+	&usbhs_ick,
+	&mmchs1_ick,
+	&mmchs1_fck,
+	&mmchs2_ick,
+	&mmchs2_fck,
+	&gpio5_ick,
+	&gpio5_fck,
+	&mdm_intc_ick,
+	&mmchsdb1_fck,
+	&mmchsdb2_fck,
+};
+
+#endif
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
new file mode 100644
index 0000000..7181edb
--- /dev/null
+++ b/arch/arm/mach-omap2/devices.c
@@ -0,0 +1,89 @@
+/*
+ * linux/arch/arm/mach-omap2/devices.c
+ *
+ * OMAP2 platform device setup/initialization
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+
+#include <asm/hardware.h>
+#include <asm/io.h>
+#include <asm/mach-types.h>
+#include <asm/mach/map.h>
+
+#include <asm/arch/tc.h>
+#include <asm/arch/board.h>
+#include <asm/arch/mux.h>
+#include <asm/arch/gpio.h>
+
+extern void omap_nop_release(struct device *dev);
+
+/*-------------------------------------------------------------------------*/
+
+#if defined(CONFIG_I2C_OMAP) || defined(CONFIG_I2C_OMAP_MODULE)
+
+#define OMAP2_I2C_BASE2		0x48072000
+#define OMAP2_I2C_INT2		57
+
+static struct resource i2c_resources2[] = {
+	{
+		.start		= OMAP2_I2C_BASE2,
+		.end		= OMAP2_I2C_BASE2 + 0x3f,
+		.flags		= IORESOURCE_MEM,
+	},
+	{
+		.start		= OMAP2_I2C_INT2,
+		.flags		= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device omap_i2c_device2 = {
+        .name           = "i2c_omap",
+        .id             = 2,
+        .dev = {
+                .release        = omap_nop_release,
+        },
+	.num_resources	= ARRAY_SIZE(i2c_resources2),
+	.resource	= i2c_resources2,
+};
+
+/* See also arch/arm/plat-omap/devices.c for first I2C on 24xx */
+static void omap_init_i2c(void)
+{
+	/* REVISIT: Second I2C not in use on H4? */
+	if (machine_is_omap_h4())
+		return;
+
+	omap_cfg_reg(J15_24XX_I2C2_SCL);
+	omap_cfg_reg(H19_24XX_I2C2_SDA);
+	(void) platform_device_register(&omap_i2c_device2);
+}
+
+#else
+
+static void omap_init_i2c(void) {}
+
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+static int __init omap2_init_devices(void)
+{
+	/* please keep these calls, and their implementations above,
+	 * in alphabetical order so they're easier to sort through.
+	 */
+	omap_init_i2c();
+
+	return 0;
+}
+arch_initcall(omap2_init_devices);
+
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
new file mode 100644
index 0000000..7618730
--- /dev/null
+++ b/arch/arm/mach-omap2/id.c
@@ -0,0 +1,124 @@
+/*
+ * linux/arch/arm/mach-omap2/id.c
+ *
+ * OMAP2 CPU identification code
+ *
+ * Copyright (C) 2005 Nokia Corporation
+ * Written by Tony Lindgren <tony@atomide.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <asm/io.h>
+
+#define OMAP24XX_TAP_BASE	io_p2v(0x48014000)
+
+#define OMAP_TAP_IDCODE		0x0204
+#define OMAP_TAP_PROD_ID	0x0208
+
+#define OMAP_TAP_DIE_ID_0	0x0218
+#define OMAP_TAP_DIE_ID_1	0x021C
+#define OMAP_TAP_DIE_ID_2	0x0220
+#define OMAP_TAP_DIE_ID_3	0x0224
+
+/* system_rev fields for OMAP2 processors:
+ *   CPU id bits     [31:16],
+ *   CPU device type [15:12], (unprg,normal,POP)
+ *   CPU revision    [11:08]
+ *   CPU class bits  [07:00]
+ */
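To make the packing above concrete, a small illustrative decode; these macro names are invented for this sketch, and only the bit positions come from the comment above. For example, type 0x24200000 with revision 2 yields system_rev = 0x24200000 | (2 << 8) | 0x24 = 0x24200224.

	/* Illustrative decode of the system_rev layout documented above */
	#define OMAP2_CPU_ID(r)		(((r) >> 16) & 0xffff)	/* e.g. 0x2420 */
	#define OMAP2_DEV_TYPE(r)	(((r) >> 12) & 0x0f)	/* unprg/normal/POP */
	#define OMAP2_CPU_REV(r)	(((r) >> 8) & 0x0f)	/* silicon revision */
	#define OMAP2_CPU_CLASS(r)	((r) & 0xff)		/* 0x24 for 24xx */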
+
+struct omap_id {
+	u16	hawkeye;	/* Silicon type (Hawkeye id) */
+	u8	dev;		/* Device type from production_id reg */
+	u32	type;		/* combined type id copied to system_rev */
+};
+
+/* Register values to detect the OMAP version */
+static struct omap_id omap_ids[] __initdata = {
+	{ .hawkeye = 0xb5d9, .dev = 0x0, .type = 0x24200000 },
+	{ .hawkeye = 0xb5d9, .dev = 0x1, .type = 0x24201000 },
+	{ .hawkeye = 0xb5d9, .dev = 0x2, .type = 0x24202000 },
+	{ .hawkeye = 0xb5d9, .dev = 0x4, .type = 0x24220000 },
+	{ .hawkeye = 0xb5d9, .dev = 0x8, .type = 0x24230000 },
+	{ .hawkeye = 0xb68a, .dev = 0x0, .type = 0x24300000 },
+};
+
+static u32 __init read_tap_reg(int reg)
+{
+	return __raw_readl(OMAP24XX_TAP_BASE + reg);
+}
+
+void __init omap2_check_revision(void)
+{
+	int i, j;
+	u32 idcode;
+	u32 prod_id;
+	u16 hawkeye;
+	u8  dev_type;
+	u8  rev;
+
+	idcode = read_tap_reg(OMAP_TAP_IDCODE);
+	prod_id = read_tap_reg(OMAP_TAP_PROD_ID);
+	hawkeye = (idcode >> 12) & 0xffff;
+	rev = (idcode >> 28) & 0x0f;
+	dev_type = (prod_id >> 16) & 0x0f;
+
+#ifdef DEBUG
+	printk(KERN_DEBUG "OMAP_TAP_IDCODE 0x%08x REV %i HAWKEYE 0x%04x MANF %03x\n",
+		idcode, rev, hawkeye, (idcode >> 1) & 0x7ff);
+	printk(KERN_DEBUG "OMAP_TAP_DIE_ID_0: 0x%08x\n",
+		read_tap_reg(OMAP_TAP_DIE_ID_0));
+	printk(KERN_DEBUG "OMAP_TAP_DIE_ID_1: 0x%08x DEV_REV: %i\n",
+		read_tap_reg(OMAP_TAP_DIE_ID_1),
+	       (read_tap_reg(OMAP_TAP_DIE_ID_1) >> 28) & 0xf);
+	printk(KERN_DEBUG "OMAP_TAP_DIE_ID_2: 0x%08x\n",
+		read_tap_reg(OMAP_TAP_DIE_ID_2));
+	printk(KERN_DEBUG "OMAP_TAP_DIE_ID_3: 0x%08x\n",
+		read_tap_reg(OMAP_TAP_DIE_ID_3));
+	printk(KERN_DEBUG "OMAP_TAP_PROD_ID_0: 0x%08x DEV_TYPE: %i\n",
+		prod_id, dev_type);
+#endif
+
+	/* Check hawkeye ids */
+	for (i = 0; i < ARRAY_SIZE(omap_ids); i++) {
+		if (hawkeye == omap_ids[i].hawkeye)
+			break;
+	}
+
+	if (i == ARRAY_SIZE(omap_ids)) {
+		printk(KERN_ERR "Unknown OMAP CPU id\n");
+		return;
+	}
+
+	for (j = i; j < ARRAY_SIZE(omap_ids); j++) {
+		if (dev_type == omap_ids[j].dev)
+			break;
+	}
+
+	if (j == ARRAY_SIZE(omap_ids)) {
+		printk(KERN_ERR "Unknown OMAP device type. "
+				"Handling it as OMAP%04x\n",
+				omap_ids[i].type >> 16);
+		j = i;
+	}
+	system_rev = omap_ids[j].type;
+
+	system_rev |= rev << 8;
+
+	/* Add the cpu class info (24xx) */
+	system_rev |= 0x24;
+
+	pr_info("OMAP%04x", system_rev >> 16);
+	if ((system_rev >> 8) & 0x0f)
+		printk("%x", (system_rev >> 8) & 0x0f);
+	printk("\n");
+}
+
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
new file mode 100644
index 0000000..8ea67bf
--- /dev/null
+++ b/arch/arm/mach-omap2/io.c
@@ -0,0 +1,53 @@
+/*
+ * linux/arch/arm/mach-omap2/io.c
+ *
+ * OMAP2 I/O mapping code
+ *
+ * Copyright (C) 2005 Nokia Corporation
+ * Author: Juha Yrjölä <juha.yrjola@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <asm/mach/map.h>
+#include <asm/io.h>
+#include <asm/arch/mux.h>
+
+extern void omap_sram_init(void);
+extern int omap2_clk_init(void);
+extern void omap2_check_revision(void);
+
+/*
+ * The machine specific code may provide the extra mapping besides the
+ * default mapping provided here.
+ */
+static struct map_desc omap2_io_desc[] __initdata = {
+	{
+		.virtual	= L3_24XX_VIRT,
+		.pfn		= __phys_to_pfn(L3_24XX_PHYS),
+		.length		= L3_24XX_SIZE,
+		.type		= MT_DEVICE
+	},
+	{
+		.virtual	= L4_24XX_VIRT,
+		.pfn		= __phys_to_pfn(L4_24XX_PHYS),
+		.length		= L4_24XX_SIZE,
+		.type		= MT_DEVICE
+	}
+};
+
+void __init omap_map_common_io(void)
+{
+	iotable_init(omap2_io_desc, ARRAY_SIZE(omap2_io_desc));
+	omap2_check_revision();
+	omap_sram_init();
+	omap2_mux_init();
+	omap2_clk_init();
+}
diff --git a/arch/arm/mach-omap2/irq.c b/arch/arm/mach-omap2/irq.c
new file mode 100644
index 0000000..d7baff6
--- /dev/null
+++ b/arch/arm/mach-omap2/irq.c
@@ -0,0 +1,149 @@
+/*
+ * linux/arch/arm/mach-omap2/irq.c
+ *
+ * Interrupt handler for OMAP2 boards.
+ *
+ * Copyright (C) 2005 Nokia Corporation
+ * Author: Paul Mundt <paul.mundt@nokia.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/config.h>
+#include <linux/interrupt.h>
+#include <asm/hardware.h>
+#include <asm/mach/irq.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+
+#define INTC_REVISION	0x0000
+#define INTC_SYSCONFIG	0x0010
+#define INTC_SYSSTATUS	0x0014
+#define INTC_CONTROL	0x0048
+#define INTC_MIR_CLEAR0	0x0088
+#define INTC_MIR_SET0	0x008c
+
+/*
+ * OMAP2 has a number of different interrupt controllers, each interrupt
+ * controller is identified as its own "bank". Register definitions are
+ * fairly consistent for each bank, but not all registers are implemented
+ * for each bank. When in doubt, consult the TRM.
+ */
+static struct omap_irq_bank {
+	unsigned long base_reg;
+	unsigned int nr_irqs;
+} __attribute__ ((aligned(4))) irq_banks[] = {
+	{
+		/* MPU INTC */
+		.base_reg	= OMAP24XX_IC_BASE,
+		.nr_irqs	= 96,
+	}, {
+		/* XXX: DSP INTC */
+
+#if 0
+	/*
+	 * Commented out for now until we fix the IVA clocking
+	 */
+#ifdef CONFIG_ARCH_OMAP2420
+	}, {
+		/* IVA INTC (2420 only) */
+		.base_reg	= OMAP24XX_IVA_INTC_BASE,
+		.nr_irqs	= 16,	/* Actually 32, but only 16 are used */
+#endif
+#endif
+	}
+};
+
+/* XXX: FIQ and additional INTC support (only MPU at the moment) */
+static void omap_ack_irq(unsigned int irq)
+{
+	omap_writel(0x1, irq_banks[0].base_reg + INTC_CONTROL);
+}
+
+static void omap_mask_irq(unsigned int irq)
+{
+	int offset = (irq >> 5) << 5;
+
+	if (irq >= 64) {
+		irq %= 64;
+	} else if (irq >= 32) {
+		irq %= 32;
+	}
+
+	omap_writel(1 << irq, irq_banks[0].base_reg + INTC_MIR_SET0 + offset);
+}
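A worked example of the offset arithmetic above, derived purely from the code; it assumes successive MIR_SETn/MIR_CLEARn words spaced 0x20 apart, which is what the shift implies:

	/*
	 * irq 70: offset = (70 >> 5) << 5 = 0x40, then irq %= 64 -> 6,
	 *         so bit 6 is written to INTC_MIR_SET0 + 0x40 (third MIR word).
	 * irq 33: offset = (33 >> 5) << 5 = 0x20, then irq %= 32 -> 1,
	 *         so bit 1 is written to INTC_MIR_SET0 + 0x20 (second MIR word).
	 */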
+
+static void omap_unmask_irq(unsigned int irq)
+{
+	int offset = (irq >> 5) << 5;
+
+	if (irq >= 64) {
+		irq %= 64;
+	} else if (irq >= 32) {
+		irq %= 32;
+	}
+
+	omap_writel(1 << irq, irq_banks[0].base_reg + INTC_MIR_CLEAR0 + offset);
+}
+
+static void omap_mask_ack_irq(unsigned int irq)
+{
+	omap_mask_irq(irq);
+	omap_ack_irq(irq);
+}
+
+static struct irqchip omap_irq_chip = {
+	.ack	= omap_mask_ack_irq,
+	.mask	= omap_mask_irq,
+	.unmask	= omap_unmask_irq,
+};
+
+static void __init omap_irq_bank_init_one(struct omap_irq_bank *bank)
+{
+	unsigned long tmp;
+
+	tmp = omap_readl(bank->base_reg + INTC_REVISION) & 0xff;
+	printk(KERN_INFO "IRQ: Found an INTC at 0x%08lx "
+			 "(revision %ld.%ld) with %d interrupts\n",
+			 bank->base_reg, tmp >> 4, tmp & 0xf, bank->nr_irqs);
+
+	tmp = omap_readl(bank->base_reg + INTC_SYSCONFIG);
+	tmp |= 1 << 1;	/* soft reset */
+	omap_writel(tmp, bank->base_reg + INTC_SYSCONFIG);
+
+	while (!(omap_readl(bank->base_reg + INTC_SYSSTATUS) & 0x1))
+		/* Wait for reset to complete */;
+}
+
+void __init omap_init_irq(void)
+{
+	unsigned long nr_irqs = 0;
+	unsigned int nr_banks = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(irq_banks); i++) {
+		struct omap_irq_bank *bank = irq_banks + i;
+
+		/* XXX */
+		if (!bank->base_reg)
+			continue;
+
+		omap_irq_bank_init_one(bank);
+
+		nr_irqs += bank->nr_irqs;
+		nr_banks++;
+	}
+
+	printk(KERN_INFO "Total of %ld interrupts on %d active controller%s\n",
+	       nr_irqs, nr_banks, nr_banks > 1 ? "s" : "");
+
+	for (i = 0; i < nr_irqs; i++) {
+		set_irq_chip(i, &omap_irq_chip);
+		set_irq_handler(i, do_level_IRQ);
+		set_irq_flags(i, IRQF_VALID);
+	}
+}
+
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
new file mode 100644
index 0000000..ea46548
--- /dev/null
+++ b/arch/arm/mach-omap2/mux.c
@@ -0,0 +1,65 @@
+/*
+ * linux/arch/arm/mach-omap2/mux.c
+ *
+ * OMAP2 pin multiplexing configurations
+ *
+ * Copyright (C) 2003 - 2005 Nokia Corporation
+ *
+ * Written by Tony Lindgren <tony.lindgren@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <linux/spinlock.h>
+
+#include <asm/arch/mux.h>
+
+#ifdef CONFIG_OMAP_MUX
+
+/* NOTE: See mux.h for the enumeration */
+
+struct pin_config __initdata_or_module omap24xx_pins[] = {
+/*
+ *	description			mux	mux	pull	pull	debug
+ *					offset	mode	ena	type
+ */
+
+/* 24xx I2C */
+MUX_CFG_24XX("M19_24XX_I2C1_SCL",	0x111,	0,	0,	0,	1)
+MUX_CFG_24XX("L15_24XX_I2C1_SDA",	0x112,	0,	0,	0,	1)
+MUX_CFG_24XX("J15_24XX_I2C2_SCL",	0x113,	0,	0,	0,	1)
+MUX_CFG_24XX("H19_24XX_I2C2_SDA",	0x114,	0,	0,	0,	1)
+
+/* Menelaus interrupt */
+MUX_CFG_24XX("W19_24XX_SYS_NIRQ",	0x12c,	0,	1,	1,	1)
+
+/* 24xx GPIO */
+MUX_CFG_24XX("Y20_24XX_GPIO60",		0x12c,	3,	0,	0,	1)
+MUX_CFG_24XX("M15_24XX_GPIO92",		0x10a,	3,	0,	0,	1)
+
+};
+
+int __init omap2_mux_init(void)
+{
+	omap_mux_register(omap24xx_pins, ARRAY_SIZE(omap24xx_pins));
+	return 0;
+}
+
+#endif
diff --git a/arch/arm/mach-omap2/prcm.h b/arch/arm/mach-omap2/prcm.h
new file mode 100644
index 0000000..2eb89b9
--- /dev/null
+++ b/arch/arm/mach-omap2/prcm.h
@@ -0,0 +1,419 @@
+/*
+ * prcm.h - Access definitions for use in OMAP24XX clock and power management
+ *
+ * Copyright (C) 2005 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __ASM_ARM_ARCH_DPM_PRCM_H
+#define __ASM_ARM_ARCH_DPM_PRCM_H
+
+/* SET_PERFORMANCE_LEVEL PARAMETERS */
+#define PRCM_HALF_SPEED 1
+#define PRCM_FULL_SPEED 2
+
+#ifndef __ASSEMBLER__
+
+#define PRCM_REG32(offset)	__REG32(OMAP24XX_PRCM_BASE + (offset))
+
+#define PRCM_REVISION		PRCM_REG32(0x000)
+#define PRCM_SYSCONFIG		PRCM_REG32(0x010)
+#define PRCM_IRQSTATUS_MPU	PRCM_REG32(0x018)
+#define PRCM_IRQENABLE_MPU	PRCM_REG32(0x01C)
+#define PRCM_VOLTCTRL		PRCM_REG32(0x050)
+#define PRCM_VOLTST		PRCM_REG32(0x054)
+#define PRCM_CLKSRC_CTRL	PRCM_REG32(0x060)
+#define PRCM_CLKOUT_CTRL	PRCM_REG32(0x070)
+#define PRCM_CLKEMUL_CTRL	PRCM_REG32(0x078)
+#define PRCM_CLKCFG_CTRL	PRCM_REG32(0x080)
+#define PRCM_CLKCFG_STATUS	PRCM_REG32(0x084)
+#define PRCM_VOLTSETUP		PRCM_REG32(0x090)
+#define PRCM_CLKSSETUP		PRCM_REG32(0x094)
+#define PRCM_POLCTRL		PRCM_REG32(0x098)
+
+/* GENERAL PURPOSE */
+#define GENERAL_PURPOSE1	PRCM_REG32(0x0B0)
+#define GENERAL_PURPOSE2	PRCM_REG32(0x0B4)
+#define GENERAL_PURPOSE3	PRCM_REG32(0x0B8)
+#define GENERAL_PURPOSE4	PRCM_REG32(0x0BC)
+#define GENERAL_PURPOSE5	PRCM_REG32(0x0C0)
+#define GENERAL_PURPOSE6	PRCM_REG32(0x0C4)
+#define GENERAL_PURPOSE7	PRCM_REG32(0x0C8)
+#define GENERAL_PURPOSE8	PRCM_REG32(0x0CC)
+#define GENERAL_PURPOSE9	PRCM_REG32(0x0D0)
+#define GENERAL_PURPOSE10	PRCM_REG32(0x0D4)
+#define GENERAL_PURPOSE11	PRCM_REG32(0x0D8)
+#define GENERAL_PURPOSE12	PRCM_REG32(0x0DC)
+#define GENERAL_PURPOSE13	PRCM_REG32(0x0E0)
+#define GENERAL_PURPOSE14	PRCM_REG32(0x0E4)
+#define GENERAL_PURPOSE15	PRCM_REG32(0x0E8)
+#define GENERAL_PURPOSE16	PRCM_REG32(0x0EC)
+#define GENERAL_PURPOSE17	PRCM_REG32(0x0F0)
+#define GENERAL_PURPOSE18	PRCM_REG32(0x0F4)
+#define GENERAL_PURPOSE19	PRCM_REG32(0x0F8)
+#define GENERAL_PURPOSE20	PRCM_REG32(0x0FC)
+
+/* MPU */
+#define CM_CLKSEL_MPU		PRCM_REG32(0x140)
+#define CM_CLKSTCTRL_MPU	PRCM_REG32(0x148)
+#define RM_RSTST_MPU		PRCM_REG32(0x158)
+#define PM_WKDEP_MPU		PRCM_REG32(0x1C8)
+#define PM_EVGENCTRL_MPU	PRCM_REG32(0x1D4)
+#define PM_EVEGENONTIM_MPU	PRCM_REG32(0x1D8)
+#define PM_EVEGENOFFTIM_MPU	PRCM_REG32(0x1DC)
+#define PM_PWSTCTRL_MPU		PRCM_REG32(0x1E0)
+#define PM_PWSTST_MPU		PRCM_REG32(0x1E4)
+
+/* CORE */
+#define CM_FCLKEN1_CORE		PRCM_REG32(0x200)
+#define CM_FCLKEN2_CORE		PRCM_REG32(0x204)
+#define CM_FCLKEN3_CORE		PRCM_REG32(0x208)
+#define CM_ICLKEN1_CORE		PRCM_REG32(0x210)
+#define CM_ICLKEN2_CORE		PRCM_REG32(0x214)
+#define CM_ICLKEN3_CORE		PRCM_REG32(0x218)
+#define CM_ICLKEN4_CORE		PRCM_REG32(0x21C)
+#define CM_IDLEST1_CORE		PRCM_REG32(0x220)
+#define CM_IDLEST2_CORE		PRCM_REG32(0x224)
+#define CM_IDLEST3_CORE		PRCM_REG32(0x228)
+#define CM_IDLEST4_CORE		PRCM_REG32(0x22C)
+#define CM_AUTOIDLE1_CORE	PRCM_REG32(0x230)
+#define CM_AUTOIDLE2_CORE	PRCM_REG32(0x234)
+#define CM_AUTOIDLE3_CORE	PRCM_REG32(0x238)
+#define CM_AUTOIDLE4_CORE	PRCM_REG32(0x23C)
+#define CM_CLKSEL1_CORE		PRCM_REG32(0x240)
+#define CM_CLKSEL2_CORE		PRCM_REG32(0x244)
+#define CM_CLKSTCTRL_CORE	PRCM_REG32(0x248)
+#define PM_WKEN1_CORE		PRCM_REG32(0x2A0)
+#define PM_WKEN2_CORE		PRCM_REG32(0x2A4)
+#define PM_WKST1_CORE		PRCM_REG32(0x2B0)
+#define PM_WKST2_CORE		PRCM_REG32(0x2B4)
+#define PM_WKDEP_CORE		PRCM_REG32(0x2C8)
+#define PM_PWSTCTRL_CORE	PRCM_REG32(0x2E0)
+#define PM_PWSTST_CORE		PRCM_REG32(0x2E4)
+
+/* GFX */
+#define CM_FCLKEN_GFX		PRCM_REG32(0x300)
+#define CM_ICLKEN_GFX		PRCM_REG32(0x310)
+#define CM_IDLEST_GFX		PRCM_REG32(0x320)
+#define CM_CLKSEL_GFX		PRCM_REG32(0x340)
+#define CM_CLKSTCTRL_GFX	PRCM_REG32(0x348)
+#define RM_RSTCTRL_GFX		PRCM_REG32(0x350)
+#define RM_RSTST_GFX		PRCM_REG32(0x358)
+#define PM_WKDEP_GFX		PRCM_REG32(0x3C8)
+#define PM_PWSTCTRL_GFX		PRCM_REG32(0x3E0)
+#define PM_PWSTST_GFX		PRCM_REG32(0x3E4)
+
+/* WAKE-UP */
+#define CM_FCLKEN_WKUP		PRCM_REG32(0x400)
+#define CM_ICLKEN_WKUP		PRCM_REG32(0x410)
+#define CM_IDLEST_WKUP		PRCM_REG32(0x420)
+#define CM_AUTOIDLE_WKUP	PRCM_REG32(0x430)
+#define CM_CLKSEL_WKUP		PRCM_REG32(0x440)
+#define RM_RSTCTRL_WKUP		PRCM_REG32(0x450)
+#define RM_RSTTIME_WKUP		PRCM_REG32(0x454)
+#define RM_RSTST_WKUP		PRCM_REG32(0x458)
+#define PM_WKEN_WKUP		PRCM_REG32(0x4A0)
+#define PM_WKST_WKUP		PRCM_REG32(0x4B0)
+
+/* CLOCKS */
+#define CM_CLKEN_PLL		PRCM_REG32(0x500)
+#define CM_IDLEST_CKGEN		PRCM_REG32(0x520)
+#define CM_AUTOIDLE_PLL		PRCM_REG32(0x530)
+#define CM_CLKSEL1_PLL		PRCM_REG32(0x540)
+#define CM_CLKSEL2_PLL		PRCM_REG32(0x544)
+
+/* DSP */
+#define CM_FCLKEN_DSP		PRCM_REG32(0x800)
+#define CM_ICLKEN_DSP		PRCM_REG32(0x810)
+#define CM_IDLEST_DSP		PRCM_REG32(0x820)
+#define CM_AUTOIDLE_DSP		PRCM_REG32(0x830)
+#define CM_CLKSEL_DSP		PRCM_REG32(0x840)
+#define CM_CLKSTCTRL_DSP	PRCM_REG32(0x848)
+#define RM_RSTCTRL_DSP		PRCM_REG32(0x850)
+#define RM_RSTST_DSP		PRCM_REG32(0x858)
+#define PM_WKEN_DSP		PRCM_REG32(0x8A0)
+#define PM_WKDEP_DSP		PRCM_REG32(0x8C8)
+#define PM_PWSTCTRL_DSP		PRCM_REG32(0x8E0)
+#define PM_PWSTST_DSP		PRCM_REG32(0x8E4)
+#define PRCM_IRQSTATUS_DSP	PRCM_REG32(0x8F0)
+#define PRCM_IRQENABLE_DSP	PRCM_REG32(0x8F4)
+
+/* IVA */
+#define PRCM_IRQSTATUS_IVA	PRCM_REG32(0x8F8)
+#define PRCM_IRQENABLE_IVA	PRCM_REG32(0x8FC)
+
+/* Modem on 2430 */
+#define CM_FCLKEN_MDM		PRCM_REG32(0xC00)
+#define CM_ICLKEN_MDM		PRCM_REG32(0xC10)
+#define CM_IDLEST_MDM		PRCM_REG32(0xC20)
+#define CM_CLKSEL_MDM		PRCM_REG32(0xC40)
+
+/* FIXME: Move to header for 2430 */
+#define DISP_BASE		(OMAP24XX_L4_IO_BASE+0x50000)
+#define DISP_REG32(offset)	__REG32(DISP_BASE + (offset))
+
+#define GPMC_BASE		(OMAP24XX_GPMC_BASE)
+#define GPMC_REG32(offset)	__REG32(GPMC_BASE + (offset))
+
+#define GPT1_BASE		(OMAP24XX_GPT1)
+#define GPT1_REG32(offset)	__REG32(GPT1_BASE + (offset))
+
+/* Misc sysconfig */
+#define DISPC_SYSCONFIG		DISP_REG32(0x410)
+#define SPI_BASE		(OMAP24XX_L4_IO_BASE+0x98000)
+#define MCSPI1_SYSCONFIG	__REG32(SPI_BASE + 0x10)
+#define MCSPI2_SYSCONFIG	__REG32(SPI_BASE+0x2000 + 0x10)
+
+//#define DSP_MMU_SYSCONFIG	0x5A000010
+#define CAMERA_MMU_SYSCONFIG	__REG32(DISP_BASE+0x2C10)
+//#define IVA_MMU_SYSCONFIG	0x5D000010
+//#define DSP_DMA_SYSCONFIG	0x00FCC02C
+#define CAMERA_DMA_SYSCONFIG	__REG32(DISP_BASE+0x282C)
+#define SYSTEM_DMA_SYSCONFIG	__REG32(DISP_BASE+0x602C)
+#define GPMC_SYSCONFIG		GPMC_REG32(0x010)
+#define MAILBOXES_SYSCONFIG	__REG32(OMAP24XX_L4_IO_BASE+0x94010)
+#define UART1_SYSCONFIG		__REG32(OMAP24XX_L4_IO_BASE+0x6A054)
+#define UART2_SYSCONFIG		__REG32(OMAP24XX_L4_IO_BASE+0x6C054)
+#define UART3_SYSCONFIG		__REG32(OMAP24XX_L4_IO_BASE+0x6E054)
+//#define IVA_SYSCONFIG		0x5C060010
+#define SDRC_SYSCONFIG		__REG32(OMAP24XX_SDRC_BASE+0x10)
+#define SMS_SYSCONFIG		__REG32(OMAP24XX_SMS_BASE+0x10)
+#define SSI_SYSCONFIG		__REG32(DISP_BASE+0x8010)
+//#define VLYNQ_SYSCONFIG	0x67FFFE10
+
+/* rkw - good candidates for PM_ to start what nm was trying */
+#define OMAP24XX_GPT2		(OMAP24XX_L4_IO_BASE+0x2A000)
+#define OMAP24XX_GPT3		(OMAP24XX_L4_IO_BASE+0x78000)
+#define OMAP24XX_GPT4		(OMAP24XX_L4_IO_BASE+0x7A000)
+#define OMAP24XX_GPT5		(OMAP24XX_L4_IO_BASE+0x7C000)
+#define OMAP24XX_GPT6		(OMAP24XX_L4_IO_BASE+0x7E000)
+#define OMAP24XX_GPT7		(OMAP24XX_L4_IO_BASE+0x80000)
+#define OMAP24XX_GPT8		(OMAP24XX_L4_IO_BASE+0x82000)
+#define OMAP24XX_GPT9		(OMAP24XX_L4_IO_BASE+0x84000)
+#define OMAP24XX_GPT10		(OMAP24XX_L4_IO_BASE+0x86000)
+#define OMAP24XX_GPT11		(OMAP24XX_L4_IO_BASE+0x88000)
+#define OMAP24XX_GPT12		(OMAP24XX_L4_IO_BASE+0x8A000)
+
+#define GPTIMER1_SYSCONFIG	GPT1_REG32(0x010)
+#define GPTIMER2_SYSCONFIG	__REG32(OMAP24XX_GPT2 + 0x10)
+#define GPTIMER3_SYSCONFIG	__REG32(OMAP24XX_GPT3 + 0x10)
+#define GPTIMER4_SYSCONFIG	__REG32(OMAP24XX_GPT4 + 0x10)
+#define GPTIMER5_SYSCONFIG	__REG32(OMAP24XX_GPT5 + 0x10)
+#define GPTIMER6_SYSCONFIG	__REG32(OMAP24XX_GPT6 + 0x10)
+#define GPTIMER7_SYSCONFIG	__REG32(OMAP24XX_GPT7 + 0x10)
+#define GPTIMER8_SYSCONFIG	__REG32(OMAP24XX_GPT8 + 0x10)
+#define GPTIMER9_SYSCONFIG	__REG32(OMAP24XX_GPT9 + 0x10)
+#define GPTIMER10_SYSCONFIG	__REG32(OMAP24XX_GPT10 + 0x10)
+#define GPTIMER11_SYSCONFIG	__REG32(OMAP24XX_GPT11 + 0x10)
+#define GPTIMER12_SYSCONFIG	__REG32(OMAP24XX_GPT12 + 0x10)
+
+#define GPIOX_BASE(X)		(OMAP24XX_GPIO_BASE+(0x2000*((X)-1)))
+
+#define GPIO1_SYSCONFIG		__REG32((GPIOX_BASE(1)+0x10))
+#define GPIO2_SYSCONFIG		__REG32((GPIOX_BASE(2)+0x10))
+#define GPIO3_SYSCONFIG		__REG32((GPIOX_BASE(3)+0x10))
+#define GPIO4_SYSCONFIG		__REG32((GPIOX_BASE(4)+0x10))
+
+/* GP TIMER 1 */
+#define GPTIMER1_TISTAT		GPT1_REG32(0x014)
+#define GPTIMER1_TISR		GPT1_REG32(0x018)
+#define GPTIMER1_TIER		GPT1_REG32(0x01C)
+#define GPTIMER1_TWER		GPT1_REG32(0x020)
+#define GPTIMER1_TCLR		GPT1_REG32(0x024)
+#define GPTIMER1_TCRR		GPT1_REG32(0x028)
+#define GPTIMER1_TLDR		GPT1_REG32(0x02C)
+#define GPTIMER1_TTGR		GPT1_REG32(0x030)
+#define GPTIMER1_TWPS		GPT1_REG32(0x034)
+#define GPTIMER1_TMAR		GPT1_REG32(0x038)
+#define GPTIMER1_TCAR1		GPT1_REG32(0x03C)
+#define GPTIMER1_TSICR		GPT1_REG32(0x040)
+#define GPTIMER1_TCAR2		GPT1_REG32(0x044)
+
+/* rkw -- base fix up please... */
+#define GPTIMER3_TISR		__REG32(OMAP24XX_L4_IO_BASE+0x78018)
+
+/* SDRC */
+#define SDRC_DLLA_CTRL		__REG32(OMAP24XX_SDRC_BASE+0x060)
+#define SDRC_DLLA_STATUS	__REG32(OMAP24XX_SDRC_BASE+0x064)
+#define SDRC_DLLB_CTRL		__REG32(OMAP24XX_SDRC_BASE+0x068)
+#define SDRC_DLLB_STATUS	__REG32(OMAP24XX_SDRC_BASE+0x06C)
+#define SDRC_POWER		__REG32(OMAP24XX_SDRC_BASE+0x070)
+#define SDRC_MR_0		__REG32(OMAP24XX_SDRC_BASE+0x084)
+
+/* GPIO 1 */
+#define GPIO1_BASE		GPIOX_BASE(1)
+#define GPIO1_REG32(offset)	__REG32(GPIO1_BASE + (offset))
+#define GPIO1_IRQENABLE1	GPIO1_REG32(0x01C)
+#define GPIO1_IRQSTATUS1	GPIO1_REG32(0x018)
+#define GPIO1_IRQENABLE2	GPIO1_REG32(0x02C)
+#define GPIO1_IRQSTATUS2	GPIO1_REG32(0x028)
+#define GPIO1_WAKEUPENABLE	GPIO1_REG32(0x020)
+#define GPIO1_RISINGDETECT	GPIO1_REG32(0x048)
+#define GPIO1_DATAIN		GPIO1_REG32(0x038)
+#define GPIO1_OE		GPIO1_REG32(0x034)
+#define GPIO1_DATAOUT		GPIO1_REG32(0x03C)
+
+/* GPIO2 */
+#define GPIO2_BASE		GPIOX_BASE(2)
+#define GPIO2_REG32(offset)	__REG32(GPIO2_BASE + (offset))
+#define GPIO2_IRQENABLE1	GPIO2_REG32(0x01C)
+#define GPIO2_IRQSTATUS1	GPIO2_REG32(0x018)
+#define GPIO2_IRQENABLE2	GPIO2_REG32(0x02C)
+#define GPIO2_IRQSTATUS2	GPIO2_REG32(0x028)
+#define GPIO2_WAKEUPENABLE	GPIO2_REG32(0x020)
+#define GPIO2_RISINGDETECT	GPIO2_REG32(0x048)
+#define GPIO2_DATAIN		GPIO2_REG32(0x038)
+#define GPIO2_OE		GPIO2_REG32(0x034)
+#define GPIO2_DATAOUT		GPIO2_REG32(0x03C)
+
+/* GPIO 3 */
+#define GPIO3_BASE		GPIOX_BASE(3)
+#define GPIO3_REG32(offset)	__REG32(GPIO3_BASE + (offset))
+#define GPIO3_IRQENABLE1	GPIO3_REG32(0x01C)
+#define GPIO3_IRQSTATUS1	GPIO3_REG32(0x018)
+#define GPIO3_IRQENABLE2	GPIO3_REG32(0x02C)
+#define GPIO3_IRQSTATUS2	GPIO3_REG32(0x028)
+#define GPIO3_WAKEUPENABLE	GPIO3_REG32(0x020)
+#define GPIO3_RISINGDETECT	GPIO3_REG32(0x048)
+#define GPIO3_FALLINGDETECT	GPIO3_REG32(0x04C)
+#define GPIO3_DATAIN		GPIO3_REG32(0x038)
+#define GPIO3_OE		GPIO3_REG32(0x034)
+#define GPIO3_DATAOUT		GPIO3_REG32(0x03C)
+#define GPIO3_DEBOUNCENABLE	GPIO3_REG32(0x050)
+#define GPIO3_DEBOUNCINGTIME	GPIO3_REG32(0x054)
+
+/* GPIO 4 */
+#define GPIO4_BASE		GPIOX_BASE(4)
+#define GPIO4_REG32(offset)	__REG32(GPIO4_BASE + (offset))
+#define GPIO4_IRQENABLE1	GPIO4_REG32(0x01C)
+#define GPIO4_IRQSTATUS1	GPIO4_REG32(0x018)
+#define GPIO4_IRQENABLE2	GPIO4_REG32(0x02C)
+#define GPIO4_IRQSTATUS2	GPIO4_REG32(0x028)
+#define GPIO4_WAKEUPENABLE	GPIO4_REG32(0x020)
+#define GPIO4_RISINGDETECT	GPIO4_REG32(0x048)
+#define GPIO4_FALLINGDETECT	GPIO4_REG32(0x04C)
+#define GPIO4_DATAIN		GPIO4_REG32(0x038)
+#define GPIO4_OE		GPIO4_REG32(0x034)
+#define GPIO4_DATAOUT		GPIO4_REG32(0x03C)
+#define GPIO4_DEBOUNCENABLE	GPIO4_REG32(0x050)
+#define GPIO4_DEBOUNCINGTIME	GPIO4_REG32(0x054)
+
+
+/* IO CONFIG */
+#define CONTROL_BASE		(OMAP24XX_CTRL_BASE)
+#define CONTROL_REG32(offset)	__REG32(CONTROL_BASE + (offset))
+
+#define CONTROL_PADCONF_SPI1_NCS2	CONTROL_REG32(0x104)
+#define CONTROL_PADCONF_SYS_XTALOUT	CONTROL_REG32(0x134)
+#define CONTROL_PADCONF_UART1_RX	CONTROL_REG32(0x0C8)
+#define CONTROL_PADCONF_MCBSP1_DX	CONTROL_REG32(0x10C)
+#define CONTROL_PADCONF_GPMC_NCS4	CONTROL_REG32(0x090)
+#define CONTROL_PADCONF_DSS_D5		CONTROL_REG32(0x0B8)
+#define CONTROL_PADCONF_DSS_D9		CONTROL_REG32(0x0BC)
+#define CONTROL_PADCONF_DSS_D13		CONTROL_REG32(0x0C0)
+#define CONTROL_PADCONF_DSS_VSYNC	CONTROL_REG32(0x0CC)
+
+/* CONTROL */
+#define CONTROL_DEVCONF		CONTROL_REG32(0x274)
+
+/* INTERRUPT CONTROLLER */
+#define INTC_BASE		(OMAP24XX_L4_IO_BASE+0xfe000)
+#define INTC_REG32(offset)	__REG32(INTC_BASE + (offset))
+
+#define INTC1_U_BASE		INTC_REG32(0x000)
+#define INTC_MIR0		INTC_REG32(0x084)
+#define INTC_MIR_SET0		INTC_REG32(0x08C)
+#define INTC_MIR_CLEAR0		INTC_REG32(0x088)
+#define INTC_ISR_CLEAR0		INTC_REG32(0x094)
+#define INTC_MIR1		INTC_REG32(0x0A4)
+#define INTC_MIR_SET1		INTC_REG32(0x0AC)
+#define INTC_MIR_CLEAR1		INTC_REG32(0x0A8)
+#define INTC_ISR_CLEAR1		INTC_REG32(0x0B4)
+#define INTC_MIR2		INTC_REG32(0x0C4)
+#define INTC_MIR_SET2		INTC_REG32(0x0CC)
+#define INTC_MIR_CLEAR2		INTC_REG32(0x0C8)
+#define INTC_ISR_CLEAR2		INTC_REG32(0x0D4)
+#define INTC_SIR_IRQ		INTC_REG32(0x040)
+#define INTC_CONTROL		INTC_REG32(0x048)
+#define INTC_ILR11		INTC_REG32(0x12C)
+#define INTC_ILR32		INTC_REG32(0x180)
+#define INTC_ILR37		INTC_REG32(0x194)
+#define INTC_SYSCONFIG		INTC_REG32(0x010)
+
+/* RAM FIREWALL */
+#define RAMFW_BASE		(0x68005000)
+#define RAMFW_REG32(offset)	__REG32(RAMFW_BASE + (offset))
+
+#define RAMFW_REQINFOPERM0	RAMFW_REG32(0x048)
+#define RAMFW_READPERM0		RAMFW_REG32(0x050)
+#define RAMFW_WRITEPERM0	RAMFW_REG32(0x058)
+
+/* GPMC CS1 FPGA ON USER INTERFACE MODULE */
+//#define DEBUG_BOARD_LED_REGISTER 0x04000014
+
+/* GPMC CS0 */
+#define GPMC_CONFIG1_0		GPMC_REG32(0x060)
+#define GPMC_CONFIG2_0		GPMC_REG32(0x064)
+#define GPMC_CONFIG3_0		GPMC_REG32(0x068)
+#define GPMC_CONFIG4_0		GPMC_REG32(0x06C)
+#define GPMC_CONFIG5_0		GPMC_REG32(0x070)
+#define GPMC_CONFIG6_0		GPMC_REG32(0x074)
+#define GPMC_CONFIG7_0		GPMC_REG32(0x078)
+
+/* DSS */
+#define DSS_CONTROL		DISP_REG32(0x040)
+#define DISPC_CONTROL		DISP_REG32(0x440)
+#define DISPC_SYSSTATUS		DISP_REG32(0x414)
+#define DISPC_IRQSTATUS		DISP_REG32(0x418)
+#define DISPC_IRQENABLE		DISP_REG32(0x41C)
+#define DISPC_CONFIG		DISP_REG32(0x444)
+#define DISPC_DEFAULT_COLOR0	DISP_REG32(0x44C)
+#define DISPC_DEFAULT_COLOR1	DISP_REG32(0x450)
+#define DISPC_TRANS_COLOR0	DISP_REG32(0x454)
+#define DISPC_TRANS_COLOR1	DISP_REG32(0x458)
+#define DISPC_LINE_NUMBER	DISP_REG32(0x460)
+#define DISPC_TIMING_H		DISP_REG32(0x464)
+#define DISPC_TIMING_V		DISP_REG32(0x468)
+#define DISPC_POL_FREQ		DISP_REG32(0x46C)
+#define DISPC_DIVISOR		DISP_REG32(0x470)
+#define DISPC_SIZE_DIG		DISP_REG32(0x478)
+#define DISPC_SIZE_LCD		DISP_REG32(0x47C)
+#define DISPC_GFX_BA0		DISP_REG32(0x480)
+#define DISPC_GFX_BA1		DISP_REG32(0x484)
+#define DISPC_GFX_POSITION	DISP_REG32(0x488)
+#define DISPC_GFX_SIZE		DISP_REG32(0x48C)
+#define DISPC_GFX_ATTRIBUTES	DISP_REG32(0x4A0)
+#define DISPC_GFX_FIFO_THRESHOLD	DISP_REG32(0x4A4)
+#define DISPC_GFX_ROW_INC	DISP_REG32(0x4AC)
+#define DISPC_GFX_PIXEL_INC	DISP_REG32(0x4B0)
+#define DISPC_GFX_WINDOW_SKIP	DISP_REG32(0x4B4)
+#define DISPC_GFX_TABLE_BA	DISP_REG32(0x4B8)
+#define DISPC_DATA_CYCLE1	DISP_REG32(0x5D4)
+#define DISPC_DATA_CYCLE2	DISP_REG32(0x5D8)
+#define DISPC_DATA_CYCLE3	DISP_REG32(0x5DC)
+
+/* Wake up define for board */
+#define GPIO97			(1 << 1)
+#define GPIO88			(1 << 24)
+
+#endif /* __ASSEMBLER__ */
+
+#endif
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
new file mode 100644
index 0000000..f4df04f
--- /dev/null
+++ b/arch/arm/mach-omap2/serial.c
@@ -0,0 +1,180 @@
+/*
+ * arch/arm/mach-omap2/serial.c
+ *
+ * OMAP2 serial support.
+ *
+ * Copyright (C) 2005 Nokia Corporation
+ * Author: Paul Mundt <paul.mundt@nokia.com>
+ *
+ * Based off of arch/arm/mach-omap/omap1/serial.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/serial_8250.h>
+#include <linux/serial_reg.h>
+
+#include <asm/io.h>
+#include <asm/hardware/clock.h>
+
+#include <asm/arch/common.h>
+#include <asm/arch/board.h>
+
+static struct clk * uart1_ick = NULL;
+static struct clk * uart1_fck = NULL;
+static struct clk * uart2_ick = NULL;
+static struct clk * uart2_fck = NULL;
+static struct clk * uart3_ick = NULL;
+static struct clk * uart3_fck = NULL;
+
+static struct plat_serial8250_port serial_platform_data[] = {
+	{
+		.membase	= (char *)IO_ADDRESS(OMAP_UART1_BASE),
+		.mapbase	= (unsigned long)OMAP_UART1_BASE,
+		.irq		= 72,
+		.flags		= UPF_BOOT_AUTOCONF,
+		.iotype		= UPIO_MEM,
+		.regshift	= 2,
+		.uartclk	= OMAP16XX_BASE_BAUD * 16,
+	}, {
+		.membase	= (char *)IO_ADDRESS(OMAP_UART2_BASE),
+		.mapbase	= (unsigned long)OMAP_UART2_BASE,
+		.irq		= 73,
+		.flags		= UPF_BOOT_AUTOCONF,
+		.iotype		= UPIO_MEM,
+		.regshift	= 2,
+		.uartclk	= OMAP16XX_BASE_BAUD * 16,
+	}, {
+		.membase	= (char *)IO_ADDRESS(OMAP_UART3_BASE),
+		.mapbase	= (unsigned long)OMAP_UART3_BASE,
+		.irq		= 74,
+		.flags		= UPF_BOOT_AUTOCONF,
+		.iotype		= UPIO_MEM,
+		.regshift	= 2,
+		.uartclk	= OMAP16XX_BASE_BAUD * 16,
+	}, {
+		.flags		= 0
+	}
+};
+
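+/*
+ * The OMAP UART registers are spaced on 32-bit boundaries (regshift = 2
+ * above), so scale the 8250 register offset before doing the byte access.
+ */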
+static inline unsigned int serial_read_reg(struct plat_serial8250_port *up,
+					   int offset)
+{
+	offset <<= up->regshift;
+	return (unsigned int)__raw_readb(up->membase + offset);
+}
+
+static inline void serial_write_reg(struct plat_serial8250_port *p, int offset,
+				    int value)
+{
+	offset <<= p->regshift;
+	__raw_writeb(value, (unsigned long)(p->membase + offset));
+}
+
+/*
+ * Internal UARTs need to be initialized for the 8250 autoconfig to work
+ * properly. Note that the TX watermark initialization may not be needed
+ * once the 8250.c watermark handling code is merged.
+ */
+static inline void __init omap_serial_reset(struct plat_serial8250_port *p)
+{
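+	/*
+	 * MDR1 = 0x07 disables the UART while SCR and SYSC are set up, then
+	 * MDR1 = 0x00 puts it back into normal UART (16x) mode; SYSC bit 0
+	 * enables autoidle.  The SCR write is the TX watermark setup noted
+	 * in the comment above.
+	 */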
+	serial_write_reg(p, UART_OMAP_MDR1, 0x07);
+	serial_write_reg(p, UART_OMAP_SCR, 0x08);
+	serial_write_reg(p, UART_OMAP_MDR1, 0x00);
+	serial_write_reg(p, UART_OMAP_SYSC, 0x01);
+}
+
+void __init omap_serial_init(void)
+{
+	int i;
+	const struct omap_uart_config *info;
+
+	/*
+	 * Make sure the serial ports are muxed on at this point.
+	 * You have to mux them off in device drivers later on
+	 * if not needed.
+	 */
+
+	info = omap_get_config(OMAP_TAG_UART,
+			       struct omap_uart_config);
+
+	if (info == NULL)
+		return;
+
+	for (i = 0; i < OMAP_MAX_NR_PORTS; i++) {
+		struct plat_serial8250_port *p = serial_platform_data + i;
+
+		if (!(info->enabled_uarts & (1 << i))) {
+			p->membase = NULL;
+			p->mapbase = 0;
+			continue;
+		}
+
+		switch (i) {
+		case 0:
+			uart1_ick = clk_get(NULL, "uart1_ick");
+			if (IS_ERR(uart1_ick))
+				printk(KERN_ERR "Could not get uart1_ick\n");
+			else
+				clk_use(uart1_ick);
+
+			uart1_fck = clk_get(NULL, "uart1_fck");
+			if (IS_ERR(uart1_fck))
+				printk(KERN_ERR "Could not get uart1_fck\n");
+			else
+				clk_use(uart1_fck);
+			break;
+		case 1:
+			uart2_ick = clk_get(NULL, "uart2_ick");
+			if (IS_ERR(uart2_ick))
+				printk(KERN_ERR "Could not get uart2_ick\n");
+			else
+				clk_use(uart2_ick);
+
+			uart2_fck = clk_get(NULL, "uart2_fck");
+			if (IS_ERR(uart2_fck))
+				printk(KERN_ERR "Could not get uart2_fck\n");
+			else
+				clk_use(uart2_fck);
+			break;
+		case 2:
+			uart3_ick = clk_get(NULL, "uart3_ick");
+			if (IS_ERR(uart3_ick))
+				printk(KERN_ERR "Could not get uart3_ick\n");
+			else
+				clk_use(uart3_ick);
+
+			uart3_fck = clk_get(NULL, "uart3_fck");
+			if (IS_ERR(uart3_fck))
+				printk(KERN_ERR "Could not get uart3_fck\n");
+			else
+				clk_use(uart3_fck);
+			break;
+		}
+
+		omap_serial_reset(p);
+	}
+}
+
+static struct platform_device serial_device = {
+	.name			= "serial8250",
+	.id			= 0,
+	.dev			= {
+		.platform_data	= serial_platform_data,
+	},
+};
+
+static int __init omap_init(void)
+{
+	return platform_device_register(&serial_device);
+}
+arch_initcall(omap_init);
diff --git a/arch/arm/mach-omap2/sram-fn.S b/arch/arm/mach-omap2/sram-fn.S
new file mode 100644
index 0000000..2a869e2
--- /dev/null
+++ b/arch/arm/mach-omap2/sram-fn.S
@@ -0,0 +1,333 @@
+/*
+ * linux/arch/arm/mach-omap2/sram-fn.S
+ *
+ * OMAP2 specific functions that need to be run in internal SRAM
+ *
+ * (C) Copyright 2004
+ * Texas Instruments, <www.ti.com>
+ * Richard Woodruff <r-woodruff2@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+#include <linux/config.h>
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/arch/io.h>
+#include <asm/hardware.h>
+
+#include <asm/arch/prcm.h>
+
+#define TIMER_32KSYNCT_CR_V	IO_ADDRESS(OMAP24XX_32KSYNCT_BASE + 0x010)
+
+#define CM_CLKSEL2_PLL_V	IO_ADDRESS(OMAP24XX_PRCM_BASE + 0x544)
+#define PRCM_VOLTCTRL_V		IO_ADDRESS(OMAP24XX_PRCM_BASE + 0x050)
+#define PRCM_CLKCFG_CTRL_V	IO_ADDRESS(OMAP24XX_PRCM_BASE + 0x080)
+#define CM_CLKEN_PLL_V		IO_ADDRESS(OMAP24XX_PRCM_BASE + 0x500)
+#define CM_IDLEST_CKGEN_V	IO_ADDRESS(OMAP24XX_PRCM_BASE + 0x520)
+#define CM_CLKSEL1_PLL_V	IO_ADDRESS(OMAP24XX_PRCM_BASE + 0x540)
+
+#define SDRC_DLLA_CTRL_V	IO_ADDRESS(OMAP24XX_SDRC_BASE + 0x060)
+#define SDRC_RFR_CTRL_V		IO_ADDRESS(OMAP24XX_SDRC_BASE + 0x0a4)
+
+	.text
+
+ENTRY(sram_ddr_init)
+	stmfd	sp!, {r0 - r12, lr}	@ save registers on stack
+
+	mov	r12, r2			@ capture CS1 vs CS0
+	mov	r8, r3			@ capture force parameter
+
+	/* frequency shift down */
+	ldr	r2, cm_clksel2_pll	@ get address of dpllout reg
+	mov	r3, #0x1		@ value for 1x operation
+	str	r3, [r2]		@ go to L1-freq operation
+
+	/* voltage shift down */
+	mov r9, #0x1			@ set up for L1 voltage call
+	bl voltage_shift		@ go drop voltage
+
+	/* dll lock mode */
+	ldr	r11, sdrc_dlla_ctrl	@ addr of dlla ctrl
+	ldr	r10, [r11]		@ get current val
+	cmp	r12, #0x1		@ cs1 base (2422 es2.05/1)
+	addeq	r11, r11, #0x8		@ if cs1 base, move to DLLB
+	mvn	r9, #0x4		@ mask to get clear bit2
+	and	r10, r10, r9		@ clear bit2 for lock mode.
+	orr	r10, r10, #0x8		@ make sure DLL on (es2 bit pos)
+	orr	r10, r10, #0x2		@ 90 degree phase for all below 133Mhz
+	str	r10, [r11]		@ commit to DLLA_CTRL
+	bl	i_dll_wait		@ wait for dll to lock
+
+	/* get dll value */
+	add	r11, r11, #0x4		@ get addr of status reg
+	ldr	r10, [r11]		@ get locked value
+
+	/* voltage shift up */
+	mov r9, #0x0			@ shift back to L0-voltage
+	bl voltage_shift		@ go raise voltage
+
+	/* frequency shift up */
+	mov	r3, #0x2		@ value for 2x operation
+	str	r3, [r2]		@ go to L0-freq operation
+
+	/* reset entry mode for dllctrl */
+	sub	r11, r11, #0x4		@ move from status to ctrl
+	cmp	r12, #0x1		@ normalize if cs1 based
+	subeq	r11, r11, #0x8		@ possibly back to DLLA
+	cmp	r8, #0x1		@ if forced unlock exit
+	orreq	r1, r1, #0x4		@ make sure exit with unlocked value
+	str	r1, [r11]		@ restore DLLA_CTRL high value
+	add	r11, r11, #0x8		@ move to DLLB_CTRL addr
+	str	r1, [r11]		@ set value DLLB_CTRL
+	bl	i_dll_wait		@ wait for possible lock
+
+	/* set up for return, DDR should be good */
+	str r10, [r0]			@ write dll_status and return counter
+	ldmfd	sp!, {r0 - r12, pc}	@ restore regs and return
+
+	/* ensure the DLL has relocked */
+i_dll_wait:
+	mov	r4, #0x800		@ delay DLL relock, min 0x400 L3 clocks
+i_dll_delay:
+	subs	r4, r4, #0x1
+	bne	i_dll_delay
+	mov	pc, lr
+
+	/*
+	 * shift up or down voltage, use R9 as input to tell level.
+	 * wait for it to finish, use 32k sync counter, 1tick=31uS.
+	 */
+voltage_shift:
+	ldr	r4, prcm_voltctrl	@ get addr of volt ctrl.
+	ldr	r5, [r4]		@ get value.
+	ldr	r6, prcm_mask_val	@ get value of mask
+	and	r5, r5, r6		@ apply mask to clear bits
+	orr	r5, r5, r9		@ build value for L0/L1-volt operation.
+	str	r5, [r4]		@ set up for change.
+	mov	r3, #0x4000		@ get val for force
+	orr	r5, r5, r3		@ build value for force
+	str	r5, [r4]		@ Force transition to L1
+
+	ldr	r3, timer_32ksynct_cr	@ get addr of counter
+	ldr	r5, [r3]		@ get value
+	add	r5, r5, #0x3		@ give it at most 93uS
+volt_delay:
+	ldr	r7, [r3]		@ get timer value
+	cmp	r5, r7			@ time up?
+	bhi	volt_delay		@ not yet->branch
+	mov	pc, lr			@ back to caller.
+
+/* relative load constants */
+cm_clksel2_pll:
+	.word CM_CLKSEL2_PLL_V
+sdrc_dlla_ctrl:
+	.word SDRC_DLLA_CTRL_V
+prcm_voltctrl:
+	.word PRCM_VOLTCTRL_V
+prcm_mask_val:
+	.word 0xFFFF3FFC
+timer_32ksynct_cr:
+	.word TIMER_32KSYNCT_CR_V
+ENTRY(sram_ddr_init_sz)
+	.word	. - sram_ddr_init
+
+/*
+ * Reprograms memory timings.
+ * r0 = [PRCM_FULL | PRCM_HALF] r1 = SDRC_DLLA_CTRL value r2 = [DDR | SDR]
+ * PRCM_FULL = 2, PRCM_HALF = 1, DDR = 1, SDR = 0
+ */
+ENTRY(sram_reprogram_sdrc)
+	stmfd	sp!, {r0 - r10, lr}	@ save registers on stack
+	mov	r3, #0x0		@ clear for mrc call
+	mcr	p15, 0, r3, c7, c10, 4	@ memory barrier, finish ARM SDR/DDR
+	nop
+	nop
+	ldr	r6, ddr_sdrc_rfr_ctrl	@ get addr of refresh reg
+	ldr	r5, [r6]		@ get value
+	mov	r5, r5, lsr #8		@ isolate rfr field and drop burst
+
+	cmp	r0, #0x1		@ going to half speed?
+	movne	r9, #0x0		@ if up set flag up for pre up, hi volt
+
+	blne	voltage_shift_c		@ adjust voltage
+
+	cmp	r0, #0x1		@ going to half speed (post branch link)
+	moveq	r5, r5, lsr #1		@ divide by 2 if to half
+	movne	r5, r5, lsl #1		@ mult by 2 if to full
+	mov	r5, r5, lsl #8		@ put rfr field back into place
+	add	r5, r5, #0x1		@ turn on burst of 1
+	ldr	r4, ddr_cm_clksel2_pll	@ get address of out reg
+	ldr	r3, [r4]		@ get curr value
+	orr	r3, r3, #0x3
+	bic	r3, r3, #0x3		@ clear lower bits
+	orr	r3, r3, r0		@ new state value
+	str	r3, [r4]		@ set new state (pll/x, x=1 or 2)
+	nop
+	nop
+
+	moveq	r9, #0x1		@ if speed down, post down, drop volt
+	bleq	voltage_shift_c
+
+	mcr	p15, 0, r3, c7, c10, 4	@ memory barrier
+	str	r5, [r6]		@ set new RFR_1 value
+	add	r6, r6, #0x30		@ get RFR_2 addr
+	str	r5, [r6]		@ set RFR_2
+	nop
+	cmp	r2, #0x1		@ (SDR or DDR) do we need to adjust DLL
+	bne	freq_out		@ leave if SDR, no DLL function
+
+	/* With DDR, we need to take care of the DLL for the frequency change */
+	ldr	r2, ddr_sdrc_dlla_ctrl	@ addr of dlla ctrl
+	str	r1, [r2]		@ write out new SDRC_DLLA_CTRL
+	add	r2, r2, #0x8		@ addr to SDRC_DLLB_CTRL
+	str	r1, [r2]		@ commit to SDRC_DLLB_CTRL
+	mov	r1, #0x2000		@ wait DLL relock, min 0x400 L3 clocks
+dll_wait:
+	subs	r1, r1, #0x1
+	bne	dll_wait
+freq_out:
+	ldmfd	sp!, {r0 - r10, pc}	@ restore regs and return
+
+	/*
+	 * shift up or down voltage, use R9 as input to tell level.
+	 * wait for it to finish, use 32k sync counter, 1tick=31uS.
+	 */
+voltage_shift_c:
+	ldr	r10, ddr_prcm_voltctrl	@ get addr of volt ctrl
+	ldr	r8, [r10]		@ get value
+	ldr	r7, ddr_prcm_mask_val	@ get value of mask
+	and	r8, r8, r7		@ apply mask to clear bits
+	orr	r8, r8, r9		@ build value for L0/L1-volt operation.
+	str	r8, [r10]		@ set up for change.
+	mov	r7, #0x4000		@ get val for force
+	orr	r8, r8, r7		@ build value for force
+	str	r8, [r10]		@ Force transition to L1
+
+	ldr	r10, ddr_timer_32ksynct	@ get addr of counter
+	ldr	r8, [r10]		@ get value
+	add	r8, r8, #0x2		@ give it at most 62uS (min 31+)
+volt_delay_c:
+	ldr	r7, [r10]		@ get timer value
+	cmp	r8, r7			@ time up?
+	bhi	volt_delay_c		@ not yet->branch
+	mov	pc, lr			@ back to caller
+
+ddr_cm_clksel2_pll:
+	.word CM_CLKSEL2_PLL_V
+ddr_sdrc_dlla_ctrl:
+	.word SDRC_DLLA_CTRL_V
+ddr_sdrc_rfr_ctrl:
+	.word SDRC_RFR_CTRL_V
+ddr_prcm_voltctrl:
+	.word PRCM_VOLTCTRL_V
+ddr_prcm_mask_val:
+	.word 0xFFFF3FFC
+ddr_timer_32ksynct:
+	.word TIMER_32KSYNCT_CR_V
+
+ENTRY(sram_reprogram_sdrc_sz)
+	.word	. - sram_reprogram_sdrc
+
+/*
+ * Set dividers and pll. Also recalculate DLL value for DDR and unlock mode.
+ */
+ENTRY(sram_set_prcm)
+	stmfd	sp!, {r0-r12, lr}	@ regs to stack
+	adr	r4, pbegin		@ addr of preload start
+	adr	r8, pend		@ addr of preload end
+	mcrr	p15, 1, r8, r4, c12	@ preload into icache
+pbegin:
+	/* move into fast relock bypass */
+	ldr	r8, pll_ctl		@ get addr
+	ldr	r5, [r8]		@ get val
+	mvn	r6, #0x3		@ clear mask
+	and	r5, r5, r6		@ clear field
+	orr	r7, r5, #0x2		@ fast relock val
+	str	r7, [r8]		@ go to fast relock
+	ldr	r4, pll_stat		@ addr of stat
+block:
+	/* wait for bypass */
+	ldr	r8, [r4]		@ stat value
+	and	r8, r8, #0x3		@ mask for stat
+	cmp	r8, #0x1		@ there yet
+	bne	block			@ loop if not
+
+	/* set new dpll dividers _after_ in bypass */
+	ldr	r4, pll_div		@ get addr
+	str	r0, [r4]		@ set dpll ctrl val
+
+	ldr	r4, set_config		@ get addr
+	mov	r8, #1			@ valid cfg msk
+	str	r8, [r4]		@ make dividers take
+
+	mov	r4, #100		@ dead spin a bit
+wait_a_bit:
+	subs	r4, r4, #1		@ dec loop
+	bne	wait_a_bit		@ delay done?
+
+	/* check if staying in bypass */
+	cmp	r2, #0x1		@ stay in bypass?
+	beq	pend			@ jump over dpll relock
+
+	/* relock DPLL with new vals */
+	ldr	r5, pll_stat		@ get addr
+	ldr	r4, pll_ctl		@ get addr
+	orr	r8, r7, #0x3		@ val for lock dpll
+	str	r8, [r4]		@ set val
+	mov	r0, #1000		@ dead spin a bit
+wait_more:
+	subs	r0, r0, #1		@ dec loop
+	bne	wait_more		@ delay done?
+wait_lock:
+	ldr	r8, [r5]		@ get lock val
+	and	r8, r8, #3		@ isolate field
+	cmp	r8, #2			@ locked?
+	bne	wait_lock		@ wait if not
+pend:
+	/* update memory timings & briefly lock dll */
+	ldr	r4, sdrc_rfr		@ get addr
+	str	r1, [r4]		@ update refresh timing
+	ldr	r11, dlla_ctrl		@ get addr of DLLA ctrl
+	ldr	r10, [r11]		@ get current val
+	mvn	r9, #0x4		@ mask to get clear bit2
+	and	r10, r10, r9		@ clear bit2 for lock mode
+	orr	r10, r10, #0x8		@ make sure DLL on (es2 bit pos)
+	str	r10, [r11]		@ commit to DLLA_CTRL
+	add	r11, r11, #0x8		@ move to dllb
+	str	r10, [r11]		@ hit DLLB also
+
+	mov	r4, #0x800		@ relock time (min 0x400 L3 clocks)
+wait_dll_lock:
+	subs	r4, r4, #0x1
+	bne	wait_dll_lock
+	nop
+	ldmfd	sp!, {r0-r12, pc}	@ restore regs and return
+
+set_config:
+	.word PRCM_CLKCFG_CTRL_V
+pll_ctl:
+	.word CM_CLKEN_PLL_V
+pll_stat:
+	.word CM_IDLEST_CKGEN_V
+pll_div:
+	.word CM_CLKSEL1_PLL_V
+sdrc_rfr:
+	.word SDRC_RFR_CTRL_V
+dlla_ctrl:
+	.word SDRC_DLLA_CTRL_V
+
+ENTRY(sram_set_prcm_sz)
+	.word	. - sram_set_prcm
diff --git a/arch/arm/mach-omap2/timer-gp.c b/arch/arm/mach-omap2/timer-gp.c
new file mode 100644
index 0000000..9ec1144
--- /dev/null
+++ b/arch/arm/mach-omap2/timer-gp.c
@@ -0,0 +1,126 @@
+/*
+ * linux/arch/arm/mach-omap2/timer-gp.c
+ *
+ * OMAP2 GP timer support.
+ *
+ * Copyright (C) 2005 Nokia Corporation
+ * Author: Paul Mundt <paul.mundt@nokia.com>
+ *         Juha Yrjölä <juha.yrjola@nokia.com>
+ *
+ * Some parts based off of TI's 24xx code:
+ *
+ *   Copyright (C) 2004 Texas Instruments, Inc.
+ *
+ * Roughly modelled after the OMAP1 MPU timer code.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/time.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <asm/mach/time.h>
+#include <asm/delay.h>
+#include <asm/io.h>
+#include <asm/hardware/clock.h>
+
+#define OMAP2_GP_TIMER1_BASE	0x48028000
+#define OMAP2_GP_TIMER2_BASE	0x4802a000
+#define OMAP2_GP_TIMER3_BASE	0x48078000
+#define OMAP2_GP_TIMER4_BASE	0x4807a000
+
+#define GP_TIMER_TIDR		0x00
+#define GP_TIMER_TISR		0x18
+#define GP_TIMER_TIER		0x1c
+#define GP_TIMER_TCLR		0x24
+#define GP_TIMER_TCRR		0x28
+#define GP_TIMER_TLDR		0x2c
+#define GP_TIMER_TSICR		0x40
+
+#define OS_TIMER_NR		1  /* GP timer 2 */
+
+static unsigned long timer_base[] = {
+	IO_ADDRESS(OMAP2_GP_TIMER1_BASE),
+	IO_ADDRESS(OMAP2_GP_TIMER2_BASE),
+	IO_ADDRESS(OMAP2_GP_TIMER3_BASE),
+	IO_ADDRESS(OMAP2_GP_TIMER4_BASE),
+};
+
+static inline unsigned int timer_read_reg(int nr, unsigned int reg)
+{
+	return __raw_readl(timer_base[nr] + reg);
+}
+
+static inline void timer_write_reg(int nr, unsigned int reg, unsigned int val)
+{
+	__raw_writel(val, timer_base[nr] + reg);
+}
+
+/* Note that we always enable the clock prescale divider bit */
+static inline void omap2_gp_timer_start(int nr, unsigned long load_val)
+{
+	unsigned int tmp;
+
+	tmp = 0xffffffff - load_val;
+
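+	/*
+	 * The timer counts up and interrupts on overflow, hence the
+	 * (0xffffffff - load_val) load value.  TIER bit 1 enables the
+	 * overflow interrupt; TCLR starts the timer with autoreload and
+	 * the prescaler enabled.
+	 */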
+	timer_write_reg(nr, GP_TIMER_TLDR, tmp);
+	timer_write_reg(nr, GP_TIMER_TCRR, tmp);
+	timer_write_reg(nr, GP_TIMER_TIER, 1 << 1);
+	timer_write_reg(nr, GP_TIMER_TCLR, (1 << 5) | (1 << 1) | 1);
+}
+
+static irqreturn_t omap2_gp_timer_interrupt(int irq, void *dev_id,
+					    struct pt_regs *regs)
+{
+	write_seqlock(&xtime_lock);
+
+	timer_write_reg(OS_TIMER_NR, GP_TIMER_TISR, 1 << 1);
+	timer_tick(regs);
+
+	write_sequnlock(&xtime_lock);
+
+	return IRQ_HANDLED;
+}
+
+static struct irqaction omap2_gp_timer_irq = {
+	.name		= "gp timer",
+	.flags		= SA_INTERRUPT,
+	.handler	= omap2_gp_timer_interrupt,
+};
+
+static void __init omap2_gp_timer_init(void)
+{
+	struct clk * sys_ck;
+	u32 tick_period = 120000;
+	u32 l;
+
+	/* Reset clock and prescale value */
+	timer_write_reg(OS_TIMER_NR, GP_TIMER_TCLR, 0);
+
+	sys_ck = clk_get(NULL, "sys_ck");
+	if (IS_ERR(sys_ck))
+		printk(KERN_ERR "Could not get sys_ck\n");
+	else {
+		clk_use(sys_ck);
+		tick_period = clk_get_rate(sys_ck) / 100;
+		clk_put(sys_ck);
+	}
+
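+	/*
+	 * sys_ck / 100 gives one tick worth of timer input clocks; halve it
+	 * for the /2 prescaler (always enabled in omap2_gp_timer_start) and
+	 * subtract one for the counter reload value.
+	 */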
+	tick_period /= 2;	/* Minimum prescale divider is 2 */
+	tick_period -= 1;
+
+	l = timer_read_reg(OS_TIMER_NR, GP_TIMER_TIDR);
+	printk(KERN_INFO "OMAP2 GP timer (HW version %d.%d)\n",
+	       (l >> 4) & 0x0f, l & 0x0f);
+
+	setup_irq(38, &omap2_gp_timer_irq);
+
+	omap2_gp_timer_start(OS_TIMER_NR, tick_period);
+}
+
+struct sys_timer omap_timer = {
+	.init	= omap2_gp_timer_init,
+};
+
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index b380a438..e201aa9 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -60,6 +60,7 @@
 	bool "Enable Sharp SL-C700 (Corgi) Support"
 	depends PXA_SHARPSL_25x
 	select PXA_SHARP_C7xx
+	select PXA_SSP
 
 config MACH_SHEPHERD
 	bool "Enable Sharp SL-C750 (Shepherd) Support"
@@ -102,12 +103,18 @@
 
 config PXA_SHARP_C7xx
 	bool
+	select PXA_SSP
 	help
 	  Enable support for all Sharp C7xx models
 
 config PXA_SHARP_Cxx00
 	bool
+	select PXA_SSP
 	help
 	  Enable common support for Sharp Cxx00 models
 
+config PXA_SSP
+	tristate
+	help
+	  Enable support for PXA2xx SSP ports
 endif
diff --git a/arch/arm/mach-pxa/Makefile b/arch/arm/mach-pxa/Makefile
index 8bc72d0..d210bd5 100644
--- a/arch/arm/mach-pxa/Makefile
+++ b/arch/arm/mach-pxa/Makefile
@@ -11,8 +11,8 @@
 obj-$(CONFIG_ARCH_LUBBOCK) += lubbock.o
 obj-$(CONFIG_MACH_MAINSTONE) += mainstone.o
 obj-$(CONFIG_ARCH_PXA_IDP) += idp.o
-obj-$(CONFIG_PXA_SHARP_C7xx)	+= corgi.o corgi_ssp.o corgi_lcd.o ssp.o
-obj-$(CONFIG_PXA_SHARP_Cxx00)	+= spitz.o corgi_ssp.o corgi_lcd.o ssp.o
+obj-$(CONFIG_PXA_SHARP_C7xx)	+= corgi.o corgi_ssp.o corgi_lcd.o
+obj-$(CONFIG_PXA_SHARP_Cxx00)	+= spitz.o corgi_ssp.o corgi_lcd.o
 obj-$(CONFIG_MACH_POODLE)	+= poodle.o
 obj-$(CONFIG_MACH_TOSA)         += tosa.o
 
@@ -26,6 +26,7 @@
 
 # Misc features
 obj-$(CONFIG_PM) += pm.o sleep.o
+obj-$(CONFIG_PXA_SSP) += ssp.o
 
 ifeq ($(CONFIG_PXA27x),y)
 obj-$(CONFIG_PM) += standby.o
diff --git a/arch/arm/mach-pxa/corgi_ssp.c b/arch/arm/mach-pxa/corgi_ssp.c
index 591e5f3..bdf10cf 100644
--- a/arch/arm/mach-pxa/corgi_ssp.c
+++ b/arch/arm/mach-pxa/corgi_ssp.c
@@ -203,7 +203,7 @@
 	GPDR(ssp_machinfo->cs_ads7846) |= GPIO_bit(ssp_machinfo->cs_ads7846);  /* output */
 	GPSR(ssp_machinfo->cs_ads7846) = GPIO_bit(ssp_machinfo->cs_ads7846);   /* High - Disable ADS7846*/
 
-	ret = ssp_init(&corgi_ssp_dev,ssp_machinfo->port);
+	ret = ssp_init(&corgi_ssp_dev, ssp_machinfo->port, 0);
 
 	if (ret)
 		printk(KERN_ERR "Unable to register SSP handler!\n");
diff --git a/arch/arm/mach-pxa/sharpsl.h b/arch/arm/mach-pxa/sharpsl.h
index 3977a77..4879c0f 100644
--- a/arch/arm/mach-pxa/sharpsl.h
+++ b/arch/arm/mach-pxa/sharpsl.h
@@ -32,3 +32,90 @@
 void spitz_put_hsync(void);
 void corgi_wait_hsync(void);
 void spitz_wait_hsync(void);
+
+/*
+ * SharpSL Battery/PM Driver
+ */
+
+struct sharpsl_charger_machinfo {
+	void (*init)(void);
+	int gpio_acin;
+	int gpio_batfull;
+	int gpio_batlock;
+	int gpio_fatal;
+	int (*status_acin)(void);
+	void (*discharge)(int);
+	void (*discharge1)(int);
+	void (*charge)(int);
+	void (*chargeled)(int);
+	void (*measure_temp)(int);
+	void (*presuspend)(void);
+	void (*postsuspend)(void);
+	unsigned long (*charger_wakeup)(void);
+	int (*should_wakeup)(unsigned int resume_on_alarm);
+	int bat_levels;
+	struct battery_thresh *bat_levels_noac;
+	struct battery_thresh *bat_levels_acin;
+	int status_high_acin;
+	int status_low_acin;
+	int status_high_noac;
+	int status_low_noac;
+};
+
+struct battery_thresh {
+	int voltage;
+	int percentage;
+};
+
+struct battery_stat {
+	int ac_status;         /* APM AC Present/Not Present */
+	int mainbat_status;    /* APM Main Battery Status */
+	int mainbat_percent;   /* Main Battery Percentage Charge */
+	int mainbat_voltage;   /* Main Battery Voltage */
+};
+
+struct sharpsl_pm_status {
+	struct device *dev;
+	struct timer_list ac_timer;
+	struct timer_list chrg_full_timer;
+
+	int charge_mode;
+#define CHRG_ERROR    (-1)
+#define CHRG_OFF      (0)
+#define CHRG_ON       (1)
+#define CHRG_DONE     (2)
+
+	unsigned int flags;
+#define SHARPSL_SUSPENDED       (1 << 0)  /* Device is Suspended */
+#define SHARPSL_ALARM_ACTIVE    (1 << 1)  /* Alarm is for charging event (not user) */
+#define SHARPSL_BL_LIMIT        (1 << 2)  /* Backlight Intensity Limited */
+#define SHARPSL_APM_QUEUED      (1 << 3)  /* APM Event Queued */
+#define SHARPSL_DO_OFFLINE_CHRG (1 << 4)  /* Trigger the offline charger */
+
+	int full_count;
+	unsigned long charge_start_time;
+	struct sharpsl_charger_machinfo *machinfo;
+	struct battery_stat battstat;
+};
+
+extern struct sharpsl_pm_status sharpsl_pm;
+extern struct battery_thresh spitz_battery_levels_acin[];
+extern struct battery_thresh spitz_battery_levels_noac[];
+
+#define READ_GPIO_BIT(x)    (GPLR(x) & GPIO_bit(x))
+
+#define SHARPSL_LED_ERROR  2
+#define SHARPSL_LED_ON     1
+#define SHARPSL_LED_OFF    0
+
+#define CHARGE_ON()         sharpsl_pm.machinfo->charge(1)
+#define CHARGE_OFF()        sharpsl_pm.machinfo->charge(0)
+#define CHARGE_LED_ON()     sharpsl_pm.machinfo->chargeled(SHARPSL_LED_ON)
+#define CHARGE_LED_OFF()    sharpsl_pm.machinfo->chargeled(SHARPSL_LED_OFF)
+#define CHARGE_LED_ERR()    sharpsl_pm.machinfo->chargeled(SHARPSL_LED_ERROR)
+#define DISCHARGE_ON()      sharpsl_pm.machinfo->discharge(1)
+#define DISCHARGE_OFF()     sharpsl_pm.machinfo->discharge(0)
+#define STATUS_AC_IN        sharpsl_pm.machinfo->status_acin()
+#define STATUS_BATT_LOCKED  READ_GPIO_BIT(sharpsl_pm.machinfo->gpio_batlock)
+#define STATUS_CHRG_FULL    READ_GPIO_BIT(sharpsl_pm.machinfo->gpio_batfull)
+#define STATUS_FATAL        READ_GPIO_BIT(sharpsl_pm.machinfo->gpio_fatal)
diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
new file mode 100644
index 0000000..6c9e871
--- /dev/null
+++ b/arch/arm/mach-pxa/sharpsl_pm.c
@@ -0,0 +1,992 @@
+/*
+ * Battery and Power Management code for the Sharp SL-C7xx and SL-Cxx00
+ * series of PDAs
+ *
+ * Copyright (c) 2004-2005 Richard Purdie
+ *
+ * Based on code written by Sharp for 2.4 kernels
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#undef DEBUG
+
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/apm_bios.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+
+#include <asm/hardware.h>
+#include <asm/hardware/scoop.h>
+#include <asm/mach-types.h>
+#include <asm/irq.h>
+#include <asm/apm.h>
+
+#include <asm/arch/pm.h>
+#include <asm/arch/pxa-regs.h>
+#include <asm/arch/sharpsl.h>
+#include "sharpsl.h"
+
+/*
+ * Constants
+ */
+#define SHARPSL_CHARGE_ON_TIME_INTERVAL        (msecs_to_jiffies(1*60*1000))  /* 1 min */
+#define SHARPSL_CHARGE_FINISH_TIME             (msecs_to_jiffies(10*60*1000)) /* 10 min */
+#define SHARPSL_BATCHK_TIME                    (msecs_to_jiffies(15*1000))    /* 15 sec */
+#define SHARPSL_BATCHK_TIME_SUSPEND            (60*10)                        /* 10 min */
+#define SHARPSL_WAIT_CO_TIME                   15  /* 15 sec */
+#define SHARPSL_WAIT_DISCHARGE_ON              100 /* 100 msec */
+#define SHARPSL_CHECK_BATTERY_WAIT_TIME_TEMP   10  /* 10 msec */
+#define SHARPSL_CHECK_BATTERY_WAIT_TIME_VOLT   10  /* 10 msec */
+#define SHARPSL_CHECK_BATTERY_WAIT_TIME_JKVAD  10  /* 10 msec */
+#define SHARPSL_CHARGE_WAIT_TIME               15  /* 15 msec */
+#define SHARPSL_CHARGE_CO_CHECK_TIME           5   /* 5 msec */
+#define SHARPSL_CHARGE_RETRY_CNT               1   /* eqv. 10 min */
+
+#define SHARPSL_CHARGE_ON_VOLT         0x99  /* 2.9V */
+#define SHARPSL_CHARGE_ON_TEMP         0xe0  /* battery temperature threshold (raw ADC value) */
+#define SHARPSL_CHARGE_ON_JKVAD_HIGH   0x9b  /* 6V */
+#define SHARPSL_CHARGE_ON_JKVAD_LOW    0x34  /* 2V */
+#define SHARPSL_FATAL_ACIN_VOLT        182   /* 3.45V */
+#define SHARPSL_FATAL_NOACIN_VOLT      170   /* 3.40V */
+
+struct battery_thresh spitz_battery_levels_acin[] = {
+	{ 213, 100},
+	{ 212,  98},
+	{ 211,  95},
+	{ 210,  93},
+	{ 209,  90},
+	{ 208,  88},
+	{ 207,  85},
+	{ 206,  83},
+	{ 205,  80},
+	{ 204,  78},
+	{ 203,  75},
+	{ 202,  73},
+	{ 201,  70},
+	{ 200,  68},
+	{ 199,  65},
+	{ 198,  63},
+	{ 197,  60},
+	{ 196,  58},
+	{ 195,  55},
+	{ 194,  53},
+	{ 193,  50},
+	{ 192,  48},
+	{ 192,  45},
+	{ 191,  43},
+	{ 191,  40},
+	{ 190,  38},
+	{ 190,  35},
+	{ 189,  33},
+	{ 188,  30},
+	{ 187,  28},
+	{ 186,  25},
+	{ 185,  23},
+	{ 184,  20},
+	{ 183,  18},
+	{ 182,  15},
+	{ 181,  13},
+	{ 180,  10},
+	{ 179,   8},
+	{ 178,   5},
+	{   0,   0},
+};
+
+struct battery_thresh  spitz_battery_levels_noac[] = {
+	{ 213, 100},
+	{ 212,  98},
+	{ 211,  95},
+	{ 210,  93},
+	{ 209,  90},
+	{ 208,  88},
+	{ 207,  85},
+	{ 206,  83},
+	{ 205,  80},
+	{ 204,  78},
+	{ 203,  75},
+	{ 202,  73},
+	{ 201,  70},
+	{ 200,  68},
+	{ 199,  65},
+	{ 198,  63},
+	{ 197,  60},
+	{ 196,  58},
+	{ 195,  55},
+	{ 194,  53},
+	{ 193,  50},
+	{ 192,  48},
+	{ 191,  45},
+	{ 190,  43},
+	{ 189,  40},
+	{ 188,  38},
+	{ 187,  35},
+	{ 186,  33},
+	{ 185,  30},
+	{ 184,  28},
+	{ 183,  25},
+	{ 182,  23},
+	{ 181,  20},
+	{ 180,  18},
+	{ 179,  15},
+	{ 178,  13},
+	{ 177,  10},
+	{ 176,   8},
+	{ 175,   5},
+	{   0,   0},
+};
+
+/* MAX1111 Commands */
+#define MAXCTRL_PD0      (1u << 0)
+#define MAXCTRL_PD1      (1u << 1)
+#define MAXCTRL_SGL      (1u << 2)
+#define MAXCTRL_UNI      (1u << 3)
+#define MAXCTRL_SEL_SH   4
+#define MAXCTRL_STR      (1u << 7)
+
+/* MAX1111 Channel Definitions */
+#define BATT_AD    4u
+#define BATT_THM   2u
+#define JK_VAD     6u
+
+
+/*
+ * Prototypes
+ */
+static int sharpsl_read_MainBattery(void);
+static int sharpsl_off_charge_battery(void);
+static int sharpsl_check_battery(int mode);
+static int sharpsl_ac_check(void);
+static int sharpsl_fatal_check(void);
+static int sharpsl_average_value(int ad);
+static void sharpsl_average_clear(void);
+static void sharpsl_charge_toggle(void *private_);
+static void sharpsl_battery_thread(void *private_);
+
+
+/*
+ * Variables
+ */
+struct sharpsl_pm_status sharpsl_pm;
+DECLARE_WORK(toggle_charger, sharpsl_charge_toggle, NULL);
+DECLARE_WORK(sharpsl_bat, sharpsl_battery_thread, NULL);
+
+
+static int get_percentage(int voltage)
+{
+	int i = sharpsl_pm.machinfo->bat_levels - 1;
+	struct battery_thresh *thresh;
+
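+	/*
+	 * The threshold tables run from highest to lowest voltage; scan up
+	 * from the low end until an entry at or above the reading is found.
+	 */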
+	if (sharpsl_pm.charge_mode == CHRG_ON)
+		thresh = sharpsl_pm.machinfo->bat_levels_acin;
+	else
+		thresh = sharpsl_pm.machinfo->bat_levels_noac;
+
+	while (i > 0 && (voltage > thresh[i].voltage))
+		i--;
+
+	return thresh[i].percentage;
+}
+
+static int get_apm_status(int voltage)
+{
+	int low_thresh, high_thresh;
+
+	if (sharpsl_pm.charge_mode == CHRG_ON) {
+		high_thresh = sharpsl_pm.machinfo->status_high_acin;
+		low_thresh = sharpsl_pm.machinfo->status_low_acin;
+	} else {
+		high_thresh = sharpsl_pm.machinfo->status_high_noac;
+		low_thresh = sharpsl_pm.machinfo->status_low_noac;
+	}
+
+	if (voltage >= high_thresh)
+		return APM_BATTERY_STATUS_HIGH;
+	if (voltage >= low_thresh)
+		return APM_BATTERY_STATUS_LOW;
+	return APM_BATTERY_STATUS_CRITICAL;
+}
+
+void sharpsl_battery_kick(void)
+{
+	schedule_delayed_work(&sharpsl_bat, msecs_to_jiffies(125));
+}
+EXPORT_SYMBOL(sharpsl_battery_kick);
+
+
+static void sharpsl_battery_thread(void *private_)
+{
+	int voltage, percent, apm_status, i = 0;
+
+	if (!sharpsl_pm.machinfo)
+		return;
+
+	sharpsl_pm.battstat.ac_status = (!(STATUS_AC_IN) ? APM_AC_OFFLINE : APM_AC_ONLINE);
+
+	/* Corgi cannot confirm when battery fully charged so periodically kick! */
+	if (machine_is_corgi() && (sharpsl_pm.charge_mode == CHRG_ON)
+			&& time_after(jiffies, sharpsl_pm.charge_start_time +  SHARPSL_CHARGE_ON_TIME_INTERVAL))
+		schedule_work(&toggle_charger);
+
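+	/*
+	 * Retry the ADC read a few times; if it keeps failing, warn and
+	 * fall back to the first entry of the no-AC table.
+	 */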
+	while (1) {
+		voltage = sharpsl_read_MainBattery();
+		if (voltage > 0) break;
+		if (i++ > 5) {
+			voltage = sharpsl_pm.machinfo->bat_levels_noac[0].voltage;
+			dev_warn(sharpsl_pm.dev, "Warning: Cannot read main battery!\n");
+			break;
+		}
+	}
+
+	voltage = sharpsl_average_value(voltage);
+	apm_status = get_apm_status(voltage);
+	percent = get_percentage(voltage);
+
+	/*
+	 * At low battery voltages, the voltage has a tendency to start
+	 * creeping back up, so we try to avoid this here.
+	 */
+	if ((sharpsl_pm.battstat.ac_status == APM_AC_ONLINE) || (apm_status == APM_BATTERY_STATUS_HIGH) ||  percent <= sharpsl_pm.battstat.mainbat_percent) {
+		sharpsl_pm.battstat.mainbat_voltage = voltage;
+		sharpsl_pm.battstat.mainbat_status = apm_status;
+		sharpsl_pm.battstat.mainbat_percent = percent;
+	}
+
+	dev_dbg(sharpsl_pm.dev, "Battery: voltage: %d, status: %d, percentage: %d, time: %d\n", voltage,
+			sharpsl_pm.battstat.mainbat_status, sharpsl_pm.battstat.mainbat_percent, jiffies);
+
+	/* If battery is low, limit backlight intensity to save power. */
+	if ((sharpsl_pm.battstat.ac_status != APM_AC_ONLINE)
+			&& ((sharpsl_pm.battstat.mainbat_status == APM_BATTERY_STATUS_LOW) ||
+			(sharpsl_pm.battstat.mainbat_status == APM_BATTERY_STATUS_CRITICAL))) {
+		if (!(sharpsl_pm.flags & SHARPSL_BL_LIMIT)) {
+			corgibl_limit_intensity(1);
+			sharpsl_pm.flags |= SHARPSL_BL_LIMIT;
+		}
+	} else if (sharpsl_pm.flags & SHARPSL_BL_LIMIT) {
+		corgibl_limit_intensity(0);
+		sharpsl_pm.flags &= ~SHARPSL_BL_LIMIT;
+	}
+
+	/* Suspend if critical battery level */
+	if ((sharpsl_pm.battstat.ac_status != APM_AC_ONLINE)
+			&& (sharpsl_pm.battstat.mainbat_status == APM_BATTERY_STATUS_CRITICAL)
+			&& !(sharpsl_pm.flags & SHARPSL_APM_QUEUED)) {
+		sharpsl_pm.flags |= SHARPSL_APM_QUEUED;
+		dev_err(sharpsl_pm.dev, "Fatal Off\n");
+		apm_queue_event(APM_CRITICAL_SUSPEND);
+	}
+
+	schedule_delayed_work(&sharpsl_bat, SHARPSL_BATCHK_TIME);
+}
+
+static void sharpsl_charge_on(void)
+{
+	dev_dbg(sharpsl_pm.dev, "Turning Charger On\n");
+
+	sharpsl_pm.full_count = 0;
+	sharpsl_pm.charge_mode = CHRG_ON;
+	schedule_delayed_work(&toggle_charger, msecs_to_jiffies(250));
+	schedule_delayed_work(&sharpsl_bat, msecs_to_jiffies(500));
+}
+
+static void sharpsl_charge_off(void)
+{
+	dev_dbg(sharpsl_pm.dev, "Turning Charger Off\n");
+
+	CHARGE_OFF();
+	CHARGE_LED_OFF();
+	sharpsl_pm.charge_mode = CHRG_OFF;
+
+	schedule_work(&sharpsl_bat);
+}
+
+static void sharpsl_charge_error(void)
+{
+	CHARGE_LED_ERR();
+	CHARGE_OFF();
+	sharpsl_pm.charge_mode = CHRG_ERROR;
+}
+
+static void sharpsl_charge_toggle(void *private_)
+{
+	dev_dbg(sharpsl_pm.dev, "Toggling Charger at time: %lx\n", jiffies);
+
+	if (STATUS_AC_IN == 0) {
+		sharpsl_charge_off();
+		return;
+	} else if ((sharpsl_check_battery(1) < 0) || (sharpsl_ac_check() < 0)) {
+		sharpsl_charge_error();
+		return;
+	}
+
+	CHARGE_LED_ON();
+	CHARGE_OFF();
+	mdelay(SHARPSL_CHARGE_WAIT_TIME);
+	CHARGE_ON();
+
+	sharpsl_pm.charge_start_time = jiffies;
+}
+
+static void sharpsl_ac_timer(unsigned long data)
+{
+	int acin = STATUS_AC_IN;
+
+	dev_dbg(sharpsl_pm.dev, "AC Status: %d\n",acin);
+
+	sharpsl_average_clear();
+	if (acin && (sharpsl_pm.charge_mode != CHRG_ON))
+		sharpsl_charge_on();
+	else if (sharpsl_pm.charge_mode == CHRG_ON)
+		sharpsl_charge_off();
+
+	schedule_work(&sharpsl_bat);
+}
+
+
+static irqreturn_t sharpsl_ac_isr(int irq, void *dev_id, struct pt_regs *fp)
+{
+	/* Delay the event slightly to debounce */
+	/* Must be a smaller delay than the chrg_full_isr below */
+	mod_timer(&sharpsl_pm.ac_timer, jiffies + msecs_to_jiffies(250));
+
+	return IRQ_HANDLED;
+}
+
+static void sharpsl_chrg_full_timer(unsigned long data)
+{
+	dev_dbg(sharpsl_pm.dev, "Charge Full at time: %lx\n", jiffies);
+
+	sharpsl_pm.full_count++;
+
+	if (STATUS_AC_IN == 0) {
+		dev_dbg(sharpsl_pm.dev, "Charge Full: AC removed - stop charging!\n");
+		if (sharpsl_pm.charge_mode == CHRG_ON)
+			sharpsl_charge_off();
+	} else if (sharpsl_pm.full_count < 2) {
+		dev_dbg(sharpsl_pm.dev, "Charge Full: Count too low\n");
+		schedule_work(&toggle_charger);
+	} else if (time_after(jiffies, sharpsl_pm.charge_start_time + SHARPSL_CHARGE_FINISH_TIME)) {
+		dev_dbg(sharpsl_pm.dev, "Charge Full: Interrupt generated too slowly - retry.\n");
+		schedule_work(&toggle_charger);
+	} else {
+		sharpsl_charge_off();
+		sharpsl_pm.charge_mode = CHRG_DONE;
+		dev_dbg(sharpsl_pm.dev, "Charge Full: Charging Finished\n");
+	}
+}
+
+/* Charging Finished Interrupt (Not present on Corgi) */
+/* Can trigger at the same time as an AC status change so
+   delay until after that has been processed */
+static irqreturn_t sharpsl_chrg_full_isr(int irq, void *dev_id, struct pt_regs *fp)
+{
+	if (sharpsl_pm.flags & SHARPSL_SUSPENDED)
+		return IRQ_HANDLED;
+
+	/* delay until after any ac interrupt */
+	mod_timer(&sharpsl_pm.chrg_full_timer, jiffies + msecs_to_jiffies(500));
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t sharpsl_fatal_isr(int irq, void *dev_id, struct pt_regs *fp)
+{
+	int is_fatal = 0;
+
+	if (STATUS_BATT_LOCKED == 0) {
+		dev_err(sharpsl_pm.dev, "Battery now Unlocked! Suspending.\n");
+		is_fatal = 1;
+	}
+
+	if (sharpsl_pm.machinfo->gpio_fatal && (STATUS_FATAL == 0)) {
+		dev_err(sharpsl_pm.dev, "Fatal Batt Error! Suspending.\n");
+		is_fatal = 1;
+	}
+
+	if (!(sharpsl_pm.flags & SHARPSL_APM_QUEUED) && is_fatal) {
+		sharpsl_pm.flags |= SHARPSL_APM_QUEUED;
+		apm_queue_event(APM_CRITICAL_SUSPEND);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Maintain an average of the last 10 readings
+ */
+#define SHARPSL_CNV_VALUE_NUM    10
+static int sharpsl_ad_index;
+
+static void sharpsl_average_clear(void)
+{
+	sharpsl_ad_index = 0;
+}
+
+static int sharpsl_average_value(int ad)
+{
+	int i, ad_val = 0;
+	static int sharpsl_ad[SHARPSL_CNV_VALUE_NUM+1];
+
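+	/*
+	 * Only average while the battery still reads as HIGH; otherwise
+	 * restart the window and return the raw reading.
+	 */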
+	if (sharpsl_pm.battstat.mainbat_status != APM_BATTERY_STATUS_HIGH) {
+		sharpsl_ad_index = 0;
+		return ad;
+	}
+
+	sharpsl_ad[sharpsl_ad_index] = ad;
+	sharpsl_ad_index++;
+	if (sharpsl_ad_index >= SHARPSL_CNV_VALUE_NUM) {
+		for (i=0; i < (SHARPSL_CNV_VALUE_NUM-1); i++)
+			sharpsl_ad[i] = sharpsl_ad[i+1];
+		sharpsl_ad_index = SHARPSL_CNV_VALUE_NUM - 1;
+	}
+	for (i=0; i < sharpsl_ad_index; i++)
+		ad_val += sharpsl_ad[i];
+
+	return (ad_val / sharpsl_ad_index);
+}
+
+
+/*
+ * Read MAX1111 ADC
+ */
+static int read_max1111(int channel)
+{
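+	/*
+	 * Compose the MAX1111 control byte: start bit, channel in the SEL
+	 * field, single-ended unipolar conversion, with both power-down
+	 * control bits set.
+	 */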
+	return corgi_ssp_max1111_get((channel << MAXCTRL_SEL_SH) | MAXCTRL_PD0 | MAXCTRL_PD1
+			| MAXCTRL_SGL | MAXCTRL_UNI | MAXCTRL_STR);
+}
+
+static int sharpsl_read_MainBattery(void)
+{
+	return read_max1111(BATT_AD);
+}
+
+static int sharpsl_read_Temp(void)
+{
+	int temp;
+
+	sharpsl_pm.machinfo->measure_temp(1);
+
+	mdelay(SHARPSL_CHECK_BATTERY_WAIT_TIME_TEMP);
+	temp = read_max1111(BATT_THM);
+
+	sharpsl_pm.machinfo->measure_temp(0);
+
+	return temp;
+}
+
+static int sharpsl_read_jkvad(void)
+{
+	return read_max1111(JK_VAD);
+}
+
+/*
+ * Take an array of 5 integers, remove the maximum and minimum values
+ * and return the average.
+ */
+static int get_select_val(int *val)
+{
+	int i, j, k, temp, sum = 0;
+
+	/* Find MAX val */
+	temp = val[0];
+	j=0;
+	for (i=1; i<5; i++) {
+		if (temp < val[i]) {
+			temp = val[i];
+			j = i;
+		}
+	}
+
+	/* Find MIN val */
+	temp = val[4];
+	k=4;
+	for (i=3; i>=0; i--) {
+		if (temp > val[i]) {
+			temp = val[i];
+			k = i;
+		}
+	}
+
+	for (i=0; i<5; i++)
+		if (i != j && i != k )
+			sum += val[i];
+
+	dev_dbg(sharpsl_pm.dev, "Average: %d from values: %d, %d, %d, %d, %d\n", sum/3, val[0], val[1], val[2], val[3], val[4]);
+
+	return (sum/3);
+}
+
+/*  mode 0 - Check temperature and voltage
+ *       1 - Check temperature only */
+static int sharpsl_check_battery(int mode)
+{
+	int val, i, buff[5];
+
+	/* Check battery temperature */
+	for (i=0; i<5; i++) {
+		mdelay(SHARPSL_CHECK_BATTERY_WAIT_TIME_TEMP);
+		buff[i] = sharpsl_read_Temp();
+	}
+
+	val = get_select_val(buff);
+
+	dev_dbg(sharpsl_pm.dev, "Temperature: %d\n", val);
+	if (val > SHARPSL_CHARGE_ON_TEMP)
+		return -1;
+	if (mode == 1)
+		return 0;
+
+	/* disable charge, enable discharge */
+	CHARGE_OFF();
+	DISCHARGE_ON();
+	mdelay(SHARPSL_WAIT_DISCHARGE_ON);
+
+	if (sharpsl_pm.machinfo->discharge1)
+		sharpsl_pm.machinfo->discharge1(1);
+
+	/* Check battery voltage */
+	for (i=0; i<5; i++) {
+		buff[i] = sharpsl_read_MainBattery();
+		mdelay(SHARPSL_CHECK_BATTERY_WAIT_TIME_VOLT);
+	}
+
+	if (sharpsl_pm.machinfo->discharge1)
+		sharpsl_pm.machinfo->discharge1(0);
+
+	DISCHARGE_OFF();
+
+	val = get_select_val(buff);
+	dev_dbg(sharpsl_pm.dev, "Battery Voltage: %d\n", val);
+
+	if (val < SHARPSL_CHARGE_ON_VOLT)
+		return -1;
+
+	return 0;
+}
+
+static int sharpsl_ac_check(void)
+{
+	int temp, i, buff[5];
+
+	for (i=0; i<5; i++) {
+		buff[i] = sharpsl_read_jkvad();
+		mdelay(SHARPSL_CHECK_BATTERY_WAIT_TIME_JKVAD);
+	}
+
+	temp = get_select_val(buff);
+	dev_dbg(sharpsl_pm.dev, "AC Voltage: %d\n",temp);
+
+	if ((temp > SHARPSL_CHARGE_ON_JKVAD_HIGH) || (temp < SHARPSL_CHARGE_ON_JKVAD_LOW)) {
+		dev_err(sharpsl_pm.dev, "Error: AC check failed.\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int sharpsl_pm_suspend(struct device *dev, pm_message_t state)
+{
+	sharpsl_pm.flags |= SHARPSL_SUSPENDED;
+	flush_scheduled_work();
+
+	if (sharpsl_pm.charge_mode == CHRG_ON)
+		sharpsl_pm.flags |= SHARPSL_DO_OFFLINE_CHRG;
+	else
+		sharpsl_pm.flags &= ~SHARPSL_DO_OFFLINE_CHRG;
+
+	return 0;
+}
+
+static int sharpsl_pm_resume(struct device *dev)
+{
+	/* Clear the reset source indicators as they break the bootloader upon reboot */
+	RCSR = 0x0f;
+	sharpsl_average_clear();
+	sharpsl_pm.flags &= ~SHARPSL_APM_QUEUED;
+	sharpsl_pm.flags &= ~SHARPSL_SUSPENDED;
+
+	return 0;
+}
+
+static void corgi_goto_sleep(unsigned long alarm_time, unsigned int alarm_enable, suspend_state_t state)
+{
+	dev_dbg(sharpsl_pm.dev, "Time is: %08x\n",RCNR);
+
+	dev_dbg(sharpsl_pm.dev, "Offline Charge Activate = %d\n",sharpsl_pm.flags & SHARPSL_DO_OFFLINE_CHRG);
+	/* not charging and AC-IN! */
+
+	if ((sharpsl_pm.flags & SHARPSL_DO_OFFLINE_CHRG) && (STATUS_AC_IN != 0)) {
+		dev_dbg(sharpsl_pm.dev, "Activating Offline Charger...\n");
+		sharpsl_pm.charge_mode = CHRG_OFF;
+		sharpsl_pm.flags &= ~SHARPSL_DO_OFFLINE_CHRG;
+		sharpsl_off_charge_battery();
+	}
+
+	sharpsl_pm.machinfo->presuspend();
+
+	PEDR = 0xffffffff; /* clear it */
+
+	sharpsl_pm.flags &= ~SHARPSL_ALARM_ACTIVE;
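+	/*
+	 * While charging, wake up periodically to re-check the battery unless
+	 * a user alarm is due sooner than the next scheduled check.
+	 */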
+	if ((sharpsl_pm.charge_mode == CHRG_ON) && ((alarm_enable && ((alarm_time - RCNR) > (SHARPSL_BATCHK_TIME_SUSPEND + 30))) || !alarm_enable)) {
+		RTSR &= RTSR_ALE;
+		RTAR = RCNR + SHARPSL_BATCHK_TIME_SUSPEND;
+		dev_dbg(sharpsl_pm.dev, "Charging alarm at: %08x\n",RTAR);
+		sharpsl_pm.flags |= SHARPSL_ALARM_ACTIVE;
+	} else if (alarm_enable) {
+		RTSR &= RTSR_ALE;
+		RTAR = alarm_time;
+		dev_dbg(sharpsl_pm.dev, "User alarm at: %08x\n",RTAR);
+	} else {
+		dev_dbg(sharpsl_pm.dev, "No alarms set.\n");
+	}
+
+	pxa_pm_enter(state);
+
+	sharpsl_pm.machinfo->postsuspend();
+
+	dev_dbg(sharpsl_pm.dev, "Corgi woken up from suspend: %08x\n",PEDR);
+}
+
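+/*
+ * Decide whether to stay asleep or wake up.
+ * Returns 1 if the device was put back to sleep (caller should re-check),
+ * 0 if it should fully wake up.
+ */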
+static int corgi_enter_suspend(unsigned long alarm_time, unsigned int alarm_enable, suspend_state_t state)
+{
+	if (!sharpsl_pm.machinfo->should_wakeup(!(sharpsl_pm.flags & SHARPSL_ALARM_ACTIVE) && alarm_enable)) {
+		if (!(sharpsl_pm.flags & SHARPSL_ALARM_ACTIVE)) {
+			dev_dbg(sharpsl_pm.dev, "No user triggered wakeup events and not charging. Strange. Suspend.\n");
+			corgi_goto_sleep(alarm_time, alarm_enable, state);
+			return 1;
+		}
+		if (sharpsl_off_charge_battery()) {
+			dev_dbg(sharpsl_pm.dev, "Charging. Suspend...\n");
+			corgi_goto_sleep(alarm_time, alarm_enable, state);
+			return 1;
+		}
+		dev_dbg(sharpsl_pm.dev, "User triggered wakeup in offline charger.\n");
+	}
+
+	if ((STATUS_BATT_LOCKED == 0) || (sharpsl_fatal_check() < 0)) {
+		dev_err(sharpsl_pm.dev, "Fatal condition. Suspend.\n");
+		corgi_goto_sleep(alarm_time, alarm_enable, state);
+		return 1;
+	}
+
+	return 0;
+}
+
+static int corgi_pxa_pm_enter(suspend_state_t state)
+{
+	unsigned long alarm_time = RTAR;
+	unsigned int alarm_status = ((RTSR & RTSR_ALE) != 0);
+
+	dev_dbg(sharpsl_pm.dev, "SharpSL suspending for first time.\n");
+
+	corgi_goto_sleep(alarm_time, alarm_status, state);
+
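+	/* Keep re-entering suspend until a genuine wakeup condition is seen */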
+	while (corgi_enter_suspend(alarm_time, alarm_status, state))
+		{}
+
+	dev_dbg(sharpsl_pm.dev, "SharpSL resuming...\n");
+
+	return 0;
+}
+#endif
+
+
+/*
+ * Check for fatal battery errors
+ * Returns -1 on a fatal condition, 0 otherwise.
+ */
+static int sharpsl_fatal_check(void)
+{
+	int buff[5], temp, i, acin;
+
+	dev_dbg(sharpsl_pm.dev, "sharpsl_fatal_check entered\n");
+
+	/* Check AC-Adapter */
+	acin = STATUS_AC_IN;
+
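+	/*
+	 * If charging from AC, temporarily switch to discharge so the reading
+	 * reflects the battery itself rather than the charger.
+	 */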
+	if (acin && (sharpsl_pm.charge_mode == CHRG_ON)) {
+		CHARGE_OFF();
+		udelay(100);
+		DISCHARGE_ON();	/* enable discharge */
+		mdelay(SHARPSL_WAIT_DISCHARGE_ON);
+	}
+
+	if (sharpsl_pm.machinfo->discharge1)
+		sharpsl_pm.machinfo->discharge1(1);
+
+	/* Check the battery voltage (this also detects a missing battery) */
+	for (i=0; i<5; i++) {
+		buff[i] = sharpsl_read_MainBattery();
+		mdelay(SHARPSL_CHECK_BATTERY_WAIT_TIME_VOLT);
+	}
+
+	if (sharpsl_pm.machinfo->discharge1)
+		sharpsl_pm.machinfo->discharge1(0);
+
+	if (acin && (sharpsl_pm.charge_mode == CHRG_ON)) {
+		udelay(100);
+		CHARGE_ON();
+		DISCHARGE_OFF();
+	}
+
+	temp = get_select_val(buff);
+	dev_dbg(sharpsl_pm.dev, "sharpsl_fatal_check: acin: %d, discharge voltage: %d, no discharge: %d\n", acin, temp, sharpsl_read_MainBattery());
+
+	if ((acin && (temp < SHARPSL_FATAL_ACIN_VOLT)) ||
+			(!acin && (temp < SHARPSL_FATAL_NOACIN_VOLT)))
+		return -1;
+	return 0;
+}
+
+static int sharpsl_off_charge_error(void)
+{
+	dev_err(sharpsl_pm.dev, "Offline Charger: Error occured.\n");
+	CHARGE_OFF();
+	CHARGE_LED_ERR();
+	sharpsl_pm.charge_mode = CHRG_ERROR;
+	return 1;
+}
+
+/*
+ * Charging Control while suspended
+ * Return 1 - go straight to sleep
+ * Return 0 - sleep or wakeup depending on other factors
+ */
+static int sharpsl_off_charge_battery(void)
+{
+	int time;
+
+	dev_dbg(sharpsl_pm.dev, "Charge Mode: %d\n", sharpsl_pm.charge_mode);
+
+	if (sharpsl_pm.charge_mode == CHRG_OFF) {
+		dev_dbg(sharpsl_pm.dev, "Offline Charger: Step 1\n");
+
+		/* Check the AC adapter and the battery temperature */
+		if ((sharpsl_ac_check() < 0) || (sharpsl_check_battery(1) < 0))
+			return sharpsl_off_charge_error();
+
+		/* Start Charging */
+		CHARGE_LED_ON();
+		CHARGE_OFF();
+		mdelay(SHARPSL_CHARGE_WAIT_TIME);
+		CHARGE_ON();
+
+		sharpsl_pm.charge_mode = CHRG_ON;
+		sharpsl_pm.full_count = 0;
+
+		return 1;
+	} else if (sharpsl_pm.charge_mode != CHRG_ON) {
+		return 1;
+	}
+
+	if (sharpsl_pm.full_count == 0) {
+		int time;
+
+		dev_dbg(sharpsl_pm.dev, "Offline Charger: Step 2\n");
+
+		if (sharpsl_check_battery(0) < 0)
+			return sharpsl_off_charge_error();
+
+		CHARGE_OFF();
+		mdelay(SHARPSL_CHARGE_WAIT_TIME);
+		CHARGE_ON();
+		sharpsl_pm.charge_mode = CHRG_ON;
+
+		mdelay(SHARPSL_CHARGE_CO_CHECK_TIME);
+
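+		/* Poll for a wakeup event, a timeout, or the charge-full signal */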
+		time = RCNR;
+		while(1) {
+			/* Check if any wakeup event has occurred */
+			if (sharpsl_pm.machinfo->charger_wakeup() != 0)
+				return 0;
+			/* Check for timeout */
+			if ((RCNR - time) > SHARPSL_WAIT_CO_TIME)
+				return 1;
+			if (STATUS_CHRG_FULL) {
+				dev_dbg(sharpsl_pm.dev, "Offline Charger: Charge full occured. Retrying to check\n");
+	   			sharpsl_pm.full_count++;
+				CHARGE_OFF();
+				mdelay(SHARPSL_CHARGE_WAIT_TIME);
+				CHARGE_ON();
+				return 1;
+			}
+		}
+	}
+
+	dev_dbg(sharpsl_pm.dev, "Offline Charger: Step 3\n");
+
+	mdelay(SHARPSL_CHARGE_CO_CHECK_TIME);
+
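+	/* Poll again; a charge-full signal here means charging is complete */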
+	time = RCNR;
+	while(1) {
+		/* Check if any wakeup event has occurred */
+		if (sharpsl_pm.machinfo->charger_wakeup() != 0)
+			return 0;
+		/* Check for timeout */
+		if ((RCNR-time) > SHARPSL_WAIT_CO_TIME) {
+			if (sharpsl_pm.full_count > SHARPSL_CHARGE_RETRY_CNT) {
+				dev_dbg(sharpsl_pm.dev, "Offline Charger: Not charged sufficiently. Retrying.\n");
+				sharpsl_pm.full_count = 0;
+			}
+			sharpsl_pm.full_count++;
+			return 1;
+		}
+		if (STATUS_CHRG_FULL) {
+			dev_dbg(sharpsl_pm.dev, "Offline Charger: Charging complete.\n");
+			CHARGE_LED_OFF();
+			CHARGE_OFF();
+			sharpsl_pm.charge_mode = CHRG_DONE;
+			return 1;
+		}
+	}
+}
+
+
+static ssize_t battery_percentage_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n",sharpsl_pm.battstat.mainbat_percent);
+}
+
+static ssize_t battery_voltage_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n",sharpsl_pm.battstat.mainbat_voltage);
+}
+
+static DEVICE_ATTR(battery_percentage, 0444, battery_percentage_show, NULL);
+static DEVICE_ATTR(battery_voltage, 0444, battery_voltage_show, NULL);
+
+extern void (*apm_get_power_status)(struct apm_power_info *);
+
+static void sharpsl_apm_get_power_status(struct apm_power_info *info)
+{
+	info->ac_line_status = sharpsl_pm.battstat.ac_status;
+
+	if (sharpsl_pm.charge_mode == CHRG_ON)
+		info->battery_status = APM_BATTERY_STATUS_CHARGING;
+	else
+		info->battery_status = sharpsl_pm.battstat.mainbat_status;
+
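+	/* The APM battery flag is a bitmask with one bit per status code */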
+	info->battery_flag = (1 << info->battery_status);
+	info->battery_life = sharpsl_pm.battstat.mainbat_percent;
+}
+
+static struct pm_ops sharpsl_pm_ops = {
+	.pm_disk_mode	= PM_DISK_FIRMWARE,
+	.prepare	= pxa_pm_prepare,
+	.enter		= corgi_pxa_pm_enter,
+	.finish		= pxa_pm_finish,
+};
+
+static int __init sharpsl_pm_probe(struct device *dev)
+{
+	if (!dev->platform_data)
+		return -EINVAL;
+
+	sharpsl_pm.dev = dev;
+	sharpsl_pm.machinfo = dev->platform_data;
+	sharpsl_pm.charge_mode = CHRG_OFF;
+	sharpsl_pm.flags = 0;
+
+	sharpsl_pm.machinfo->init();
+
+	init_timer(&sharpsl_pm.ac_timer);
+	sharpsl_pm.ac_timer.function = sharpsl_ac_timer;
+
+	init_timer(&sharpsl_pm.chrg_full_timer);
+	sharpsl_pm.chrg_full_timer.function = sharpsl_chrg_full_timer;
+
+	pxa_gpio_mode(sharpsl_pm.machinfo->gpio_acin | GPIO_IN);
+	pxa_gpio_mode(sharpsl_pm.machinfo->gpio_batfull | GPIO_IN);
+	pxa_gpio_mode(sharpsl_pm.machinfo->gpio_batlock | GPIO_IN);
+
+	/* Register interrupt handlers */
+	if (request_irq(IRQ_GPIO(sharpsl_pm.machinfo->gpio_acin), sharpsl_ac_isr, SA_INTERRUPT, "AC Input Detect", sharpsl_ac_isr)) {
+		dev_err(sharpsl_pm.dev, "Could not get irq %d.\n", IRQ_GPIO(sharpsl_pm.machinfo->gpio_acin));
+	}
+	else set_irq_type(IRQ_GPIO(sharpsl_pm.machinfo->gpio_acin),IRQT_BOTHEDGE);
+
+	if (request_irq(IRQ_GPIO(sharpsl_pm.machinfo->gpio_batlock), sharpsl_fatal_isr, SA_INTERRUPT, "Battery Cover", sharpsl_fatal_isr)) {
+		dev_err(sharpsl_pm.dev, "Could not get irq %d.\n", IRQ_GPIO(sharpsl_pm.machinfo->gpio_batlock));
+	}
+	else set_irq_type(IRQ_GPIO(sharpsl_pm.machinfo->gpio_batlock),IRQT_FALLING);
+
+	if (sharpsl_pm.machinfo->gpio_fatal) {
+		if (request_irq(IRQ_GPIO(sharpsl_pm.machinfo->gpio_fatal), sharpsl_fatal_isr, SA_INTERRUPT, "Fatal Battery", sharpsl_fatal_isr)) {
+			dev_err(sharpsl_pm.dev, "Could not get irq %d.\n", IRQ_GPIO(sharpsl_pm.machinfo->gpio_fatal));
+		}
+		else set_irq_type(IRQ_GPIO(sharpsl_pm.machinfo->gpio_fatal),IRQT_FALLING);
+	}
+
+	if (!machine_is_corgi()) {
+		/* Register interrupt handler. */
+		if (request_irq(IRQ_GPIO(sharpsl_pm.machinfo->gpio_batfull), sharpsl_chrg_full_isr, SA_INTERRUPT, "CO", sharpsl_chrg_full_isr)) {
+			dev_err(sharpsl_pm.dev, "Could not get irq %d.\n", IRQ_GPIO(sharpsl_pm.machinfo->gpio_batfull));
+		}
+		else set_irq_type(IRQ_GPIO(sharpsl_pm.machinfo->gpio_batfull),IRQT_RISING);
+	}
+
+	device_create_file(dev, &dev_attr_battery_percentage);
+	device_create_file(dev, &dev_attr_battery_voltage);
+
+	apm_get_power_status = sharpsl_apm_get_power_status;
+
+	pm_set_ops(&sharpsl_pm_ops);
+
+	mod_timer(&sharpsl_pm.ac_timer, jiffies + msecs_to_jiffies(250));
+
+	return 0;
+}
+
+static int sharpsl_pm_remove(struct device *dev)
+{
+	pm_set_ops(NULL);
+
+	device_remove_file(dev, &dev_attr_battery_percentage);
+	device_remove_file(dev, &dev_attr_battery_voltage);
+
+	free_irq(IRQ_GPIO(sharpsl_pm.machinfo->gpio_acin), sharpsl_ac_isr);
+	free_irq(IRQ_GPIO(sharpsl_pm.machinfo->gpio_batlock), sharpsl_fatal_isr);
+
+	if (sharpsl_pm.machinfo->gpio_fatal)
+		free_irq(IRQ_GPIO(sharpsl_pm.machinfo->gpio_fatal), sharpsl_fatal_isr);
+
+	if (!machine_is_corgi())
+		free_irq(IRQ_GPIO(sharpsl_pm.machinfo->gpio_batfull), sharpsl_chrg_full_isr);
+
+	del_timer_sync(&sharpsl_pm.chrg_full_timer);
+	del_timer_sync(&sharpsl_pm.ac_timer);
+
+	return 0;
+}
+
+static struct device_driver sharpsl_pm_driver = {
+	.name		= "sharpsl-pm",
+	.bus		= &platform_bus_type,
+	.probe		= sharpsl_pm_probe,
+	.remove		= sharpsl_pm_remove,
+	.suspend	= sharpsl_pm_suspend,
+	.resume		= sharpsl_pm_resume,
+};
+
+static int __devinit sharpsl_pm_init(void)
+{
+	return driver_register(&sharpsl_pm_driver);
+}
+
+static void sharpsl_pm_exit(void)
+{
+	driver_unregister(&sharpsl_pm_driver);
+}
+
+late_initcall(sharpsl_pm_init);
+module_exit(sharpsl_pm_exit);
diff --git a/arch/arm/mach-pxa/ssp.c b/arch/arm/mach-pxa/ssp.c
index 4d826c0..a68b30e 100644
--- a/arch/arm/mach-pxa/ssp.c
+++ b/arch/arm/mach-pxa/ssp.c
@@ -19,6 +19,8 @@
  *   22nd Aug 2003 Initial version.
  *   20th Dec 2004 Added ssp_config for changing port config without
  *                 closing the port.
+ *    4th Aug 2005 Added option to disable irq handler registration and
+ *                 cleaned up irq and clock detection.
  */
 
 #include <linux/module.h>
@@ -37,6 +39,26 @@
 
 #define PXA_SSP_PORTS 	3
 
+struct ssp_info_ {
+	int irq;
+	u32 clock;
+};
+
+/*
+ * SSP port clock and IRQ settings
+ */
+static const struct ssp_info_ ssp_info[PXA_SSP_PORTS] = {
+#if defined (CONFIG_PXA27x)
+	{IRQ_SSP,	CKEN23_SSP1},
+	{IRQ_SSP2,	CKEN3_SSP2},
+	{IRQ_SSP3,	CKEN4_SSP3},
+#else
+	{IRQ_SSP,	CKEN3_SSP},
+	{IRQ_NSSP,	CKEN9_NSSP},
+	{IRQ_ASSP,	CKEN10_ASSP},
+#endif
+};
+
 static DECLARE_MUTEX(sem);
 static int use_count[PXA_SSP_PORTS] = {0, 0, 0};
 
@@ -210,9 +232,9 @@
  *   %-EBUSY	if the resources are already in use
  *   %0		on success
  */
-int ssp_init(struct ssp_dev *dev, u32 port)
+int ssp_init(struct ssp_dev *dev, u32 port, u32 init_flags)
 {
-	int ret, irq;
+	int ret;
 
 	if (port > PXA_SSP_PORTS || port == 0)
 		return -ENODEV;
@@ -229,61 +251,20 @@
 		up(&sem);
 		return -EBUSY;
 	}
-
-	switch (port) {
-		case 1:
-			irq = IRQ_SSP;
-			break;
-#if defined (CONFIG_PXA27x)
-		case 2:
-			irq = IRQ_SSP2;
-			break;
-		case 3:
-			irq = IRQ_SSP3;
-			break;
-#else
-		case 2:
-			irq = IRQ_NSSP;
-			break;
-		case 3:
-			irq = IRQ_ASSP;
-			break;
-#endif
-		default:
-			return -ENODEV;
-	}
-
 	dev->port = port;
 
-	ret = request_irq(irq, ssp_interrupt, 0, "SSP", dev);
-	if (ret)
-		goto out_region;
+	/* Do we need to register an IRQ handler? */
+	if (!(init_flags & SSP_NO_IRQ)) {
+		ret = request_irq(ssp_info[port-1].irq, ssp_interrupt,
+				0, "SSP", dev);
+		if (ret)
+			goto out_region;
+		dev->irq = ssp_info[port-1].irq;
+	} else
+		dev->irq = 0;
 
 	/* turn on SSP port clock */
-	switch (dev->port) {
-#if defined (CONFIG_PXA27x)
-		case 1:
-			pxa_set_cken(CKEN23_SSP1, 1);
-			break;
-		case 2:
-			pxa_set_cken(CKEN3_SSP2, 1);
-			break;
-		case 3:
-			pxa_set_cken(CKEN4_SSP3, 1);
-			break;
-#else
-		case 1:
-			pxa_set_cken(CKEN3_SSP, 1);
-			break;
-		case 2:
-			pxa_set_cken(CKEN9_NSSP, 1);
-			break;
-		case 3:
-			pxa_set_cken(CKEN10_ASSP, 1);
-			break;
-#endif
-	}
-
+	pxa_set_cken(ssp_info[port-1].clock, 1);
 	up(&sem);
 	return 0;
 
@@ -301,46 +282,17 @@
  */
 void ssp_exit(struct ssp_dev *dev)
 {
-	int irq;
-
 	down(&sem);
 	SSCR0_P(dev->port) &= ~SSCR0_SSE;
 
-	/* find irq, save power and turn off SSP port clock */
-	switch (dev->port) {
-#if defined (CONFIG_PXA27x)
-		case 1:
-			irq = IRQ_SSP;
-			pxa_set_cken(CKEN23_SSP1, 0);
-			break;
-		case 2:
-			irq = IRQ_SSP2;
-			pxa_set_cken(CKEN3_SSP2, 0);
-			break;
-		case 3:
-			irq = IRQ_SSP3;
-			pxa_set_cken(CKEN4_SSP3, 0);
-			break;
-#else
-		case 1:
-			irq = IRQ_SSP;
-			pxa_set_cken(CKEN3_SSP, 0);
-			break;
-		case 2:
-			irq = IRQ_NSSP;
-			pxa_set_cken(CKEN9_NSSP, 0);
-			break;
-		case 3:
-			irq = IRQ_ASSP;
-			pxa_set_cken(CKEN10_ASSP, 0);
-			break;
-#endif
-		default:
-			printk(KERN_WARNING "SSP: tried to close invalid port\n");
-			return;
+	if (dev->port > PXA_SSP_PORTS || dev->port == 0) {
+		printk(KERN_WARNING "SSP: tried to close invalid port\n");
+		return;
 	}
 
-	free_irq(irq, dev);
+	pxa_set_cken(ssp_info[dev->port-1].clock, 0);
+	if (dev->irq)
+		free_irq(dev->irq, dev);
 	release_mem_region(__PREG(SSCR0_P(dev->port)), 0x2c);
 	use_count[dev->port - 1]--;
 	up(&sem);
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index e3c14d6..e84fdde 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -102,8 +102,8 @@
 # ARM925T
 config CPU_ARM925T
  	bool "Support ARM925T processor" if ARCH_OMAP1
- 	depends on ARCH_OMAP1510
- 	default y if ARCH_OMAP1510
+ 	depends on ARCH_OMAP15XX
+ 	default y if ARCH_OMAP15XX
 	select CPU_32v4
 	select CPU_ABRT_EV4T
 	select CPU_CACHE_V4WT
@@ -242,7 +242,7 @@
 # ARMv6
 config CPU_V6
 	bool "Support ARM V6 processor"
-	depends on ARCH_INTEGRATOR || MACH_REALVIEW_EB
+	depends on ARCH_INTEGRATOR || MACH_REALVIEW_EB || ARCH_OMAP2
 	select CPU_32v6
 	select CPU_ABRT_EV6
 	select CPU_CACHE_V6
diff --git a/arch/arm/plat-omap/Makefile b/arch/arm/plat-omap/Makefile
index 7e144f9..9ccf194 100644
--- a/arch/arm/plat-omap/Makefile
+++ b/arch/arm/plat-omap/Makefile
@@ -3,7 +3,7 @@
 #
 
 # Common support
-obj-y := common.o sram.o sram-fn.o clock.o dma.o mux.o gpio.o mcbsp.o usb.o
+obj-y := common.o sram.o sram-fn.o clock.o devices.o dma.o mux.o gpio.o mcbsp.o usb.o
 obj-m :=
 obj-n :=
 obj-  :=
diff --git a/arch/arm/plat-omap/clock.c b/arch/arm/plat-omap/clock.c
index a020fe1..7ce39b9 100644
--- a/arch/arm/plat-omap/clock.c
+++ b/arch/arm/plat-omap/clock.c
@@ -1,15 +1,20 @@
 /*
  *  linux/arch/arm/plat-omap/clock.c
  *
- *  Copyright (C) 2004 Nokia corporation
+ *  Copyright (C) 2004 - 2005 Nokia corporation
  *  Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
  *
+ *  Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/config.h>
 #include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
 #include <linux/list.h>
 #include <linux/errno.h>
 #include <linux/err.h>
@@ -18,562 +23,20 @@
 #include <asm/io.h>
 #include <asm/semaphore.h>
 #include <asm/hardware/clock.h>
-#include <asm/arch/board.h>
-#include <asm/arch/usb.h>
 
-#include "clock.h"
-#include "sram.h"
+#include <asm/arch/clock.h>
 
-static LIST_HEAD(clocks);
+LIST_HEAD(clocks);
 static DECLARE_MUTEX(clocks_sem);
-static DEFINE_SPINLOCK(clockfw_lock);
-static void propagate_rate(struct clk *  clk);
-/* UART clock function */
-static int set_uart_rate(struct clk * clk, unsigned long rate);
-/* External clock (MCLK & BCLK) functions */
-static int set_ext_clk_rate(struct clk *  clk, unsigned long rate);
-static long round_ext_clk_rate(struct clk *  clk, unsigned long rate);
-static void init_ext_clk(struct clk *  clk);
-/* MPU virtual clock functions */
-static int select_table_rate(struct clk *  clk, unsigned long rate);
-static long round_to_table_rate(struct clk *  clk, unsigned long rate);
-void clk_setdpll(__u16, __u16);
+DEFINE_SPINLOCK(clockfw_lock);
 
-static struct mpu_rate rate_table[] = {
-	/* MPU MHz, xtal MHz, dpll1 MHz, CKCTL, DPLL_CTL
-	 * armdiv, dspdiv, dspmmu, tcdiv, perdiv, lcddiv
-	 */
-#if defined(CONFIG_OMAP_ARM_216MHZ)
-	{ 216000000, 12000000, 216000000, 0x050d, 0x2910 }, /* 1/1/2/2/2/8 */
-#endif
-#if defined(CONFIG_OMAP_ARM_195MHZ)
-	{ 195000000, 13000000, 195000000, 0x050e, 0x2790 }, /* 1/1/2/2/4/8 */
-#endif
-#if defined(CONFIG_OMAP_ARM_192MHZ)
-	{ 192000000, 19200000, 192000000, 0x050f, 0x2510 }, /* 1/1/2/2/8/8 */
-	{ 192000000, 12000000, 192000000, 0x050f, 0x2810 }, /* 1/1/2/2/8/8 */
-	{  96000000, 12000000, 192000000, 0x055f, 0x2810 }, /* 2/2/2/2/8/8 */
-	{  48000000, 12000000, 192000000, 0x0baf, 0x2810 }, /* 4/8/4/4/8/8 */
-	{  24000000, 12000000, 192000000, 0x0fff, 0x2810 }, /* 8/8/8/8/8/8 */
-#endif
-#if defined(CONFIG_OMAP_ARM_182MHZ)
-	{ 182000000, 13000000, 182000000, 0x050e, 0x2710 }, /* 1/1/2/2/4/8 */
-#endif
-#if defined(CONFIG_OMAP_ARM_168MHZ)
-	{ 168000000, 12000000, 168000000, 0x010f, 0x2710 }, /* 1/1/1/2/8/8 */
-#endif
-#if defined(CONFIG_OMAP_ARM_150MHZ)
-	{ 150000000, 12000000, 150000000, 0x010a, 0x2cb0 }, /* 1/1/1/2/4/4 */
-#endif
-#if defined(CONFIG_OMAP_ARM_120MHZ)
-	{ 120000000, 12000000, 120000000, 0x010a, 0x2510 }, /* 1/1/1/2/4/4 */
-#endif
-#if defined(CONFIG_OMAP_ARM_96MHZ)
-	{  96000000, 12000000,  96000000, 0x0005, 0x2410 }, /* 1/1/1/1/2/2 */
-#endif
-#if defined(CONFIG_OMAP_ARM_60MHZ)
-	{  60000000, 12000000,  60000000, 0x0005, 0x2290 }, /* 1/1/1/1/2/2 */
-#endif
-#if defined(CONFIG_OMAP_ARM_30MHZ)
-	{  30000000, 12000000,  60000000, 0x0555, 0x2290 }, /* 2/2/2/2/2/2 */
-#endif
-	{ 0, 0, 0, 0, 0 },
-};
+static struct clk_functions *arch_clock;
 
+/*-------------------------------------------------------------------------
+ * Standard clock functions defined in asm/hardware/clock.h
+ *-------------------------------------------------------------------------*/
 
-static void ckctl_recalc(struct clk *  clk);
-int __clk_enable(struct clk *clk);
-void __clk_disable(struct clk *clk);
-void __clk_unuse(struct clk *clk);
-int __clk_use(struct clk *clk);
-
-
-static void followparent_recalc(struct clk *  clk)
-{
-	clk->rate = clk->parent->rate;
-}
-
-
-static void watchdog_recalc(struct clk *  clk)
-{
-	clk->rate = clk->parent->rate / 14;
-}
-
-static void uart_recalc(struct clk * clk)
-{
-	unsigned int val = omap_readl(clk->enable_reg);
-	if (val & clk->enable_bit)
-		clk->rate = 48000000;
-	else
-		clk->rate = 12000000;
-}
-
-static struct clk ck_ref = {
-	.name		= "ck_ref",
-	.rate		= 12000000,
-	.flags		= CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
-			  ALWAYS_ENABLED,
-};
-
-static struct clk ck_dpll1 = {
-	.name		= "ck_dpll1",
-	.parent		= &ck_ref,
-	.flags		= CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
-			  RATE_PROPAGATES | ALWAYS_ENABLED,
-};
-
-static struct clk ck_dpll1out = {
-	.name		= "ck_dpll1out",
-	.parent		= &ck_dpll1,
-	.flags		= CLOCK_IN_OMAP16XX,
-	.enable_reg	= ARM_IDLECT2,
-	.enable_bit	= EN_CKOUT_ARM,
-	.recalc		= &followparent_recalc,
-};
-
-static struct clk arm_ck = {
-	.name		= "arm_ck",
-	.parent		= &ck_dpll1,
-	.flags		= CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
-			  RATE_CKCTL | RATE_PROPAGATES | ALWAYS_ENABLED,
-	.rate_offset	= CKCTL_ARMDIV_OFFSET,
-	.recalc		= &ckctl_recalc,
-};
-
-static struct clk armper_ck = {
-	.name		= "armper_ck",
-	.parent		= &ck_dpll1,
-	.flags		= CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
-			  RATE_CKCTL,
-	.enable_reg	= ARM_IDLECT2,
-	.enable_bit	= EN_PERCK,
-	.rate_offset	= CKCTL_PERDIV_OFFSET,
-	.recalc		= &ckctl_recalc,
-};
-
-static struct clk arm_gpio_ck = {
-	.name		= "arm_gpio_ck",
-	.parent		= &ck_dpll1,
-	.flags		= CLOCK_IN_OMAP1510,
-	.enable_reg	= ARM_IDLECT2,
-	.enable_bit	= EN_GPIOCK,
-	.recalc		= &followparent_recalc,
-};
-
-static struct clk armxor_ck = {
-	.name		= "armxor_ck",
-	.parent		= &ck_ref,
-	.flags		= CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX,
-	.enable_reg	= ARM_IDLECT2,
-	.enable_bit	= EN_XORPCK,
-	.recalc		= &followparent_recalc,
-};
-
-static struct clk armtim_ck = {
-	.name		= "armtim_ck",
-	.parent		= &ck_ref,
-	.flags		= CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX,
-	.enable_reg	= ARM_IDLECT2,
-	.enable_bit	= EN_TIMCK,
-	.recalc		= &followparent_recalc,
-};
-
-static struct clk armwdt_ck = {
-	.name		= "armwdt_ck",
-	.parent		= &ck_ref,
-	.flags		= CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX,
-	.enable_reg	= ARM_IDLECT2,
-	.enable_bit	= EN_WDTCK,
-	.recalc		= &watchdog_recalc,
-};
-
-static struct clk arminth_ck16xx = {
-	.name		= "arminth_ck",
-	.parent		= &arm_ck,
-	.flags		= CLOCK_IN_OMAP16XX | ALWAYS_ENABLED,
-	.recalc		= &followparent_recalc,
-	/* Note: On 16xx the frequency can be divided by 2 by programming
-	 * ARM_CKCTL:ARM_INTHCK_SEL(14) to 1
-	 *
-	 * 1510 version is in TC clocks.
-	 */
-};
-
-static struct clk dsp_ck = {
-	.name		= "dsp_ck",
-	.parent		= &ck_dpll1,
-	.flags		= CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
-			  RATE_CKCTL,
-	.enable_reg	= ARM_CKCTL,
-	.enable_bit	= EN_DSPCK,
-	.rate_offset	= CKCTL_DSPDIV_OFFSET,
-	.recalc		= &ckctl_recalc,
-};
-
-static struct clk dspmmu_ck = {
-	.name		= "dspmmu_ck",
-	.parent		= &ck_dpll1,
-	.flags		= CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
-			  RATE_CKCTL | ALWAYS_ENABLED,
-	.rate_offset	= CKCTL_DSPMMUDIV_OFFSET,
-	.recalc		= &ckctl_recalc,
-};
-
-static struct clk dspper_ck = {
-	.name		= "dspper_ck",
-	.parent		= &ck_dpll1,
-	.flags		= CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
-			  RATE_CKCTL | DSP_DOMAIN_CLOCK | VIRTUAL_IO_ADDRESS,
-	.enable_reg	= DSP_IDLECT2,
-	.enable_bit	= EN_PERCK,
-	.rate_offset	= CKCTL_PERDIV_OFFSET,
-	.recalc		= &followparent_recalc,
-	//.recalc		= &ckctl_recalc,
-};
-
-static struct clk dspxor_ck = {
-	.name		= "dspxor_ck",
-	.parent		= &ck_ref,
-	.flags		= CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
-			  DSP_DOMAIN_CLOCK | VIRTUAL_IO_ADDRESS,
-	.enable_reg	= DSP_IDLECT2,
-	.enable_bit	= EN_XORPCK,
-	.recalc		= &followparent_recalc,
-};
-
-static struct clk dsptim_ck = {
-	.name		= "dsptim_ck",
-	.parent		= &ck_ref,
-	.flags		= CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
-			  DSP_DOMAIN_CLOCK | VIRTUAL_IO_ADDRESS,
-	.enable_reg	= DSP_IDLECT2,
-	.enable_bit	= EN_DSPTIMCK,
-	.recalc		= &followparent_recalc,
-};
-
-static struct clk tc_ck = {
-	.name		= "tc_ck",
-	.parent		= &ck_dpll1,
-	.flags		= CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP730 |
-			  RATE_CKCTL | RATE_PROPAGATES | ALWAYS_ENABLED,
-	.rate_offset	= CKCTL_TCDIV_OFFSET,
-	.recalc		= &ckctl_recalc,
-};
-
-static struct clk arminth_ck1510 = {
-	.name		= "arminth_ck",
-	.parent		= &tc_ck,
-	.flags		= CLOCK_IN_OMAP1510 | ALWAYS_ENABLED,
-	.recalc		= &followparent_recalc,
-	/* Note: On 1510 the frequency follows TC_CK
-	 *
-	 * 16xx version is in MPU clocks.
-	 */
-};
-
-static struct clk tipb_ck = {
-	.name		= "tibp_ck",
-	.parent		= &tc_ck,
-	.flags		= CLOCK_IN_OMAP1510 | ALWAYS_ENABLED,
-	.recalc		= &followparent_recalc,
-};
-
-static struct clk l3_ocpi_ck = {
-	.name		= "l3_ocpi_ck",
-	.parent		= &tc_ck,
-	.flags		= CLOCK_IN_OMAP16XX,
-	.enable_reg	= ARM_IDLECT3,
-	.enable_bit	= EN_OCPI_CK,
-	.recalc		= &followparent_recalc,
-};
-
-static struct clk tc1_ck = {
-	.name		= "tc1_ck",
-	.parent		= &tc_ck,
-	.flags		= CLOCK_IN_OMAP16XX,
-	.enable_reg	= ARM_IDLECT3,
-	.enable_bit	= EN_TC1_CK,
-	.recalc		= &followparent_recalc,
-};
-
-static struct clk tc2_ck = {
-	.name		= "tc2_ck",
-	.parent		= &tc_ck,
-	.flags		= CLOCK_IN_OMAP16XX,
-	.enable_reg	= ARM_IDLECT3,
-	.enable_bit	= EN_TC2_CK,
-	.recalc		= &followparent_recalc,
-};
-
-static struct clk dma_ck = {
-	.name		= "dma_ck",
-	.parent		= &tc_ck,
-	.flags		= CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
-			  ALWAYS_ENABLED,
-	.recalc		= &followparent_recalc,
-};
-
-static struct clk dma_lcdfree_ck = {
-	.name		= "dma_lcdfree_ck",
-	.parent		= &tc_ck,
-	.flags		= CLOCK_IN_OMAP16XX | ALWAYS_ENABLED,
-	.recalc		= &followparent_recalc,
-};
-
-static struct clk api_ck = {
-	.name		= "api_ck",
-	.parent		= &tc_ck,
-	.flags		= CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX,
-	.enable_reg	= ARM_IDLECT2,
-	.enable_bit	= EN_APICK,
-	.recalc		= &followparent_recalc,
-};
-
-static struct clk lb_ck = {
-	.name		= "lb_ck",
-	.parent		= &tc_ck,
-	.flags		= CLOCK_IN_OMAP1510,
-	.enable_reg	= ARM_IDLECT2,
-	.enable_bit	= EN_LBCK,
-	.recalc		= &followparent_recalc,
-};
-
-static struct clk rhea1_ck = {
-	.name		= "rhea1_ck",
-	.parent		= &tc_ck,
-	.flags		= CLOCK_IN_OMAP16XX | ALWAYS_ENABLED,
-	.recalc		= &followparent_recalc,
-};
-
-static struct clk rhea2_ck = {
-	.name		= "rhea2_ck",
-	.parent		= &tc_ck,
-	.flags		= CLOCK_IN_OMAP16XX | ALWAYS_ENABLED,
-	.recalc		= &followparent_recalc,
-};
-
-static struct clk lcd_ck = {
-	.name		= "lcd_ck",
-	.parent		= &ck_dpll1,
-	.flags		= CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP730 |
-			  RATE_CKCTL,
-	.enable_reg	= ARM_IDLECT2,
-	.enable_bit	= EN_LCDCK,
-	.rate_offset	= CKCTL_LCDDIV_OFFSET,
-	.recalc		= &ckctl_recalc,
-};
-
-static struct clk uart1_1510 = {
-	.name		= "uart1_ck",
-	/* Direct from ULPD, no parent */
-	.rate		= 12000000,
-	.flags		= CLOCK_IN_OMAP1510 | ENABLE_REG_32BIT | ALWAYS_ENABLED,
-	.enable_reg	= MOD_CONF_CTRL_0,
-	.enable_bit	= 29,	/* Chooses between 12MHz and 48MHz */
-	.set_rate	= &set_uart_rate,
-	.recalc		= &uart_recalc,
-};
-
-static struct clk uart1_16xx = {
-	.name		= "uart1_ck",
-	/* Direct from ULPD, no parent */
-	.rate		= 48000000,
-	.flags		= CLOCK_IN_OMAP16XX | RATE_FIXED | ENABLE_REG_32BIT,
-	.enable_reg	= MOD_CONF_CTRL_0,
-	.enable_bit	= 29,
-};
-
-static struct clk uart2_ck = {
-	.name		= "uart2_ck",
-	/* Direct from ULPD, no parent */
-	.rate		= 12000000,
-	.flags		= CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | ENABLE_REG_32BIT |
-			  ALWAYS_ENABLED,
-	.enable_reg	= MOD_CONF_CTRL_0,
-	.enable_bit	= 30,	/* Chooses between 12MHz and 48MHz */
-	.set_rate	= &set_uart_rate,
-	.recalc		= &uart_recalc,
-};
-
-static struct clk uart3_1510 = {
-	.name		= "uart3_ck",
-	/* Direct from ULPD, no parent */
-	.rate		= 12000000,
-	.flags		= CLOCK_IN_OMAP1510 | ENABLE_REG_32BIT | ALWAYS_ENABLED,
-	.enable_reg	= MOD_CONF_CTRL_0,
-	.enable_bit	= 31,	/* Chooses between 12MHz and 48MHz */
-	.set_rate	= &set_uart_rate,
-	.recalc		= &uart_recalc,
-};
-
-static struct clk uart3_16xx = {
-	.name		= "uart3_ck",
-	/* Direct from ULPD, no parent */
-	.rate		= 48000000,
-	.flags		= CLOCK_IN_OMAP16XX | RATE_FIXED | ENABLE_REG_32BIT,
-	.enable_reg	= MOD_CONF_CTRL_0,
-	.enable_bit	= 31,
-};
-
-static struct clk usb_clko = {	/* 6 MHz output on W4_USB_CLKO */
-	.name		= "usb_clko",
-	/* Direct from ULPD, no parent */
-	.rate		= 6000000,
-	.flags		= CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
-			  RATE_FIXED | ENABLE_REG_32BIT,
-	.enable_reg	= ULPD_CLOCK_CTRL,
-	.enable_bit	= USB_MCLK_EN_BIT,
-};
-
-static struct clk usb_hhc_ck1510 = {
-	.name		= "usb_hhc_ck",
-	/* Direct from ULPD, no parent */
-	.rate		= 48000000, /* Actually 2 clocks, 12MHz and 48MHz */
-	.flags		= CLOCK_IN_OMAP1510 |
-			  RATE_FIXED | ENABLE_REG_32BIT,
-	.enable_reg	= MOD_CONF_CTRL_0,
-	.enable_bit	= USB_HOST_HHC_UHOST_EN,
-};
-
-static struct clk usb_hhc_ck16xx = {
-	.name		= "usb_hhc_ck",
-	/* Direct from ULPD, no parent */
-	.rate		= 48000000,
-	/* OTG_SYSCON_2.OTG_PADEN == 0 (not 1510-compatible) */
-	.flags		= CLOCK_IN_OMAP16XX |
-			  RATE_FIXED | ENABLE_REG_32BIT,
-	.enable_reg	= OTG_BASE + 0x08 /* OTG_SYSCON_2 */,
-	.enable_bit	= 8 /* UHOST_EN */,
-};
-
-static struct clk usb_dc_ck = {
-	.name		= "usb_dc_ck",
-	/* Direct from ULPD, no parent */
-	.rate		= 48000000,
-	.flags		= CLOCK_IN_OMAP16XX | RATE_FIXED,
-	.enable_reg	= SOFT_REQ_REG,
-	.enable_bit	= 4,
-};
-
-static struct clk mclk_1510 = {
-	.name		= "mclk",
-	/* Direct from ULPD, no parent. May be enabled by ext hardware. */
-	.rate		= 12000000,
-	.flags		= CLOCK_IN_OMAP1510 | RATE_FIXED,
-};
-
-static struct clk mclk_16xx = {
-	.name		= "mclk",
-	/* Direct from ULPD, no parent. May be enabled by ext hardware. */
-	.flags		= CLOCK_IN_OMAP16XX,
-	.enable_reg	= COM_CLK_DIV_CTRL_SEL,
-	.enable_bit	= COM_ULPD_PLL_CLK_REQ,
-	.set_rate	= &set_ext_clk_rate,
-	.round_rate	= &round_ext_clk_rate,
-	.init		= &init_ext_clk,
-};
-
-static struct clk bclk_1510 = {
-	.name		= "bclk",
-	/* Direct from ULPD, no parent. May be enabled by ext hardware. */
-	.rate		= 12000000,
-	.flags		= CLOCK_IN_OMAP1510 | RATE_FIXED,
-};
-
-static struct clk bclk_16xx = {
-	.name		= "bclk",
-	/* Direct from ULPD, no parent. May be enabled by ext hardware. */
-	.flags		= CLOCK_IN_OMAP16XX,
-	.enable_reg	= SWD_CLK_DIV_CTRL_SEL,
-	.enable_bit	= SWD_ULPD_PLL_CLK_REQ,
-	.set_rate	= &set_ext_clk_rate,
-	.round_rate	= &round_ext_clk_rate,
-	.init		= &init_ext_clk,
-};
-
-static struct clk mmc1_ck = {
-	.name		= "mmc1_ck",
-	/* Functional clock is direct from ULPD, interface clock is ARMPER */
-	.parent		= &armper_ck,
-	.rate		= 48000000,
-	.flags		= CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
-			  RATE_FIXED | ENABLE_REG_32BIT,
-	.enable_reg	= MOD_CONF_CTRL_0,
-	.enable_bit	= 23,
-};
-
-static struct clk mmc2_ck = {
-	.name		= "mmc2_ck",
-	/* Functional clock is direct from ULPD, interface clock is ARMPER */
-	.parent		= &armper_ck,
-	.rate		= 48000000,
-	.flags		= CLOCK_IN_OMAP16XX |
-			  RATE_FIXED | ENABLE_REG_32BIT,
-	.enable_reg	= MOD_CONF_CTRL_0,
-	.enable_bit	= 20,
-};
-
-static struct clk virtual_ck_mpu = {
-	.name		= "mpu",
-	.flags		= CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
-			  VIRTUAL_CLOCK | ALWAYS_ENABLED,
-	.parent		= &arm_ck, /* Is smarter alias for */
-	.recalc		= &followparent_recalc,
-	.set_rate	= &select_table_rate,
-	.round_rate	= &round_to_table_rate,
-};
-
-
-static struct clk *  onchip_clks[] = {
-	/* non-ULPD clocks */
-	&ck_ref,
-	&ck_dpll1,
-	/* CK_GEN1 clocks */
-	&ck_dpll1out,
-	&arm_ck,
-	&armper_ck,
-	&arm_gpio_ck,
-	&armxor_ck,
-	&armtim_ck,
-	&armwdt_ck,
-	&arminth_ck1510,  &arminth_ck16xx,
-	/* CK_GEN2 clocks */
-	&dsp_ck,
-	&dspmmu_ck,
-	&dspper_ck,
-	&dspxor_ck,
-	&dsptim_ck,
-	/* CK_GEN3 clocks */
-	&tc_ck,
-	&tipb_ck,
-	&l3_ocpi_ck,
-	&tc1_ck,
-	&tc2_ck,
-	&dma_ck,
-	&dma_lcdfree_ck,
-	&api_ck,
-	&lb_ck,
-	&rhea1_ck,
-	&rhea2_ck,
-	&lcd_ck,
-	/* ULPD clocks */
-	&uart1_1510,
-	&uart1_16xx,
-	&uart2_ck,
-	&uart3_1510,
-	&uart3_16xx,
-	&usb_clko,
-	&usb_hhc_ck1510, &usb_hhc_ck16xx,
-	&usb_dc_ck,
-	&mclk_1510,  &mclk_16xx,
-	&bclk_1510,  &bclk_16xx,
-	&mmc1_ck,
-	&mmc2_ck,
-	/* Virtual clocks */
-	&virtual_ck_mpu,
-};
-
-struct clk *clk_get(struct device *dev, const char *id)
+struct clk * clk_get(struct device *dev, const char *id)
 {
 	struct clk *p, *clk = ERR_PTR(-ENOENT);
 
@@ -590,6 +53,89 @@
 }
 EXPORT_SYMBOL(clk_get);
 
+int clk_enable(struct clk *clk)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&clockfw_lock, flags);
+	if (clk->enable)
+		ret = clk->enable(clk);
+	else if (arch_clock->clk_enable)
+		ret = arch_clock->clk_enable(clk);
+	else
+		printk(KERN_ERR "Could not enable clock %s\n", clk->name);
+	spin_unlock_irqrestore(&clockfw_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(clk_enable);
+
+void clk_disable(struct clk *clk)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&clockfw_lock, flags);
+	if (clk->disable)
+		clk->disable(clk);
+	else if (arch_clock->clk_disable)
+		arch_clock->clk_disable(clk);
+	else
+		printk(KERN_ERR "Could not disable clock %s\n", clk->name);
+	spin_unlock_irqrestore(&clockfw_lock, flags);
+}
+EXPORT_SYMBOL(clk_disable);
+
+int clk_use(struct clk *clk)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&clockfw_lock, flags);
+	if (arch_clock->clk_use)
+		ret = arch_clock->clk_use(clk);
+	spin_unlock_irqrestore(&clockfw_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(clk_use);
+
+void clk_unuse(struct clk *clk)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&clockfw_lock, flags);
+	if (arch_clock->clk_unuse)
+		arch_clock->clk_unuse(clk);
+	spin_unlock_irqrestore(&clockfw_lock, flags);
+}
+EXPORT_SYMBOL(clk_unuse);
+
+int clk_get_usecount(struct clk *clk)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&clockfw_lock, flags);
+	ret = clk->usecount;
+	spin_unlock_irqrestore(&clockfw_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(clk_get_usecount);
+
+unsigned long clk_get_rate(struct clk *clk)
+{
+	unsigned long flags;
+	unsigned long ret = 0;
+
+	spin_lock_irqsave(&clockfw_lock, flags);
+	ret = clk->rate;
+	spin_unlock_irqrestore(&clockfw_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(clk_get_rate);
 
 void clk_put(struct clk *clk)
 {
@@ -598,526 +144,109 @@
 }
 EXPORT_SYMBOL(clk_put);
 
-
-int __clk_enable(struct clk *clk)
-{
-	__u16 regval16;
-	__u32 regval32;
-
-	if (clk->flags & ALWAYS_ENABLED)
-		return 0;
-
-	if (unlikely(clk->enable_reg == 0)) {
-		printk(KERN_ERR "clock.c: Enable for %s without enable code\n",
-		       clk->name);
-		return 0;
-	}
-
-	if (clk->flags & DSP_DOMAIN_CLOCK) {
-		__clk_use(&api_ck);
-	}
-
-	if (clk->flags & ENABLE_REG_32BIT) {
-		if (clk->flags & VIRTUAL_IO_ADDRESS) {
-			regval32 = __raw_readl(clk->enable_reg);
-			regval32 |= (1 << clk->enable_bit);
-			__raw_writel(regval32, clk->enable_reg);
-		} else {
-			regval32 = omap_readl(clk->enable_reg);
-			regval32 |= (1 << clk->enable_bit);
-			omap_writel(regval32, clk->enable_reg);
-		}
-	} else {
-		if (clk->flags & VIRTUAL_IO_ADDRESS) {
-			regval16 = __raw_readw(clk->enable_reg);
-			regval16 |= (1 << clk->enable_bit);
-			__raw_writew(regval16, clk->enable_reg);
-		} else {
-			regval16 = omap_readw(clk->enable_reg);
-			regval16 |= (1 << clk->enable_bit);
-			omap_writew(regval16, clk->enable_reg);
-		}
-	}
-
-	if (clk->flags & DSP_DOMAIN_CLOCK) {
-		__clk_unuse(&api_ck);
-	}
-
-	return 0;
-}
-
-
-void __clk_disable(struct clk *clk)
-{
-	__u16 regval16;
-	__u32 regval32;
-
-	if (clk->enable_reg == 0)
-		return;
-
-	if (clk->flags & DSP_DOMAIN_CLOCK) {
-		__clk_use(&api_ck);
-	}
-
-	if (clk->flags & ENABLE_REG_32BIT) {
-		if (clk->flags & VIRTUAL_IO_ADDRESS) {
-			regval32 = __raw_readl(clk->enable_reg);
-			regval32 &= ~(1 << clk->enable_bit);
-			__raw_writel(regval32, clk->enable_reg);
-		} else {
-			regval32 = omap_readl(clk->enable_reg);
-			regval32 &= ~(1 << clk->enable_bit);
-			omap_writel(regval32, clk->enable_reg);
-		}
-	} else {
-		if (clk->flags & VIRTUAL_IO_ADDRESS) {
-			regval16 = __raw_readw(clk->enable_reg);
-			regval16 &= ~(1 << clk->enable_bit);
-			__raw_writew(regval16, clk->enable_reg);
-		} else {
-			regval16 = omap_readw(clk->enable_reg);
-			regval16 &= ~(1 << clk->enable_bit);
-			omap_writew(regval16, clk->enable_reg);
-		}
-	}
-
-	if (clk->flags & DSP_DOMAIN_CLOCK) {
-		__clk_unuse(&api_ck);
-	}
-}
-
-
-void __clk_unuse(struct clk *clk)
-{
-	if (clk->usecount > 0 && !(--clk->usecount)) {
-		__clk_disable(clk);
-		if (likely(clk->parent))
-			__clk_unuse(clk->parent);
-	}
-}
-
-
-int __clk_use(struct clk *clk)
-{
-	int ret = 0;
-	if (clk->usecount++ == 0) {
-		if (likely(clk->parent))
-			ret = __clk_use(clk->parent);
-
-		if (unlikely(ret != 0)) {
-			clk->usecount--;
-			return ret;
-		}
-
-		ret = __clk_enable(clk);
-
-		if (unlikely(ret != 0) && clk->parent) {
-			__clk_unuse(clk->parent);
-			clk->usecount--;
-		}
-	}
-
-	return ret;
-}
-
-
-int clk_enable(struct clk *clk)
-{
-	unsigned long flags;
-	int ret;
-
-	spin_lock_irqsave(&clockfw_lock, flags);
-	ret = __clk_enable(clk);
-	spin_unlock_irqrestore(&clockfw_lock, flags);
-	return ret;
-}
-EXPORT_SYMBOL(clk_enable);
-
-
-void clk_disable(struct clk *clk)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&clockfw_lock, flags);
-	__clk_disable(clk);
-	spin_unlock_irqrestore(&clockfw_lock, flags);
-}
-EXPORT_SYMBOL(clk_disable);
-
-
-int clk_use(struct clk *clk)
-{
-	unsigned long flags;
-	int ret = 0;
-
-	spin_lock_irqsave(&clockfw_lock, flags);
-	ret = __clk_use(clk);
-	spin_unlock_irqrestore(&clockfw_lock, flags);
-	return ret;
-}
-EXPORT_SYMBOL(clk_use);
-
-
-void clk_unuse(struct clk *clk)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&clockfw_lock, flags);
-	__clk_unuse(clk);
-	spin_unlock_irqrestore(&clockfw_lock, flags);
-}
-EXPORT_SYMBOL(clk_unuse);
-
-
-int clk_get_usecount(struct clk *clk)
-{
-        return clk->usecount;
-}
-EXPORT_SYMBOL(clk_get_usecount);
-
-
-unsigned long clk_get_rate(struct clk *clk)
-{
-	return clk->rate;
-}
-EXPORT_SYMBOL(clk_get_rate);
-
-
-static __u16 verify_ckctl_value(__u16 newval)
-{
-	/* This function checks for following limitations set
-	 * by the hardware (all conditions must be true):
-	 * DSPMMU_CK == DSP_CK  or  DSPMMU_CK == DSP_CK/2
-	 * ARM_CK >= TC_CK
-	 * DSP_CK >= TC_CK
-	 * DSPMMU_CK >= TC_CK
-	 *
-	 * In addition following rules are enforced:
-	 * LCD_CK <= TC_CK
-	 * ARMPER_CK <= TC_CK
-	 *
-	 * However, maximum frequencies are not checked for!
-	 */
-	__u8 per_exp;
-	__u8 lcd_exp;
-	__u8 arm_exp;
-	__u8 dsp_exp;
-	__u8 tc_exp;
-	__u8 dspmmu_exp;
-
-	per_exp = (newval >> CKCTL_PERDIV_OFFSET) & 3;
-	lcd_exp = (newval >> CKCTL_LCDDIV_OFFSET) & 3;
-	arm_exp = (newval >> CKCTL_ARMDIV_OFFSET) & 3;
-	dsp_exp = (newval >> CKCTL_DSPDIV_OFFSET) & 3;
-	tc_exp = (newval >> CKCTL_TCDIV_OFFSET) & 3;
-	dspmmu_exp = (newval >> CKCTL_DSPMMUDIV_OFFSET) & 3;
-
-	if (dspmmu_exp < dsp_exp)
-		dspmmu_exp = dsp_exp;
-	if (dspmmu_exp > dsp_exp+1)
-		dspmmu_exp = dsp_exp+1;
-	if (tc_exp < arm_exp)
-		tc_exp = arm_exp;
-	if (tc_exp < dspmmu_exp)
-		tc_exp = dspmmu_exp;
-	if (tc_exp > lcd_exp)
-		lcd_exp = tc_exp;
-	if (tc_exp > per_exp)
-		per_exp = tc_exp;
-
-	newval &= 0xf000;
-	newval |= per_exp << CKCTL_PERDIV_OFFSET;
-	newval |= lcd_exp << CKCTL_LCDDIV_OFFSET;
-	newval |= arm_exp << CKCTL_ARMDIV_OFFSET;
-	newval |= dsp_exp << CKCTL_DSPDIV_OFFSET;
-	newval |= tc_exp << CKCTL_TCDIV_OFFSET;
-	newval |= dspmmu_exp << CKCTL_DSPMMUDIV_OFFSET;
-
-	return newval;
-}
-
-
-static int calc_dsor_exp(struct clk *clk, unsigned long rate)
-{
-	/* Note: If target frequency is too low, this function will return 4,
-	 * which is invalid value. Caller must check for this value and act
-	 * accordingly.
-	 *
-	 * Note: This function does not check for following limitations set
-	 * by the hardware (all conditions must be true):
-	 * DSPMMU_CK == DSP_CK  or  DSPMMU_CK == DSP_CK/2
-	 * ARM_CK >= TC_CK
-	 * DSP_CK >= TC_CK
-	 * DSPMMU_CK >= TC_CK
-	 */
-	unsigned long realrate;
-	struct clk *  parent;
-	unsigned  dsor_exp;
-
-	if (unlikely(!(clk->flags & RATE_CKCTL)))
-		return -EINVAL;
-
-	parent = clk->parent;
-	if (unlikely(parent == 0))
-		return -EIO;
-
-	realrate = parent->rate;
-	for (dsor_exp=0; dsor_exp<4; dsor_exp++) {
-		if (realrate <= rate)
-			break;
-
-		realrate /= 2;
-	}
-
-	return dsor_exp;
-}
-
-
-static void ckctl_recalc(struct clk *  clk)
-{
-	int dsor;
-
-	/* Calculate divisor encoded as 2-bit exponent */
-	if (clk->flags & DSP_DOMAIN_CLOCK) {
-		/* The clock control bits are in DSP domain,
-		 * so api_ck is needed for access.
-		 * Note that DSP_CKCTL virt addr = phys addr, so
-		 * we must use __raw_readw() instead of omap_readw().
-		 */
-		__clk_use(&api_ck);
-		dsor = 1 << (3 & (__raw_readw(DSP_CKCTL) >> clk->rate_offset));
-		__clk_unuse(&api_ck);
-	} else {
-		dsor = 1 << (3 & (omap_readw(ARM_CKCTL) >> clk->rate_offset));
-	}
-	if (unlikely(clk->rate == clk->parent->rate / dsor))
-		return; /* No change, quick exit */
-	clk->rate = clk->parent->rate / dsor;
-
-	if (unlikely(clk->flags & RATE_PROPAGATES))
-		propagate_rate(clk);
-}
-
+/*-------------------------------------------------------------------------
+ * Optional clock functions defined in asm/hardware/clock.h
+ *-------------------------------------------------------------------------*/
 
 long clk_round_rate(struct clk *clk, unsigned long rate)
 {
-	int dsor_exp;
+	unsigned long flags;
+	long ret = 0;
 
-	if (clk->flags & RATE_FIXED)
-		return clk->rate;
+	spin_lock_irqsave(&clockfw_lock, flags);
+	if (arch_clock->clk_round_rate)
+		ret = arch_clock->clk_round_rate(clk, rate);
+	spin_unlock_irqrestore(&clockfw_lock, flags);
 
-	if (clk->flags & RATE_CKCTL) {
-		dsor_exp = calc_dsor_exp(clk, rate);
-		if (dsor_exp < 0)
-			return dsor_exp;
-		if (dsor_exp > 3)
-			dsor_exp = 3;
-		return clk->parent->rate / (1 << dsor_exp);
-	}
-
-	if(clk->round_rate != 0)
-		return clk->round_rate(clk, rate);
-
-	return clk->rate;
+	return ret;
 }
 EXPORT_SYMBOL(clk_round_rate);
 
-
-static void propagate_rate(struct clk *  clk)
-{
-	struct clk **  clkp;
-
-	for (clkp = onchip_clks; clkp < onchip_clks+ARRAY_SIZE(onchip_clks); clkp++) {
-		if (likely((*clkp)->parent != clk)) continue;
-		if (likely((*clkp)->recalc))
-			(*clkp)->recalc(*clkp);
-	}
-}
-
-
-static int select_table_rate(struct clk *  clk, unsigned long rate)
-{
-	/* Find the highest supported frequency <= rate and switch to it */
-	struct mpu_rate *  ptr;
-
-	if (clk != &virtual_ck_mpu)
-		return -EINVAL;
-
-	for (ptr = rate_table; ptr->rate; ptr++) {
-		if (ptr->xtal != ck_ref.rate)
-			continue;
-
-		/* DPLL1 cannot be reprogrammed without risking system crash */
-		if (likely(ck_dpll1.rate!=0) && ptr->pll_rate != ck_dpll1.rate)
-			continue;
-
-		/* Can check only after xtal frequency check */
-		if (ptr->rate <= rate)
-			break;
-	}
-
-	if (!ptr->rate)
-		return -EINVAL;
-
-	/*
-	 * In most cases we should not need to reprogram DPLL.
-	 * Reprogramming the DPLL is tricky, it must be done from SRAM.
-	 */
-	omap_sram_reprogram_clock(ptr->dpllctl_val, ptr->ckctl_val);
-
-	ck_dpll1.rate = ptr->pll_rate;
-	propagate_rate(&ck_dpll1);
-	return 0;
-}
-
-
-static long round_to_table_rate(struct clk *  clk, unsigned long rate)
-{
-	/* Find the highest supported frequency <= rate */
-	struct mpu_rate *  ptr;
-	long  highest_rate;
-
-	if (clk != &virtual_ck_mpu)
-		return -EINVAL;
-
-	highest_rate = -EINVAL;
-
-	for (ptr = rate_table; ptr->rate; ptr++) {
-		if (ptr->xtal != ck_ref.rate)
-			continue;
-
-		highest_rate = ptr->rate;
-
-		/* Can check only after xtal frequency check */
-		if (ptr->rate <= rate)
-			break;
-	}
-
-	return highest_rate;
-}
-
-
 int clk_set_rate(struct clk *clk, unsigned long rate)
 {
-	int  ret = -EINVAL;
-	int  dsor_exp;
-	__u16  regval;
-	unsigned long  flags;
+	unsigned long flags;
+	int ret = 0;
 
-	if (clk->flags & RATE_CKCTL) {
-		dsor_exp = calc_dsor_exp(clk, rate);
-		if (dsor_exp > 3)
-			dsor_exp = -EINVAL;
-		if (dsor_exp < 0)
-			return dsor_exp;
-
-		spin_lock_irqsave(&clockfw_lock, flags);
-		regval = omap_readw(ARM_CKCTL);
-		regval &= ~(3 << clk->rate_offset);
-		regval |= dsor_exp << clk->rate_offset;
-		regval = verify_ckctl_value(regval);
-		omap_writew(regval, ARM_CKCTL);
-		clk->rate = clk->parent->rate / (1 << dsor_exp);
-		spin_unlock_irqrestore(&clockfw_lock, flags);
-		ret = 0;
-	} else if(clk->set_rate != 0) {
-		spin_lock_irqsave(&clockfw_lock, flags);
-		ret = clk->set_rate(clk, rate);
-		spin_unlock_irqrestore(&clockfw_lock, flags);
-	}
-
-	if (unlikely(ret == 0 && (clk->flags & RATE_PROPAGATES)))
-		propagate_rate(clk);
+	spin_lock_irqsave(&clockfw_lock, flags);
+	if (arch_clock->clk_set_rate)
+		ret = arch_clock->clk_set_rate(clk, rate);
+	spin_unlock_irqrestore(&clockfw_lock, flags);
 
 	return ret;
 }
 EXPORT_SYMBOL(clk_set_rate);
 
-
-static unsigned calc_ext_dsor(unsigned long rate)
+int clk_set_parent(struct clk *clk, struct clk *parent)
 {
-	unsigned dsor;
+	unsigned long flags;
+	int ret = 0;
 
-	/* MCLK and BCLK divisor selection is not linear:
-	 * freq = 96MHz / dsor
-	 *
-	 * RATIO_SEL range: dsor <-> RATIO_SEL
-	 * 0..6: (RATIO_SEL+2) <-> (dsor-2)
-	 * 6..48:  (8+(RATIO_SEL-6)*2) <-> ((dsor-8)/2+6)
-	 * Minimum dsor is 2 and maximum is 96. Odd divisors starting from 9
-	 * can not be used.
-	 */
-	for (dsor = 2; dsor < 96; ++dsor) {
-		if ((dsor & 1) && dsor > 8)
-		  	continue;
-		if (rate >= 96000000 / dsor)
-			break;
+	spin_lock_irqsave(&clockfw_lock, flags);
+	if (arch_clock->clk_set_parent)
+		ret =  arch_clock->clk_set_parent(clk, parent);
+	spin_unlock_irqrestore(&clockfw_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(clk_set_parent);
+
+struct clk *clk_get_parent(struct clk *clk)
+{
+	unsigned long flags;
+	struct clk *ret = NULL;
+
+	spin_lock_irqsave(&clockfw_lock, flags);
+	if (arch_clock->clk_get_parent)
+		ret = arch_clock->clk_get_parent(clk);
+	spin_unlock_irqrestore(&clockfw_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(clk_get_parent);
+
+/*-------------------------------------------------------------------------
+ * OMAP specific clock functions shared between omap1 and omap2
+ *-------------------------------------------------------------------------*/
+
+unsigned int __initdata mpurate;
+
+/*
+ * By default we use the rate set by the bootloader.
+ * You can override this with the mpurate= command-line option.
+ */
+static int __init omap_clk_setup(char *str)
+{
+	get_option(&str, &mpurate);
+
+	if (!mpurate)
+		return 1;
+
+	if (mpurate < 1000)
+		mpurate *= 1000000;
+
+	return 1;
+}
+__setup("mpurate=", omap_clk_setup);
+
+/* Used for clocks that always have the same rate as their parent clock */
+void followparent_recalc(struct clk *clk)
+{
+	clk->rate = clk->parent->rate;
+}
+
+/* Propagate rate to children */
+void propagate_rate(struct clk *tclk)
+{
+	struct clk *clkp;
+
+	list_for_each_entry(clkp, &clocks, node) {
+		if (likely(clkp->parent != tclk))
+			continue;
+		if (likely((u32)clkp->recalc))
+			clkp->recalc(clkp);
 	}
-	return dsor;
 }
 
-/* Only needed on 1510 */
-static int set_uart_rate(struct clk * clk, unsigned long rate)
-{
-	unsigned int val;
-
-	val = omap_readl(clk->enable_reg);
-	if (rate == 12000000)
-		val &= ~(1 << clk->enable_bit);
-	else if (rate == 48000000)
-		val |= (1 << clk->enable_bit);
-	else
-		return -EINVAL;
-	omap_writel(val, clk->enable_reg);
-	clk->rate = rate;
-
-	return 0;
-}
-
-static int set_ext_clk_rate(struct clk *  clk, unsigned long rate)
-{
-	unsigned dsor;
-	__u16 ratio_bits;
-
-	dsor = calc_ext_dsor(rate);
-	clk->rate = 96000000 / dsor;
-	if (dsor > 8)
-		ratio_bits = ((dsor - 8) / 2 + 6) << 2;
-	else
-		ratio_bits = (dsor - 2) << 2;
-
-	ratio_bits |= omap_readw(clk->enable_reg) & ~0xfd;
-	omap_writew(ratio_bits, clk->enable_reg);
-
-	return 0;
-}
-
-
-static long round_ext_clk_rate(struct clk *  clk, unsigned long rate)
-{
-	return 96000000 / calc_ext_dsor(rate);
-}
-
-
-static void init_ext_clk(struct clk *  clk)
-{
-	unsigned dsor;
-	__u16 ratio_bits;
-
-	/* Determine current rate and ensure clock is based on 96MHz APLL */
-	ratio_bits = omap_readw(clk->enable_reg) & ~1;
-	omap_writew(ratio_bits, clk->enable_reg);
-
-	ratio_bits = (ratio_bits & 0xfc) >> 2;
-	if (ratio_bits > 6)
-		dsor = (ratio_bits - 6) * 2 + 8;
-	else
-		dsor = ratio_bits + 2;
-
-	clk-> rate = 96000000 / dsor;
-}
-
-
 int clk_register(struct clk *clk)
 {
 	down(&clocks_sem);
@@ -1125,6 +254,7 @@
 	if (clk->init)
 		clk->init(clk);
 	up(&clocks_sem);
+
 	return 0;
 }
 EXPORT_SYMBOL(clk_register);
@@ -1137,203 +267,38 @@
 }
 EXPORT_SYMBOL(clk_unregister);
 
-#ifdef CONFIG_OMAP_RESET_CLOCKS
-/*
- * Resets some clocks that may be left on from bootloader,
- * but leaves serial clocks on. See also omap_late_clk_reset().
- */
-static inline void omap_early_clk_reset(void)
+void clk_deny_idle(struct clk *clk)
 {
-	//omap_writel(0x3 << 29, MOD_CONF_CTRL_0);
+	unsigned long flags;
+
+	spin_lock_irqsave(&clockfw_lock, flags);
+	if (arch_clock->clk_deny_idle)
+		arch_clock->clk_deny_idle(clk);
+	spin_unlock_irqrestore(&clockfw_lock, flags);
 }
-#else
-#define omap_early_clk_reset()	{}
-#endif
+EXPORT_SYMBOL(clk_deny_idle);
 
-int __init clk_init(void)
+void clk_allow_idle(struct clk *clk)
 {
-	struct clk **  clkp;
-	const struct omap_clock_config *info;
-	int crystal_type = 0; /* Default 12 MHz */
+	unsigned long flags;
 
-	omap_early_clk_reset();
+	spin_lock_irqsave(&clockfw_lock, flags);
+	if (arch_clock->clk_allow_idle)
+		arch_clock->clk_allow_idle(clk);
+	spin_unlock_irqrestore(&clockfw_lock, flags);
+}
+EXPORT_SYMBOL(clk_allow_idle);
 
-	for (clkp = onchip_clks; clkp < onchip_clks+ARRAY_SIZE(onchip_clks); clkp++) {
-		if (((*clkp)->flags &CLOCK_IN_OMAP1510) && cpu_is_omap1510()) {
-			clk_register(*clkp);
-			continue;
-		}
+/*-------------------------------------------------------------------------*/
 
-		if (((*clkp)->flags &CLOCK_IN_OMAP16XX) && cpu_is_omap16xx()) {
-			clk_register(*clkp);
-			continue;
-		}
-
-		if (((*clkp)->flags &CLOCK_IN_OMAP730) && cpu_is_omap730()) {
-			clk_register(*clkp);
-			continue;
-		}
+int __init clk_init(struct clk_functions * custom_clocks)
+{
+	if (!custom_clocks) {
+		printk(KERN_ERR "No custom clock functions registered\n");
+		BUG();
 	}
 
-	info = omap_get_config(OMAP_TAG_CLOCK, struct omap_clock_config);
-	if (info != NULL) {
-		if (!cpu_is_omap1510())
-			crystal_type = info->system_clock_type;
-	}
-
-#if defined(CONFIG_ARCH_OMAP730)
-	ck_ref.rate = 13000000;
-#elif defined(CONFIG_ARCH_OMAP16XX)
-	if (crystal_type == 2)
-		ck_ref.rate = 19200000;
-#endif
-
-	printk("Clocks: ARM_SYSST: 0x%04x DPLL_CTL: 0x%04x ARM_CKCTL: 0x%04x\n",
-	       omap_readw(ARM_SYSST), omap_readw(DPLL_CTL),
-	       omap_readw(ARM_CKCTL));
-
-	/* We want to be in syncronous scalable mode */
-	omap_writew(0x1000, ARM_SYSST);
-
-#ifdef CONFIG_OMAP_CLOCKS_SET_BY_BOOTLOADER
-	/* Use values set by bootloader. Determine PLL rate and recalculate
-	 * dependent clocks as if kernel had changed PLL or divisors.
-	 */
-	{
-		unsigned pll_ctl_val = omap_readw(DPLL_CTL);
-
-		ck_dpll1.rate = ck_ref.rate; /* Base xtal rate */
-		if (pll_ctl_val & 0x10) {
-			/* PLL enabled, apply multiplier and divisor */
-			if (pll_ctl_val & 0xf80)
-				ck_dpll1.rate *= (pll_ctl_val & 0xf80) >> 7;
-			ck_dpll1.rate /= ((pll_ctl_val & 0x60) >> 5) + 1;
-		} else {
-			/* PLL disabled, apply bypass divisor */
-			switch (pll_ctl_val & 0xc) {
-			case 0:
-				break;
-			case 0x4:
-				ck_dpll1.rate /= 2;
-				break;
-			default:
-				ck_dpll1.rate /= 4;
-				break;
-			}
-		}
-	}
-	propagate_rate(&ck_dpll1);
-#else
-	/* Find the highest supported frequency and enable it */
-	if (select_table_rate(&virtual_ck_mpu, ~0)) {
-		printk(KERN_ERR "System frequencies not set. Check your config.\n");
-		/* Guess sane values (60MHz) */
-		omap_writew(0x2290, DPLL_CTL);
-		omap_writew(0x1005, ARM_CKCTL);
-		ck_dpll1.rate = 60000000;
-		propagate_rate(&ck_dpll1);
-	}
-#endif
-	/* Cache rates for clocks connected to ck_ref (not dpll1) */
-	propagate_rate(&ck_ref);
-	printk(KERN_INFO "Clocking rate (xtal/DPLL1/MPU): "
-		"%ld.%01ld/%ld.%01ld/%ld.%01ld MHz\n",
-	       ck_ref.rate / 1000000, (ck_ref.rate / 100000) % 10,
-	       ck_dpll1.rate / 1000000, (ck_dpll1.rate / 100000) % 10,
-	       arm_ck.rate / 1000000, (arm_ck.rate / 100000) % 10);
-
-#ifdef CONFIG_MACH_OMAP_PERSEUS2
-	/* Select slicer output as OMAP input clock */
-	omap_writew(omap_readw(OMAP730_PCC_UPLD_CTRL) & ~0x1, OMAP730_PCC_UPLD_CTRL);
-#endif
-
-	/* Turn off DSP and ARM_TIMXO. Make sure ARM_INTHCK is not divided */
-	omap_writew(omap_readw(ARM_CKCTL) & 0x0fff, ARM_CKCTL);
-
-	/* Put DSP/MPUI into reset until needed */
-	omap_writew(0, ARM_RSTCT1);
-	omap_writew(1, ARM_RSTCT2);
-	omap_writew(0x400, ARM_IDLECT1);
-
-	/*
-	 * According to OMAP5910 Erratum SYS_DMA_1, bit DMACK_REQ (bit 8)
-	 * of the ARM_IDLECT2 register must be set to zero. The power-on
-	 * default value of this bit is one.
-	 */
-	omap_writew(0x0000, ARM_IDLECT2);	/* Turn LCD clock off also */
-
-	/*
-	 * Only enable those clocks we will need, let the drivers
-	 * enable other clocks as necessary
-	 */
-	clk_use(&armper_ck);
-	clk_use(&armxor_ck);
-	clk_use(&armtim_ck);
-
-	if (cpu_is_omap1510())
-		clk_enable(&arm_gpio_ck);
+	arch_clock = custom_clocks;
 
 	return 0;
 }
-
-
-#ifdef CONFIG_OMAP_RESET_CLOCKS
-
-static int __init omap_late_clk_reset(void)
-{
-	/* Turn off all unused clocks */
-	struct clk *p;
-	__u32 regval32;
-
-	/* USB_REQ_EN will be disabled later if necessary (usb_dc_ck) */
-	regval32 = omap_readw(SOFT_REQ_REG) & (1 << 4);
-	omap_writew(regval32, SOFT_REQ_REG);
-	omap_writew(0, SOFT_REQ_REG2);
-
-	list_for_each_entry(p, &clocks, node) {
-		if (p->usecount > 0 || (p->flags & ALWAYS_ENABLED) ||
-			p->enable_reg == 0)
-			continue;
-
-		/* Assume no DSP clocks have been activated by bootloader */
-		if (p->flags & DSP_DOMAIN_CLOCK)
-			continue;
-
-		/* Is the clock already disabled? */
-		if (p->flags & ENABLE_REG_32BIT) {
-			if (p->flags & VIRTUAL_IO_ADDRESS)
-				regval32 = __raw_readl(p->enable_reg);
-			else
-				regval32 = omap_readl(p->enable_reg);
-		} else {
-			if (p->flags & VIRTUAL_IO_ADDRESS)
-				regval32 = __raw_readw(p->enable_reg);
-			else
-				regval32 = omap_readw(p->enable_reg);
-		}
-
-		if ((regval32 & (1 << p->enable_bit)) == 0)
-			continue;
-
-		/* FIXME: This clock seems to be necessary but no-one
-		 * has asked for its activation. */
-		if (p == &tc2_ck         // FIX: pm.c (SRAM), CCP, Camera
-		    || p == &ck_dpll1out // FIX: SoSSI, SSR
-		    || p == &arm_gpio_ck // FIX: GPIO code for 1510
-		    ) {
-			printk(KERN_INFO "FIXME: Clock \"%s\" seems unused\n",
-			       p->name);
-			continue;
-		}
-
-		printk(KERN_INFO "Disabling unused clock \"%s\"... ", p->name);
-		__clk_disable(p);
-		printk(" done\n");
-	}
-
-	return 0;
-}
-
-late_initcall(omap_late_clk_reset);
-
-#endif
diff --git a/arch/arm/plat-omap/clock.h b/arch/arm/plat-omap/clock.h
deleted file mode 100644
index a89e1e8..0000000
--- a/arch/arm/plat-omap/clock.h
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- *  linux/arch/arm/plat-omap/clock.h
- *
- *  Copyright (C) 2004 Nokia corporation
- *  Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
- *  Based on clocks.h by Tony Lindgren, Gordon McNutt and RidgeRun, Inc
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __ARCH_ARM_OMAP_CLOCK_H
-#define __ARCH_ARM_OMAP_CLOCK_H
-
-struct module;
-
-struct clk {
-	struct list_head	node;
-	struct module		*owner;
-	const char		*name;
-	struct clk		*parent;
-	unsigned long		rate;
-	__s8			usecount;
-	__u16			flags;
-	__u32			enable_reg;
-	__u8			enable_bit;
-	__u8			rate_offset;
-	void			(*recalc)(struct clk *);
-	int			(*set_rate)(struct clk *, unsigned long);
-	long			(*round_rate)(struct clk *, unsigned long);
-	void			(*init)(struct clk *);
-};
-
-
-struct mpu_rate {
-	unsigned long		rate;
-	unsigned long		xtal;
-	unsigned long		pll_rate;
-	__u16			ckctl_val;
-	__u16			dpllctl_val;
-};
-
-
-/* Clock flags */
-#define RATE_CKCTL		1
-#define RATE_FIXED		2
-#define RATE_PROPAGATES		4
-#define VIRTUAL_CLOCK		8
-#define ALWAYS_ENABLED		16
-#define ENABLE_REG_32BIT	32
-#define CLOCK_IN_OMAP16XX	64
-#define CLOCK_IN_OMAP1510	128
-#define CLOCK_IN_OMAP730	256
-#define DSP_DOMAIN_CLOCK	512
-#define VIRTUAL_IO_ADDRESS	1024
-
-/* ARM_CKCTL bit shifts */
-#define CKCTL_PERDIV_OFFSET	0
-#define CKCTL_LCDDIV_OFFSET	2
-#define CKCTL_ARMDIV_OFFSET	4
-#define CKCTL_DSPDIV_OFFSET	6
-#define CKCTL_TCDIV_OFFSET	8
-#define CKCTL_DSPMMUDIV_OFFSET	10
-/*#define ARM_TIMXO		12*/
-#define EN_DSPCK		13
-/*#define ARM_INTHCK_SEL	14*/ /* Divide-by-2 for mpu inth_ck */
-/* DSP_CKCTL bit shifts */
-#define CKCTL_DSPPERDIV_OFFSET	0
-
-/* ARM_IDLECT1 bit shifts */
-/*#define IDLWDT_ARM	0*/
-/*#define IDLXORP_ARM	1*/
-/*#define IDLPER_ARM	2*/
-/*#define IDLLCD_ARM	3*/
-/*#define IDLLB_ARM	4*/
-/*#define IDLHSAB_ARM	5*/
-/*#define IDLIF_ARM	6*/
-/*#define IDLDPLL_ARM	7*/
-/*#define IDLAPI_ARM	8*/
-/*#define IDLTIM_ARM	9*/
-/*#define SETARM_IDLE	11*/
-
-/* ARM_IDLECT2 bit shifts */
-#define EN_WDTCK	0
-#define EN_XORPCK	1
-#define EN_PERCK	2
-#define EN_LCDCK	3
-#define EN_LBCK		4 /* Not on 1610/1710 */
-/*#define EN_HSABCK	5*/
-#define EN_APICK	6
-#define EN_TIMCK	7
-#define DMACK_REQ	8
-#define EN_GPIOCK	9 /* Not on 1610/1710 */
-/*#define EN_LBFREECK	10*/
-#define EN_CKOUT_ARM	11
-
-/* ARM_IDLECT3 bit shifts */
-#define EN_OCPI_CK	0
-#define EN_TC1_CK	2
-#define EN_TC2_CK	4
-
-/* DSP_IDLECT2 bit shifts (0,1,2 are same as for ARM_IDLECT2) */
-#define EN_DSPTIMCK	5
-
-/* Various register defines for clock controls scattered around OMAP chip */
-#define USB_MCLK_EN_BIT		4	/* In ULPD_CLKC_CTRL */
-#define USB_HOST_HHC_UHOST_EN	9	/* In MOD_CONF_CTRL_0 */
-#define SWD_ULPD_PLL_CLK_REQ	1	/* In SWD_CLK_DIV_CTRL_SEL */
-#define COM_ULPD_PLL_CLK_REQ	1	/* In COM_CLK_DIV_CTRL_SEL */
-#define SWD_CLK_DIV_CTRL_SEL	0xfffe0874
-#define COM_CLK_DIV_CTRL_SEL	0xfffe0878
-#define SOFT_REQ_REG		0xfffe0834
-#define SOFT_REQ_REG2		0xfffe0880
-
-int clk_register(struct clk *clk);
-void clk_unregister(struct clk *clk);
-int clk_init(void);
-
-#endif
diff --git a/arch/arm/plat-omap/common.c b/arch/arm/plat-omap/common.c
index 02bcc6c..ccdb4526 100644
--- a/arch/arm/plat-omap/common.c
+++ b/arch/arm/plat-omap/common.c
@@ -31,7 +31,7 @@
 #include <asm/arch/mux.h>
 #include <asm/arch/fpga.h>
 
-#include "clock.h"
+#include <asm/arch/clock.h>
 
 #define NO_LENGTH_CHECK 0xffffffff
 
@@ -117,19 +117,43 @@
 
 static int __init omap_add_serial_console(void)
 {
-	const struct omap_serial_console_config *info;
+	const struct omap_serial_console_config *con_info;
+	const struct omap_uart_config *uart_info;
+	static char speed[11], *opt = NULL;
+	int line, i, uart_idx;
 
-	info = omap_get_config(OMAP_TAG_SERIAL_CONSOLE,
-			       struct omap_serial_console_config);
-	if (info != NULL && info->console_uart) {
-		static char speed[11], *opt = NULL;
+	uart_info = omap_get_config(OMAP_TAG_UART, struct omap_uart_config);
+	con_info = omap_get_config(OMAP_TAG_SERIAL_CONSOLE,
+					struct omap_serial_console_config);
+	if (uart_info == NULL || con_info == NULL)
+		return 0;
 
-		if (info->console_speed) {
-			snprintf(speed, sizeof(speed), "%u", info->console_speed);
-			opt = speed;
-		}
-		return add_preferred_console("ttyS", info->console_uart - 1, opt);
+	if (con_info->console_uart == 0)
+		return 0;
+
+	if (con_info->console_speed) {
+		snprintf(speed, sizeof(speed), "%u", con_info->console_speed);
+		opt = speed;
 	}
-	return 0;
+
+	uart_idx = con_info->console_uart - 1;
+	if (uart_idx >= OMAP_MAX_NR_PORTS) {
+		printk(KERN_INFO "Console: external UART#%d. "
+			"Not adding it as console this time.\n",
+			uart_idx + 1);
+		return 0;
+	}
+	if (!(uart_info->enabled_uarts & (1 << uart_idx))) {
+		printk(KERN_ERR "Console: Selected UART#%d is "
+			"not enabled for this platform\n",
+			uart_idx + 1);
+		return -1;
+	}
+	line = 0;
+	for (i = 0; i < uart_idx; i++) {
+		if (uart_info->enabled_uarts & (1 << i))
+			line++;
+	}
+	return add_preferred_console("ttyS", line, opt);
 }
 console_initcall(omap_add_serial_console);
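The hunk above no longer trusts the 1-based console UART number directly: it first checks that the UART exists and is enabled, then maps it onto a ttyS line by counting only the UARTs enabled in the board's omap_uart_config bitmask. A minimal sketch of that mapping; the mask and UART number below are illustrative, not taken from any real board file:

/* Sketch of the line-number mapping done in omap_add_serial_console() above.
 * enabled_uarts and console_uart are illustrative values only. */
static int console_uart_to_line(unsigned int enabled_uarts, int console_uart)
{
	int uart_idx = console_uart - 1;	/* console_uart is 1-based */
	int line = 0, i;

	for (i = 0; i < uart_idx; i++)
		if (enabled_uarts & (1 << i))
			line++;
	return line;
}

/* Example: UART1 and UART3 enabled (enabled_uarts == 0x5), console on UART3:
 * uart_idx == 2, only bit 0 is set below it, so the console becomes ttyS1. */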
diff --git a/arch/arm/plat-omap/devices.c b/arch/arm/plat-omap/devices.c
new file mode 100644
index 0000000..9dcce90
--- /dev/null
+++ b/arch/arm/plat-omap/devices.c
@@ -0,0 +1,381 @@
+/*
+ * linux/arch/arm/plat-omap/devices.c
+ *
+ * Common platform device setup/initialization for OMAP1 and OMAP2
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+
+#include <asm/hardware.h>
+#include <asm/io.h>
+#include <asm/mach-types.h>
+#include <asm/mach/map.h>
+
+#include <asm/arch/tc.h>
+#include <asm/arch/board.h>
+#include <asm/arch/mux.h>
+#include <asm/arch/gpio.h>
+
+
+void omap_nop_release(struct device *dev)
+{
+        /* Nothing */
+}
+
+/*-------------------------------------------------------------------------*/
+
+#if 	defined(CONFIG_I2C_OMAP) || defined(CONFIG_I2C_OMAP_MODULE)
+
+#define	OMAP1_I2C_BASE		0xfffb3800
+#define OMAP2_I2C_BASE1		0x48070000
+#define OMAP_I2C_SIZE		0x3f
+#define OMAP1_I2C_INT		INT_I2C
+#define OMAP2_I2C_INT1		56
+
+static struct resource i2c_resources1[] = {
+	{
+		.start		= 0,
+		.end		= 0,
+		.flags		= IORESOURCE_MEM,
+	},
+	{
+		.start		= 0,
+		.flags		= IORESOURCE_IRQ,
+	},
+};
+
+/* DMA not used; works around erratum writing to non-empty i2c fifo */
+
+static struct platform_device omap_i2c_device1 = {
+        .name           = "i2c_omap",
+        .id             = 1,
+        .dev = {
+                .release        = omap_nop_release,
+        },
+	.num_resources	= ARRAY_SIZE(i2c_resources1),
+	.resource	= i2c_resources1,
+};
+
+/* See also arch/arm/mach-omap2/devices.c for second I2C on 24xx */
+static void omap_init_i2c(void)
+{
+	if (cpu_is_omap24xx()) {
+		i2c_resources1[0].start = OMAP2_I2C_BASE1;
+		i2c_resources1[0].end = OMAP2_I2C_BASE1 + OMAP_I2C_SIZE;
+		i2c_resources1[1].start = OMAP2_I2C_INT1;
+	} else {
+		i2c_resources1[0].start = OMAP1_I2C_BASE;
+		i2c_resources1[0].end = OMAP1_I2C_BASE + OMAP_I2C_SIZE;
+		i2c_resources1[1].start = OMAP1_I2C_INT;
+	}
+
+	/* FIXME define and use a boot tag, in case of boards that
+	 * either don't wire up I2C, or chips that mux it differently...
+	 * it can include clocking and address info, maybe more.
+	 */
+	if (cpu_is_omap24xx()) {
+		omap_cfg_reg(M19_24XX_I2C1_SCL);
+		omap_cfg_reg(L15_24XX_I2C1_SDA);
+	} else {
+		omap_cfg_reg(I2C_SCL);
+		omap_cfg_reg(I2C_SDA);
+	}
+
+	(void) platform_device_register(&omap_i2c_device1);
+}
+
+#else
+static inline void omap_init_i2c(void) {}
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+#if	defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE)
+
+#ifdef CONFIG_ARCH_OMAP24XX
+#define	OMAP_MMC1_BASE		0x4809c000
+#define OMAP_MMC1_INT		83
+#else
+#define	OMAP_MMC1_BASE		0xfffb7800
+#define OMAP_MMC1_INT		INT_MMC
+#endif
+#define	OMAP_MMC2_BASE		0xfffb7c00	/* omap16xx only */
+
+static struct omap_mmc_conf mmc1_conf;
+
+static u64 mmc1_dmamask = 0xffffffff;
+
+static struct resource mmc1_resources[] = {
+	{
+		.start		= IO_ADDRESS(OMAP_MMC1_BASE),
+		.end		= IO_ADDRESS(OMAP_MMC1_BASE) + 0x7f,
+		.flags		= IORESOURCE_MEM,
+	},
+	{
+		.start		= OMAP_MMC1_INT,
+		.flags		= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device mmc_omap_device1 = {
+	.name		= "mmci-omap",
+	.id		= 1,
+	.dev = {
+		.release	= omap_nop_release,
+		.dma_mask	= &mmc1_dmamask,
+		.platform_data	= &mmc1_conf,
+	},
+	.num_resources	= ARRAY_SIZE(mmc1_resources),
+	.resource	= mmc1_resources,
+};
+
+#ifdef	CONFIG_ARCH_OMAP16XX
+
+static struct omap_mmc_conf mmc2_conf;
+
+static u64 mmc2_dmamask = 0xffffffff;
+
+static struct resource mmc2_resources[] = {
+	{
+		.start		= IO_ADDRESS(OMAP_MMC2_BASE),
+		.end		= IO_ADDRESS(OMAP_MMC2_BASE) + 0x7f,
+		.flags		= IORESOURCE_MEM,
+	},
+	{
+		.start		= INT_1610_MMC2,
+		.flags		= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device mmc_omap_device2 = {
+	.name		= "mmci-omap",
+	.id		= 2,
+	.dev = {
+		.release	= omap_nop_release,
+		.dma_mask	= &mmc2_dmamask,
+		.platform_data	= &mmc2_conf,
+	},
+	.num_resources	= ARRAY_SIZE(mmc2_resources),
+	.resource	= mmc2_resources,
+};
+#endif
+
+static void __init omap_init_mmc(void)
+{
+	const struct omap_mmc_config	*mmc_conf;
+	const struct omap_mmc_conf	*mmc;
+
+	/* NOTE:  assumes MMC was never (wrongly) enabled */
+	mmc_conf = omap_get_config(OMAP_TAG_MMC, struct omap_mmc_config);
+	if (!mmc_conf)
+		return;
+
+	/* block 1 is always available and has just one pinout option */
+	mmc = &mmc_conf->mmc[0];
+	if (mmc->enabled) {
+		if (!cpu_is_omap24xx()) {
+			omap_cfg_reg(MMC_CMD);
+			omap_cfg_reg(MMC_CLK);
+			omap_cfg_reg(MMC_DAT0);
+			if (cpu_is_omap1710()) {
+				omap_cfg_reg(M15_1710_MMC_CLKI);
+				omap_cfg_reg(P19_1710_MMC_CMDDIR);
+				omap_cfg_reg(P20_1710_MMC_DATDIR0);
+			}
+		}
+		if (mmc->wire4) {
+			if (!cpu_is_omap24xx()) {
+				omap_cfg_reg(MMC_DAT1);
+				/* NOTE:  DAT2 can be on W10 (here) or M15 */
+				if (!mmc->nomux)
+					omap_cfg_reg(MMC_DAT2);
+				omap_cfg_reg(MMC_DAT3);
+			}
+		}
+		mmc1_conf = *mmc;
+		(void) platform_device_register(&mmc_omap_device1);
+	}
+
+#ifdef	CONFIG_ARCH_OMAP16XX
+	/* block 2 is on newer chips, and has many pinout options */
+	mmc = &mmc_conf->mmc[1];
+	if (mmc->enabled) {
+		if (!mmc->nomux) {
+			omap_cfg_reg(Y8_1610_MMC2_CMD);
+			omap_cfg_reg(Y10_1610_MMC2_CLK);
+			omap_cfg_reg(R18_1610_MMC2_CLKIN);
+			omap_cfg_reg(W8_1610_MMC2_DAT0);
+			if (mmc->wire4) {
+				omap_cfg_reg(V8_1610_MMC2_DAT1);
+				omap_cfg_reg(W15_1610_MMC2_DAT2);
+				omap_cfg_reg(R10_1610_MMC2_DAT3);
+			}
+
+			/* These are needed for the level shifter */
+			omap_cfg_reg(V9_1610_MMC2_CMDDIR);
+			omap_cfg_reg(V5_1610_MMC2_DATDIR0);
+			omap_cfg_reg(W19_1610_MMC2_DATDIR1);
+		}
+
+		/* Feedback clock must be set on OMAP-1710 MMC2 */
+		if (cpu_is_omap1710())
+			omap_writel(omap_readl(MOD_CONF_CTRL_1) | (1 << 24),
+				     MOD_CONF_CTRL_1);
+		mmc2_conf = *mmc;
+		(void) platform_device_register(&mmc_omap_device2);
+	}
+#endif
+	return;
+}
+#else
+static inline void omap_init_mmc(void) {}
+#endif
+
+#if	defined(CONFIG_OMAP_WATCHDOG) || defined(CONFIG_OMAP_WATCHDOG_MODULE)
+
+#ifdef CONFIG_ARCH_OMAP24XX
+#define	OMAP_WDT_BASE		0x48022000
+#else
+#define	OMAP_WDT_BASE		0xfffeb000
+#endif
+
+static struct resource wdt_resources[] = {
+	{
+		.start		= OMAP_WDT_BASE,
+		.end		= OMAP_WDT_BASE + 0x4f,
+		.flags		= IORESOURCE_MEM,
+	},
+};
+
+static struct platform_device omap_wdt_device = {
+	.name	   = "omap_wdt",
+	.id	     = -1,
+	.dev = {
+		.release	= omap_nop_release,
+	},
+	.num_resources	= ARRAY_SIZE(wdt_resources),
+	.resource	= wdt_resources,
+};
+
+static void omap_init_wdt(void)
+{
+	(void) platform_device_register(&omap_wdt_device);
+}
+#else
+static inline void omap_init_wdt(void) {}
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+#if	defined(CONFIG_OMAP_RNG) || defined(CONFIG_OMAP_RNG_MODULE)
+
+#ifdef CONFIG_ARCH_OMAP24XX
+#define	OMAP_RNG_BASE		0x480A0000
+#else
+#define	OMAP_RNG_BASE		0xfffe5000
+#endif
+
+static struct resource rng_resources[] = {
+	{
+		.start		= OMAP_RNG_BASE,
+		.end		= OMAP_RNG_BASE + 0x4f,
+		.flags		= IORESOURCE_MEM,
+	},
+};
+
+static struct platform_device omap_rng_device = {
+	.name	   = "omap_rng",
+	.id	     = -1,
+	.dev = {
+		.release	= omap_nop_release,
+	},
+	.num_resources	= ARRAY_SIZE(rng_resources),
+	.resource	= rng_resources,
+};
+
+static void omap_init_rng(void)
+{
+	(void) platform_device_register(&omap_rng_device);
+}
+#else
+static inline void omap_init_rng(void) {}
+#endif
+
+#if defined(CONFIG_FB_OMAP) || defined(CONFIG_FB_OMAP_MODULE)
+
+static struct omap_lcd_config omap_fb_conf;
+
+static u64 omap_fb_dma_mask = ~(u32)0;
+
+static struct platform_device omap_fb_device = {
+	.name		= "omapfb",
+	.id		= -1,
+	.dev = {
+		.release		= omap_nop_release,
+		.dma_mask		= &omap_fb_dma_mask,
+		.coherent_dma_mask	= ~(u32)0,
+		.platform_data		= &omap_fb_conf,
+	},
+	.num_resources = 0,
+};
+
+static inline void omap_init_fb(void)
+{
+	const struct omap_lcd_config *conf;
+
+	conf = omap_get_config(OMAP_TAG_LCD, struct omap_lcd_config);
+	if (conf != NULL)
+		omap_fb_conf = *conf;
+	platform_device_register(&omap_fb_device);
+}
+
+#else
+
+static inline void omap_init_fb(void) {}
+
+#endif
+
+/*
+ * This gets called after board-specific INIT_MACHINE, and initializes most
+ * on-chip peripherals accessible on this board (except for a few like USB):
+ *
+ *  (a) Does any "standard config" pin muxing that is needed.  Board-specific
+ *	code will have muxed GPIO pins and done "nonstandard" setup;
+ *	that code could live in the boot loader.
+ *  (b) Populates board-specific platform_data with the data drivers
+ *	rely on to handle wiring variations.
+ *  (c) Creates platform devices as meaningful on this board and
+ *	with this kernel configuration.
+ *
+ * Claiming GPIOs, and setting their direction and initial values, is the
+ * responsibility of the device drivers.  So is responding to probe().
+ *
+ * Board-specific knowledge like creating devices or pin setup is to be
+ * kept out of drivers as much as possible.  In particular, pin setup
+ * may be handled by the boot loader, and drivers should expect it will
+ * normally have been done by the time they're probed.
+ */
+static int __init omap_init_devices(void)
+{
+	/* please keep these calls, and their implementations above,
+	 * in alphabetical order so they're easier to sort through.
+	 */
+	omap_init_fb();
+	omap_init_i2c();
+	omap_init_mmc();
+	omap_init_rng();
+	omap_init_wdt();
+
+	return 0;
+}
+arch_initcall(omap_init_devices);
+
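Every omap_init_*() helper in this new file follows the same shape: fetch the board's configuration tag with omap_get_config(), copy it into a static platform_data structure, and register a platform_device whose release hook is the no-op omap_nop_release(). A hedged sketch of that shape for a hypothetical peripheral; OMAP_TAG_FOO, struct omap_foo_config and the "foo" device name are made-up placeholders, not part of this patch:

/* Hypothetical example of the registration pattern used throughout devices.c.
 * The tag, config structure and device name are invented for illustration. */
static struct omap_foo_config foo_conf;

static struct platform_device foo_device = {
	.name	= "foo",
	.id	= -1,
	.dev	= {
		.release	= omap_nop_release,
		.platform_data	= &foo_conf,
	},
};

static void __init omap_init_foo(void)
{
	const struct omap_foo_config *conf;

	conf = omap_get_config(OMAP_TAG_FOO, struct omap_foo_config);
	if (conf == NULL)
		return;

	foo_conf = *conf;
	(void) platform_device_register(&foo_device);
}

The driver bound to "foo" would then read the wiring details from its platform_data instead of hard-coding board knowledge, which is exactly the split the comment before omap_init_devices() describes.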
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index da7b651..f5cc21a 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -6,6 +6,8 @@
  * DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com>
  * Graphics DMA and LCD DMA graphics tranformations
  * by Imre Deak <imre.deak@nokia.com>
+ * OMAP2 support Copyright (C) 2004-2005 Texas Instruments, Inc.
+ * Merged to support both OMAP1 and OMAP2 by Tony Lindgren <tony@atomide.com>
  * Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc.
  *
  * Support functions for the OMAP internal DMA channels.
@@ -31,8 +33,15 @@
 
 #include <asm/arch/tc.h>
 
-#define OMAP_DMA_ACTIVE		0x01
+#define DEBUG_PRINTS
+#undef DEBUG_PRINTS
+#ifdef DEBUG_PRINTS
+#define debug_printk(x) printk x
+#else
+#define	debug_printk(x)
+#endif
 
+#define OMAP_DMA_ACTIVE		0x01
 #define OMAP_DMA_CCR_EN		(1 << 7)
 
 #define OMAP_FUNC_MUX_ARM_BASE	(0xfffe1000 + 0xec)
@@ -55,7 +64,7 @@
 static spinlock_t dma_chan_lock;
 static struct omap_dma_lch dma_chan[OMAP_LOGICAL_DMA_CH_COUNT];
 
-const static u8 dma_irq[OMAP_LOGICAL_DMA_CH_COUNT] = {
+const static u8 omap1_dma_irq[OMAP_LOGICAL_DMA_CH_COUNT] = {
 	INT_DMA_CH0_6, INT_DMA_CH1_7, INT_DMA_CH2_8, INT_DMA_CH3,
 	INT_DMA_CH4, INT_DMA_CH5, INT_1610_DMA_CH6, INT_1610_DMA_CH7,
 	INT_1610_DMA_CH8, INT_1610_DMA_CH9, INT_1610_DMA_CH10,
@@ -63,6 +72,20 @@
 	INT_1610_DMA_CH14, INT_1610_DMA_CH15, INT_DMA_LCD
 };
 
+#define REVISIT_24XX()		printk(KERN_ERR "FIXME: no %s on 24xx\n", \
+						__FUNCTION__);
+
+#ifdef CONFIG_ARCH_OMAP15XX
+/* Returns 1 if the DMA module is in OMAP1510-compatible mode, 0 otherwise */
+int omap_dma_in_1510_mode(void)
+{
+	return enable_1510_mode;
+}
+#else
+#define omap_dma_in_1510_mode()		0
+#endif
+
+#ifdef CONFIG_ARCH_OMAP1
 static inline int get_gdma_dev(int req)
 {
 	u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
@@ -82,6 +105,9 @@
 	l |= (dev - 1) << shift;
 	omap_writel(l, reg);
 }
+#else
+#define set_gdma_dev(req, dev)	do {} while (0)
+#endif
 
 static void clear_lch_regs(int lch)
 {
@@ -121,38 +147,62 @@
 }
 
 void omap_set_dma_transfer_params(int lch, int data_type, int elem_count,
-				  int frame_count, int sync_mode)
+				  int frame_count, int sync_mode,
+				  int dma_trigger, int src_or_dst_synch)
 {
-	u16 w;
+	OMAP_DMA_CSDP_REG(lch) &= ~0x03;
+	OMAP_DMA_CSDP_REG(lch) |= data_type;
 
-	w = omap_readw(OMAP_DMA_CSDP(lch));
-	w &= ~0x03;
-	w |= data_type;
-	omap_writew(w, OMAP_DMA_CSDP(lch));
+	if (cpu_class_is_omap1()) {
+		OMAP_DMA_CCR_REG(lch) &= ~(1 << 5);
+		if (sync_mode == OMAP_DMA_SYNC_FRAME)
+			OMAP_DMA_CCR_REG(lch) |= 1 << 5;
 
-	w = omap_readw(OMAP_DMA_CCR(lch));
-	w &= ~(1 << 5);
-	if (sync_mode == OMAP_DMA_SYNC_FRAME)
-		w |= 1 << 5;
-	omap_writew(w, OMAP_DMA_CCR(lch));
+		OMAP1_DMA_CCR2_REG(lch) &= ~(1 << 2);
+		if (sync_mode == OMAP_DMA_SYNC_BLOCK)
+			OMAP1_DMA_CCR2_REG(lch) |= 1 << 2;
+	}
 
-	w = omap_readw(OMAP_DMA_CCR2(lch));
-	w &= ~(1 << 2);
-	if (sync_mode == OMAP_DMA_SYNC_BLOCK)
-		w |= 1 << 2;
-	omap_writew(w, OMAP_DMA_CCR2(lch));
+	if (cpu_is_omap24xx() && dma_trigger) {
+		u32 val = OMAP_DMA_CCR_REG(lch);
 
-	omap_writew(elem_count, OMAP_DMA_CEN(lch));
-	omap_writew(frame_count, OMAP_DMA_CFN(lch));
+		if (dma_trigger > 63)
+			val |= 1 << 20;
+		if (dma_trigger > 31)
+			val |= 1 << 19;
 
+		val |= (dma_trigger & 0x1f);
+
+		if (sync_mode & OMAP_DMA_SYNC_FRAME)
+			val |= 1 << 5;
+
+		if (sync_mode & OMAP_DMA_SYNC_BLOCK)
+			val |= 1 << 18;
+
+		if (src_or_dst_synch)
+			val |= 1 << 24;		/* source synch */
+		else
+			val &= ~(1 << 24);	/* dest synch */
+
+		OMAP_DMA_CCR_REG(lch) = val;
+	}
+
+	OMAP_DMA_CEN_REG(lch) = elem_count;
+	OMAP_DMA_CFN_REG(lch) = frame_count;
 }
+
 void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode, u32 color)
 {
 	u16 w;
 
 	BUG_ON(omap_dma_in_1510_mode());
 
-	w = omap_readw(OMAP_DMA_CCR2(lch)) & ~0x03;
+	if (cpu_is_omap24xx()) {
+		REVISIT_24XX();
+		return;
+	}
+
+	w = OMAP1_DMA_CCR2_REG(lch) & ~0x03;
 	switch (mode) {
 	case OMAP_DMA_CONSTANT_FILL:
 		w |= 0x01;
@@ -165,63 +215,84 @@
 	default:
 		BUG();
 	}
-	omap_writew(w, OMAP_DMA_CCR2(lch));
+	OMAP1_DMA_CCR2_REG(lch) = w;
 
-	w = omap_readw(OMAP_DMA_LCH_CTRL(lch)) & ~0x0f;
+	w = OMAP1_DMA_LCH_CTRL_REG(lch) & ~0x0f;
 	/* Default is channel type 2D */
 	if (mode) {
-		omap_writew((u16)color, OMAP_DMA_COLOR_L(lch));
-		omap_writew((u16)(color >> 16), OMAP_DMA_COLOR_U(lch));
+		OMAP1_DMA_COLOR_L_REG(lch) = (u16)color;
+		OMAP1_DMA_COLOR_U_REG(lch) = (u16)(color >> 16);
 		w |= 1;		/* Channel type G */
 	}
-	omap_writew(w, OMAP_DMA_LCH_CTRL(lch));
+	OMAP1_DMA_LCH_CTRL_REG(lch) = w;
 }
 
-
+/* Note that src_port is only for OMAP1 */
 void omap_set_dma_src_params(int lch, int src_port, int src_amode,
-			     unsigned long src_start)
+			     unsigned long src_start,
+			     int src_ei, int src_fi)
 {
-	u16 w;
+	if (cpu_class_is_omap1()) {
+		OMAP_DMA_CSDP_REG(lch) &= ~(0x1f << 2);
+		OMAP_DMA_CSDP_REG(lch) |= src_port << 2;
+	}
 
-	w = omap_readw(OMAP_DMA_CSDP(lch));
-	w &= ~(0x1f << 2);
-	w |= src_port << 2;
-	omap_writew(w, OMAP_DMA_CSDP(lch));
+	OMAP_DMA_CCR_REG(lch) &= ~(0x03 << 12);
+	OMAP_DMA_CCR_REG(lch) |= src_amode << 12;
 
-	w = omap_readw(OMAP_DMA_CCR(lch));
-	w &= ~(0x03 << 12);
-	w |= src_amode << 12;
-	omap_writew(w, OMAP_DMA_CCR(lch));
+	if (cpu_class_is_omap1()) {
+		OMAP1_DMA_CSSA_U_REG(lch) = src_start >> 16;
+		OMAP1_DMA_CSSA_L_REG(lch) = src_start;
+	}
 
-	omap_writew(src_start >> 16, OMAP_DMA_CSSA_U(lch));
-	omap_writew(src_start, OMAP_DMA_CSSA_L(lch));
+	if (cpu_is_omap24xx())
+		OMAP2_DMA_CSSA_REG(lch) = src_start;
+
+	OMAP_DMA_CSEI_REG(lch) = src_ei;
+	OMAP_DMA_CSFI_REG(lch) = src_fi;
+}
+
+void omap_set_dma_params(int lch, struct omap_dma_channel_params * params)
+{
+	omap_set_dma_transfer_params(lch, params->data_type,
+				     params->elem_count, params->frame_count,
+				     params->sync_mode, params->trigger,
+				     params->src_or_dst_synch);
+	omap_set_dma_src_params(lch, params->src_port,
+				params->src_amode, params->src_start,
+				params->src_ei, params->src_fi);
+
+	omap_set_dma_dest_params(lch, params->dst_port,
+				 params->dst_amode, params->dst_start,
+				 params->dst_ei, params->dst_fi);
 }
 
 void omap_set_dma_src_index(int lch, int eidx, int fidx)
 {
-	omap_writew(eidx, OMAP_DMA_CSEI(lch));
-	omap_writew(fidx, OMAP_DMA_CSFI(lch));
+	if (cpu_is_omap24xx()) {
+		REVISIT_24XX();
+		return;
+	}
+	OMAP_DMA_CSEI_REG(lch) = eidx;
+	OMAP_DMA_CSFI_REG(lch) = fidx;
 }
 
 void omap_set_dma_src_data_pack(int lch, int enable)
 {
-	u16 w;
-
-	w = omap_readw(OMAP_DMA_CSDP(lch)) & ~(1 << 6);
-	w |= enable ? (1 << 6) : 0;
-	omap_writew(w, OMAP_DMA_CSDP(lch));
+	OMAP_DMA_CSDP_REG(lch) &= ~(1 << 6);
+	if (enable)
+		OMAP_DMA_CSDP_REG(lch) |= (1 << 6);
 }
 
 void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
 {
-	u16 w;
+	OMAP_DMA_CSDP_REG(lch) &= ~(0x03 << 7);
 
-	w = omap_readw(OMAP_DMA_CSDP(lch)) & ~(0x03 << 7);
 	switch (burst_mode) {
 	case OMAP_DMA_DATA_BURST_DIS:
 		break;
 	case OMAP_DMA_DATA_BURST_4:
-		w |= (0x01 << 7);
+		OMAP_DMA_CSDP_REG(lch) |= (0x02 << 7);
 		break;
 	case OMAP_DMA_DATA_BURST_8:
 		/* not supported by current hardware
@@ -231,175 +302,87 @@
 	default:
 		BUG();
 	}
-	omap_writew(w, OMAP_DMA_CSDP(lch));
 }
 
+/* Note that dest_port is only for OMAP1 */
 void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode,
-			      unsigned long dest_start)
+			      unsigned long dest_start,
+			      int dst_ei, int dst_fi)
 {
-	u16 w;
+	if (cpu_class_is_omap1()) {
+		OMAP_DMA_CSDP_REG(lch) &= ~(0x1f << 9);
+		OMAP_DMA_CSDP_REG(lch) |= dest_port << 9;
+	}
 
-	w = omap_readw(OMAP_DMA_CSDP(lch));
-	w &= ~(0x1f << 9);
-	w |= dest_port << 9;
-	omap_writew(w, OMAP_DMA_CSDP(lch));
+	OMAP_DMA_CCR_REG(lch) &= ~(0x03 << 14);
+	OMAP_DMA_CCR_REG(lch) |= dest_amode << 14;
 
-	w = omap_readw(OMAP_DMA_CCR(lch));
-	w &= ~(0x03 << 14);
-	w |= dest_amode << 14;
-	omap_writew(w, OMAP_DMA_CCR(lch));
+	if (cpu_class_is_omap1()) {
+		OMAP1_DMA_CDSA_U_REG(lch) = dest_start >> 16;
+		OMAP1_DMA_CDSA_L_REG(lch) = dest_start;
+	}
 
-	omap_writew(dest_start >> 16, OMAP_DMA_CDSA_U(lch));
-	omap_writew(dest_start, OMAP_DMA_CDSA_L(lch));
+	if (cpu_is_omap24xx())
+		OMAP2_DMA_CDSA_REG(lch) = dest_start;
+
+	OMAP_DMA_CDEI_REG(lch) = dst_ei;
+	OMAP_DMA_CDFI_REG(lch) = dst_fi;
 }
 
 void omap_set_dma_dest_index(int lch, int eidx, int fidx)
 {
-	omap_writew(eidx, OMAP_DMA_CDEI(lch));
-	omap_writew(fidx, OMAP_DMA_CDFI(lch));
+	if (cpu_is_omap24xx()) {
+		REVISIT_24XX();
+		return;
+	}
+	OMAP_DMA_CDEI_REG(lch) = eidx;
+	OMAP_DMA_CDFI_REG(lch) = fidx;
 }
 
 void omap_set_dma_dest_data_pack(int lch, int enable)
 {
-	u16 w;
-
-	w = omap_readw(OMAP_DMA_CSDP(lch)) & ~(1 << 13);
-	w |= enable ? (1 << 13) : 0;
-	omap_writew(w, OMAP_DMA_CSDP(lch));
+	OMAP_DMA_CSDP_REG(lch) &= ~(1 << 13);
+	if (enable)
+		OMAP_DMA_CSDP_REG(lch) |= 1 << 13;
 }
 
 void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
 {
-	u16 w;
+	OMAP_DMA_CSDP_REG(lch) &= ~(0x03 << 14);
 
-	w = omap_readw(OMAP_DMA_CSDP(lch)) & ~(0x03 << 14);
 	switch (burst_mode) {
 	case OMAP_DMA_DATA_BURST_DIS:
 		break;
 	case OMAP_DMA_DATA_BURST_4:
-		w |= (0x01 << 14);
+		OMAP_DMA_CSDP_REG(lch) |= (0x02 << 14);
 		break;
 	case OMAP_DMA_DATA_BURST_8:
-		w |= (0x03 << 14);
+		OMAP_DMA_CSDP_REG(lch) |= (0x03 << 14);
 		break;
 	default:
 		printk(KERN_ERR "Invalid DMA burst mode\n");
 		BUG();
 		return;
 	}
-	omap_writew(w, OMAP_DMA_CSDP(lch));
 }
 
-static inline void init_intr(int lch)
+static inline void omap_enable_channel_irq(int lch)
 {
-	u16 w;
+	u32 status;
 
 	/* Read CSR to make sure it's cleared. */
-	w = omap_readw(OMAP_DMA_CSR(lch));
+	status = OMAP_DMA_CSR_REG(lch);
+
 	/* Enable some nice interrupts. */
-	omap_writew(dma_chan[lch].enabled_irqs, OMAP_DMA_CICR(lch));
+	OMAP_DMA_CICR_REG(lch) = dma_chan[lch].enabled_irqs;
+
 	dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
 }
 
-static inline void enable_lnk(int lch)
+static void omap_disable_channel_irq(int lch)
 {
-	u16 w;
-
-	/* Clear the STOP_LNK bits */
-	w = omap_readw(OMAP_DMA_CLNK_CTRL(lch));
-	w &= ~(1 << 14);
-	omap_writew(w, OMAP_DMA_CLNK_CTRL(lch));
-
-	/* And set the ENABLE_LNK bits */
-	if (dma_chan[lch].next_lch != -1)
-		omap_writew(dma_chan[lch].next_lch | (1 << 15),
-			    OMAP_DMA_CLNK_CTRL(lch));
-}
-
-static inline void disable_lnk(int lch)
-{
-	u16 w;
-
-	/* Disable interrupts */
-	omap_writew(0, OMAP_DMA_CICR(lch));
-
-	/* Set the STOP_LNK bit */
-	w = omap_readw(OMAP_DMA_CLNK_CTRL(lch));
-	w |= (1 << 14);
-	w = omap_writew(w, OMAP_DMA_CLNK_CTRL(lch));
-
-	dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
-}
-
-void omap_start_dma(int lch)
-{
-	u16 w;
-
-	if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
-		int next_lch, cur_lch;
-		char dma_chan_link_map[OMAP_LOGICAL_DMA_CH_COUNT];
-
-		dma_chan_link_map[lch] = 1;
-		/* Set the link register of the first channel */
-		enable_lnk(lch);
-
-		memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
-		cur_lch = dma_chan[lch].next_lch;
-		do {
-			next_lch = dma_chan[cur_lch].next_lch;
-
-                        /* The loop case: we've been here already */
-			if (dma_chan_link_map[cur_lch])
-				break;
-			/* Mark the current channel */
-			dma_chan_link_map[cur_lch] = 1;
-
-			enable_lnk(cur_lch);
-			init_intr(cur_lch);
-
-			cur_lch = next_lch;
-		} while (next_lch != -1);
-	}
-
-	init_intr(lch);
-
-	w = omap_readw(OMAP_DMA_CCR(lch));
-	w |= OMAP_DMA_CCR_EN;
-	omap_writew(w, OMAP_DMA_CCR(lch));
-	dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
-}
-
-void omap_stop_dma(int lch)
-{
-	u16 w;
-
-	if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
-		int next_lch, cur_lch = lch;
-		char dma_chan_link_map[OMAP_LOGICAL_DMA_CH_COUNT];
-
-		memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
-		do {
-			/* The loop case: we've been here already */
-			if (dma_chan_link_map[cur_lch])
-				break;
-			/* Mark the current channel */
-			dma_chan_link_map[cur_lch] = 1;
-
-			disable_lnk(cur_lch);
-
-			next_lch = dma_chan[cur_lch].next_lch;
-			cur_lch = next_lch;
-		} while (next_lch != -1);
-
-		return;
-	}
-	/* Disable all interrupts on the channel */
-	omap_writew(0, OMAP_DMA_CICR(lch));
-
-	w = omap_readw(OMAP_DMA_CCR(lch));
-	w &= ~OMAP_DMA_CCR_EN;
-	omap_writew(w, OMAP_DMA_CCR(lch));
-	dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
+	if (cpu_is_omap24xx())
+		OMAP_DMA_CICR_REG(lch) = 0;
 }
 
 void omap_enable_dma_irq(int lch, u16 bits)
@@ -412,55 +395,45 @@
 	dma_chan[lch].enabled_irqs &= ~bits;
 }
 
-static int dma_handle_ch(int ch)
+static inline void enable_lnk(int lch)
 {
-	u16 csr;
+	if (cpu_class_is_omap1())
+		OMAP_DMA_CLNK_CTRL_REG(lch) &= ~(1 << 14);
 
-	if (enable_1510_mode && ch >= 6) {
-		csr = dma_chan[ch].saved_csr;
-		dma_chan[ch].saved_csr = 0;
-	} else
-		csr = omap_readw(OMAP_DMA_CSR(ch));
-	if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) {
-		dma_chan[ch + 6].saved_csr = csr >> 7;
-		csr &= 0x7f;
-	}
-	if ((csr & 0x3f) == 0)
-		return 0;
-	if (unlikely(dma_chan[ch].dev_id == -1)) {
-		printk(KERN_WARNING "Spurious interrupt from DMA channel %d (CSR %04x)\n",
-		       ch, csr);
-		return 0;
-	}
-	if (unlikely(csr & OMAP_DMA_TOUT_IRQ))
-		printk(KERN_WARNING "DMA timeout with device %d\n", dma_chan[ch].dev_id);
-	if (unlikely(csr & OMAP_DMA_DROP_IRQ))
-		printk(KERN_WARNING "DMA synchronization event drop occurred with device %d\n",
-		       dma_chan[ch].dev_id);
-	if (likely(csr & OMAP_DMA_BLOCK_IRQ))
-		dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
-	if (likely(dma_chan[ch].callback != NULL))
-		dma_chan[ch].callback(ch, csr, dma_chan[ch].data);
-	return 1;
+	/* Set the ENABLE_LNK bits */
+	if (dma_chan[lch].next_lch != -1)
+		OMAP_DMA_CLNK_CTRL_REG(lch) =
+			dma_chan[lch].next_lch | (1 << 15);
 }
 
-static irqreturn_t dma_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
+static inline void disable_lnk(int lch)
 {
-	int ch = ((int) dev_id) - 1;
-	int handled = 0;
-
-	for (;;) {
-		int handled_now = 0;
-
-		handled_now += dma_handle_ch(ch);
-		if (enable_1510_mode && dma_chan[ch + 6].saved_csr)
-			handled_now += dma_handle_ch(ch + 6);
-		if (!handled_now)
-			break;
-		handled += handled_now;
+	/* Disable interrupts */
+	if (cpu_class_is_omap1()) {
+		OMAP_DMA_CICR_REG(lch) = 0;
+		/* Set the STOP_LNK bit */
+		OMAP_DMA_CLNK_CTRL_REG(lch) |= 1 << 14;
 	}
 
-	return handled ? IRQ_HANDLED : IRQ_NONE;
+	if (cpu_is_omap24xx()) {
+		omap_disable_channel_irq(lch);
+		/* Clear the ENABLE_LNK bit */
+		OMAP_DMA_CLNK_CTRL_REG(lch) &= ~(1 << 15);
+	}
+
+	dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
+}
+
+static inline void omap2_enable_irq_lch(int lch)
+{
+	u32 val;
+
+	if (!cpu_is_omap24xx())
+		return;
+
+	val = omap_readl(OMAP_DMA4_IRQENABLE_L0);
+	val |= 1 << lch;
+	omap_writel(val, OMAP_DMA4_IRQENABLE_L0);
 }
 
 int omap_request_dma(int dev_id, const char *dev_name,
@@ -485,14 +458,23 @@
 	}
 	chan = dma_chan + free_ch;
 	chan->dev_id = dev_id;
-	clear_lch_regs(free_ch);
+
+	if (cpu_class_is_omap1())
+		clear_lch_regs(free_ch);
+
+	if (cpu_is_omap24xx())
+		omap_clear_dma(free_ch);
+
 	spin_unlock_irqrestore(&dma_chan_lock, flags);
 
-	chan->dev_id = dev_id;
 	chan->dev_name = dev_name;
 	chan->callback = callback;
 	chan->data = data;
-	chan->enabled_irqs = OMAP_DMA_TOUT_IRQ | OMAP_DMA_DROP_IRQ | OMAP_DMA_BLOCK_IRQ;
+	chan->enabled_irqs = OMAP_DMA_TOUT_IRQ | OMAP_DMA_DROP_IRQ |
+				OMAP_DMA_BLOCK_IRQ;
+
+	if (cpu_is_omap24xx())
+		chan->enabled_irqs |= OMAP2_DMA_TRANS_ERR_IRQ;
 
 	if (cpu_is_omap16xx()) {
 		/* If the sync device is set, configure it dynamically. */
@@ -502,37 +484,242 @@
 		}
 		/* Disable the 1510 compatibility mode and set the sync device
 		 * id. */
-		omap_writew(dev_id | (1 << 10), OMAP_DMA_CCR(free_ch));
-	} else {
-		omap_writew(dev_id, OMAP_DMA_CCR(free_ch));
+		OMAP_DMA_CCR_REG(free_ch) = dev_id | (1 << 10);
+	} else if (cpu_is_omap730() || cpu_is_omap15xx()) {
+		OMAP_DMA_CCR_REG(free_ch) = dev_id;
 	}
+
+	if (cpu_is_omap24xx()) {
+		omap2_enable_irq_lch(free_ch);
+
+		omap_enable_channel_irq(free_ch);
+		/* Clear the CSR register and IRQ status register */
+		OMAP_DMA_CSR_REG(free_ch) = 0x0;
+		omap_writel(~0x0, OMAP_DMA4_IRQSTATUS_L0);
+	}
+
 	*dma_ch_out = free_ch;
 
 	return 0;
 }
 
-void omap_free_dma(int ch)
+void omap_free_dma(int lch)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&dma_chan_lock, flags);
-	if (dma_chan[ch].dev_id == -1) {
-		printk("omap_dma: trying to free nonallocated DMA channel %d\n", ch);
+	if (dma_chan[lch].dev_id == -1) {
+		printk("omap_dma: trying to free nonallocated DMA channel %d\n",
+		       lch);
 		spin_unlock_irqrestore(&dma_chan_lock, flags);
 		return;
 	}
-	dma_chan[ch].dev_id = -1;
+	dma_chan[lch].dev_id = -1;
+	dma_chan[lch].next_lch = -1;
+	dma_chan[lch].callback = NULL;
 	spin_unlock_irqrestore(&dma_chan_lock, flags);
 
-	/* Disable all DMA interrupts for the channel. */
-	omap_writew(0, OMAP_DMA_CICR(ch));
-	/* Make sure the DMA transfer is stopped. */
-	omap_writew(0, OMAP_DMA_CCR(ch));
+	if (cpu_class_is_omap1()) {
+		/* Disable all DMA interrupts for the channel. */
+		OMAP_DMA_CICR_REG(lch) = 0;
+		/* Make sure the DMA transfer is stopped. */
+		OMAP_DMA_CCR_REG(lch) = 0;
+	}
+
+	if (cpu_is_omap24xx()) {
+		u32 val;
+		/* Disable interrupts */
+		val = omap_readl(OMAP_DMA4_IRQENABLE_L0);
+		val &= ~(1 << lch);
+		omap_writel(val, OMAP_DMA4_IRQENABLE_L0);
+
+		/* Clear the CSR register and IRQ status register */
+		OMAP_DMA_CSR_REG(lch) = 0x0;
+
+		val = omap_readl(OMAP_DMA4_IRQSTATUS_L0);
+		val |= 1 << lch;
+		omap_writel(val, OMAP_DMA4_IRQSTATUS_L0);
+
+		/* Disable all DMA interrupts for the channel. */
+		OMAP_DMA_CICR_REG(lch) = 0;
+
+		/* Make sure the DMA transfer is stopped. */
+		OMAP_DMA_CCR_REG(lch) = 0;
+		omap_clear_dma(lch);
+	}
 }
 
-int omap_dma_in_1510_mode(void)
+/*
+ * Clears any DMA state so the DMA engine is ready to restart with new buffers
+ * through omap_start_dma(). Any buffers in flight are discarded.
+ */
+void omap_clear_dma(int lch)
 {
-	return enable_1510_mode;
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	if (cpu_class_is_omap1()) {
+		int status;
+		OMAP_DMA_CCR_REG(lch) &= ~OMAP_DMA_CCR_EN;
+
+		/* Clear pending interrupts */
+		status = OMAP_DMA_CSR_REG(lch);
+	}
+
+	if (cpu_is_omap24xx()) {
+		int i;
+		u32 lch_base = OMAP24XX_DMA_BASE + lch * 0x60 + 0x80;
+		for (i = 0; i < 0x44; i += 4)
+			omap_writel(0, lch_base + i);
+	}
+
+	local_irq_restore(flags);
+}
+
+void omap_start_dma(int lch)
+{
+	if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
+		int next_lch, cur_lch;
+		char dma_chan_link_map[OMAP_LOGICAL_DMA_CH_COUNT];
+
+		dma_chan_link_map[lch] = 1;
+		/* Set the link register of the first channel */
+		enable_lnk(lch);
+
+		memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
+		cur_lch = dma_chan[lch].next_lch;
+		do {
+			next_lch = dma_chan[cur_lch].next_lch;
+
+			/* The loop case: we've been here already */
+			if (dma_chan_link_map[cur_lch])
+				break;
+			/* Mark the current channel */
+			dma_chan_link_map[cur_lch] = 1;
+
+			enable_lnk(cur_lch);
+			omap_enable_channel_irq(cur_lch);
+
+			cur_lch = next_lch;
+		} while (next_lch != -1);
+	} else if (cpu_is_omap24xx()) {
+		/* Errata: Need to write lch even if not using chaining */
+		OMAP_DMA_CLNK_CTRL_REG(lch) = lch;
+	}
+
+	omap_enable_channel_irq(lch);
+
+	/* Errata: On ES2.0 BUFFERING disable must be set.
+	 * This will always fail on ES1.0 */
+	if (cpu_is_omap24xx()) {
+		OMAP_DMA_CCR_REG(lch) |= OMAP_DMA_CCR_EN;
+	}
+
+	OMAP_DMA_CCR_REG(lch) |= OMAP_DMA_CCR_EN;
+
+	dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
+}
+
+void omap_stop_dma(int lch)
+{
+	if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
+		int next_lch, cur_lch = lch;
+		char dma_chan_link_map[OMAP_LOGICAL_DMA_CH_COUNT];
+
+		memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
+		do {
+			/* The loop case: we've been here already */
+			if (dma_chan_link_map[cur_lch])
+				break;
+			/* Mark the current channel */
+			dma_chan_link_map[cur_lch] = 1;
+
+			disable_lnk(cur_lch);
+
+			next_lch = dma_chan[cur_lch].next_lch;
+			cur_lch = next_lch;
+		} while (next_lch != -1);
+
+		return;
+	}
+
+	/* Disable all interrupts on the channel */
+	if (cpu_class_is_omap1())
+		OMAP_DMA_CICR_REG(lch) = 0;
+
+	OMAP_DMA_CCR_REG(lch) &= ~OMAP_DMA_CCR_EN;
+	dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
+}
+
+/*
+ * Returns the current physical source address for the given DMA channel.
+ * If the channel is running, the caller must disable interrupts prior to
+ * calling this function and process the returned value before re-enabling
+ * interrupts, to prevent races with the interrupt handler. Note that in
+ * continuous mode the CSSA_L register may overflow between the two reads,
+ * resulting in an incorrect return value.
+ */
+dma_addr_t omap_get_dma_src_pos(int lch)
+{
+	dma_addr_t offset;
+
+	if (cpu_class_is_omap1())
+		offset = (dma_addr_t) (OMAP1_DMA_CSSA_L_REG(lch) |
+				       (OMAP1_DMA_CSSA_U_REG(lch) << 16));
+
+	if (cpu_is_omap24xx())
+		offset = OMAP_DMA_CSAC_REG(lch);
+
+	return offset;
+}
+
+/*
+ * Returns the current physical destination address for the given DMA channel.
+ * If the channel is running, the caller must disable interrupts prior to
+ * calling this function and process the returned value before re-enabling
+ * interrupts, to prevent races with the interrupt handler. Note that in
+ * continuous mode the CDSA_L register may overflow between the two reads,
+ * resulting in an incorrect return value.
+ */
+dma_addr_t omap_get_dma_dst_pos(int lch)
+{
+	dma_addr_t offset;
+
+	if (cpu_class_is_omap1())
+		offset = (dma_addr_t) (OMAP1_DMA_CDSA_L_REG(lch) |
+				       (OMAP1_DMA_CDSA_U_REG(lch) << 16));
+
+	if (cpu_is_omap24xx())
+		offset = OMAP2_DMA_CDSA_REG(lch);
+
+	return offset;
+}
+
+/*
+ * Returns the current source transfer count for the given DMA channel.
+ * Can be used to monitor the progress of a transfer inside a block.
+ * It must be called with interrupts disabled.
+ */
+int omap_get_dma_src_addr_counter(int lch)
+{
+	return (dma_addr_t) OMAP_DMA_CSAC_REG(lch);
+}
+
+int omap_dma_running(void)
+{
+	int lch;
+
+	/* Check if LCD DMA is running */
+	if (cpu_is_omap16xx())
+		if (omap_readw(OMAP1610_DMA_LCD_CCR) & OMAP_DMA_CCR_EN)
+			return 1;
+
+	for (lch = 0; lch < dma_chan_count; lch++)
+		if (OMAP_DMA_CCR_REG(lch) & OMAP_DMA_CCR_EN)
+			return 1;
+
+	return 0;
 }
 
 /*
@@ -550,7 +737,8 @@
 
 	if ((dma_chan[lch_head].dev_id == -1) ||
 	    (dma_chan[lch_queue].dev_id == -1)) {
-		printk(KERN_ERR "omap_dma: trying to link non requested channels\n");
+		printk(KERN_ERR "omap_dma: trying to link "
+		       "non requested channels\n");
 		dump_stack();
 	}
 
@@ -570,20 +758,149 @@
 
 	if (dma_chan[lch_head].next_lch != lch_queue ||
 	    dma_chan[lch_head].next_lch == -1) {
-		printk(KERN_ERR "omap_dma: trying to unlink non linked channels\n");
+		printk(KERN_ERR "omap_dma: trying to unlink "
+		       "non linked channels\n");
 		dump_stack();
 	}
 
 
 	if ((dma_chan[lch_head].flags & OMAP_DMA_ACTIVE) ||
 	    (dma_chan[lch_head].flags & OMAP_DMA_ACTIVE)) {
-		printk(KERN_ERR "omap_dma: You need to stop the DMA channels before unlinking\n");
+		printk(KERN_ERR "omap_dma: You need to stop the DMA channels "
+		       "before unlinking\n");
 		dump_stack();
 	}
 
 	dma_chan[lch_head].next_lch = -1;
 }
 
+/*----------------------------------------------------------------------------*/
+
+#ifdef CONFIG_ARCH_OMAP1
+
+static int omap1_dma_handle_ch(int ch)
+{
+	u16 csr;
+
+	if (enable_1510_mode && ch >= 6) {
+		csr = dma_chan[ch].saved_csr;
+		dma_chan[ch].saved_csr = 0;
+	} else
+		csr = OMAP_DMA_CSR_REG(ch);
+	if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) {
+		dma_chan[ch + 6].saved_csr = csr >> 7;
+		csr &= 0x7f;
+	}
+	if ((csr & 0x3f) == 0)
+		return 0;
+	if (unlikely(dma_chan[ch].dev_id == -1)) {
+		printk(KERN_WARNING "Spurious interrupt from DMA channel "
+		       "%d (CSR %04x)\n", ch, csr);
+		return 0;
+	}
+	if (unlikely(csr & OMAP_DMA_TOUT_IRQ))
+		printk(KERN_WARNING "DMA timeout with device %d\n",
+		       dma_chan[ch].dev_id);
+	if (unlikely(csr & OMAP_DMA_DROP_IRQ))
+		printk(KERN_WARNING "DMA synchronization event drop occurred "
+		       "with device %d\n", dma_chan[ch].dev_id);
+	if (likely(csr & OMAP_DMA_BLOCK_IRQ))
+		dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
+	if (likely(dma_chan[ch].callback != NULL))
+		dma_chan[ch].callback(ch, csr, dma_chan[ch].data);
+	return 1;
+}
+
+static irqreturn_t omap1_dma_irq_handler(int irq, void *dev_id,
+					 struct pt_regs *regs)
+{
+	int ch = ((int) dev_id) - 1;
+	int handled = 0;
+
+	for (;;) {
+		int handled_now = 0;
+
+		handled_now += omap1_dma_handle_ch(ch);
+		if (enable_1510_mode && dma_chan[ch + 6].saved_csr)
+			handled_now += omap1_dma_handle_ch(ch + 6);
+		if (!handled_now)
+			break;
+		handled += handled_now;
+	}
+
+	return handled ? IRQ_HANDLED : IRQ_NONE;
+}
+
+#else
+#define omap1_dma_irq_handler	NULL
+#endif
+
+#ifdef CONFIG_ARCH_OMAP2
+
+static int omap2_dma_handle_ch(int ch)
+{
+	u32 status = OMAP_DMA_CSR_REG(ch);
+	u32 val;
+
+	if (!status)
+		return 0;
+	if (unlikely(dma_chan[ch].dev_id == -1))
+		return 0;
+	/* REVISIT: According to 24xx TRM, there's no TOUT_IE */
+	if (unlikely(status & OMAP_DMA_TOUT_IRQ))
+		printk(KERN_INFO "DMA timeout with device %d\n",
+		       dma_chan[ch].dev_id);
+	if (unlikely(status & OMAP_DMA_DROP_IRQ))
+		printk(KERN_INFO
+		       "DMA synchronization event drop occurred with device "
+		       "%d\n", dma_chan[ch].dev_id);
+
+	if (unlikely(status & OMAP2_DMA_TRANS_ERR_IRQ))
+		printk(KERN_INFO "DMA transaction error with device %d\n",
+		       dma_chan[ch].dev_id);
+
+	OMAP_DMA_CSR_REG(ch) = 0x20;
+
+	val = omap_readl(OMAP_DMA4_IRQSTATUS_L0);
+	/* ch in this function is numbered 0-31 while in the register it is 1-32 */
+	val = 1 << (ch);
+	omap_writel(val, OMAP_DMA4_IRQSTATUS_L0);
+
+	if (likely(dma_chan[ch].callback != NULL))
+		dma_chan[ch].callback(ch, status, dma_chan[ch].data);
+
+	return 0;
+}
+
+/* The STATUS register numbers channels 1-32 while ours are numbered 0-31 */
+static irqreturn_t omap2_dma_irq_handler(int irq, void *dev_id,
+					 struct pt_regs *regs)
+{
+	u32 val;
+	int i;
+
+	val = omap_readl(OMAP_DMA4_IRQSTATUS_L0);
+
+	for (i = 1; i <= OMAP_LOGICAL_DMA_CH_COUNT; i++) {
+		int active = val & (1 << (i - 1));
+		if (active)
+			omap2_dma_handle_ch(i - 1);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static struct irqaction omap24xx_dma_irq = {
+	.name = "DMA",
+	.handler = omap2_dma_irq_handler,
+	.flags = SA_INTERRUPT
+};
+
+#else
+static struct irqaction omap24xx_dma_irq;
+#endif
+
+/*----------------------------------------------------------------------------*/
 
 static struct lcd_dma_info {
 	spinlock_t lock;
@@ -795,7 +1112,7 @@
 	/* Always set the source port as SDRAM for now*/
 	w &= ~(0x03 << 6);
 	if (lcd_dma.callback != NULL)
-		w |= 1 << 1;            /* Block interrupt enable */
+		w |= 1 << 1;		/* Block interrupt enable */
 	else
 		w &= ~(1 << 1);
 	omap_writew(w, OMAP1610_DMA_LCD_CTRL);
@@ -814,7 +1131,8 @@
 	omap_writew(fi, OMAP1610_DMA_LCD_SRC_FI_B1_L);
 }
 
-static irqreturn_t lcd_dma_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
+static irqreturn_t lcd_dma_irq_handler(int irq, void *dev_id,
+				       struct pt_regs *regs)
 {
 	u16 w;
 
@@ -870,7 +1188,8 @@
 		return;
 	}
 	if (!enable_1510_mode)
-		omap_writew(omap_readw(OMAP1610_DMA_LCD_CCR) & ~1, OMAP1610_DMA_LCD_CCR);
+		omap_writew(omap_readw(OMAP1610_DMA_LCD_CCR) & ~1,
+			    OMAP1610_DMA_LCD_CCR);
 	lcd_dma.reserved = 0;
 	spin_unlock(&lcd_dma.lock);
 }
@@ -939,93 +1258,24 @@
 	omap_writew(w, OMAP1610_DMA_LCD_CTRL);
 }
 
-/*
- * Clears any DMA state so the DMA engine is ready to restart with new buffers
- * through omap_start_dma(). Any buffers in flight are discarded.
- */
-void omap_clear_dma(int lch)
-{
-	unsigned long flags;
-	int status;
-
-	local_irq_save(flags);
-	omap_writew(omap_readw(OMAP_DMA_CCR(lch)) & ~OMAP_DMA_CCR_EN,
-		    OMAP_DMA_CCR(lch));
-	status = OMAP_DMA_CSR(lch);	/* clear pending interrupts */
-	local_irq_restore(flags);
-}
-
-/*
- * Returns current physical source address for the given DMA channel.
- * If the channel is running the caller must disable interrupts prior calling
- * this function and process the returned value before re-enabling interrupt to
- * prevent races with the interrupt handler. Note that in continuous mode there
- * is a chance for CSSA_L register overflow inbetween the two reads resulting
- * in incorrect return value.
- */
-dma_addr_t omap_get_dma_src_pos(int lch)
-{
-	return (dma_addr_t) (omap_readw(OMAP_DMA_CSSA_L(lch)) |
-	(omap_readw(OMAP_DMA_CSSA_U(lch)) << 16));
-}
-
-/*
- * Returns current physical destination address for the given DMA channel.
- * If the channel is running the caller must disable interrupts prior calling
- * this function and process the returned value before re-enabling interrupt to
- * prevent races with the interrupt handler. Note that in continuous mode there
- * is a chance for CDSA_L register overflow inbetween the two reads resulting
- * in incorrect return value.
- */
-dma_addr_t omap_get_dma_dst_pos(int lch)
-{
-	return (dma_addr_t) (omap_readw(OMAP_DMA_CDSA_L(lch)) |
-	(omap_readw(OMAP_DMA_CDSA_U(lch)) << 16));
-}
-
-/*
- * Returns current source transfer counting for the given DMA channel.
- * Can be used to monitor the progress of a transfer inside a  block.
- * It must be called with disabled interrupts.
- */
-int omap_get_dma_src_addr_counter(int lch)
-{
-	return (dma_addr_t) omap_readw(OMAP_DMA_CSAC(lch));
-}
-
-int omap_dma_running(void)
-{
-	int lch;
-
-	/* Check if LCD DMA is running */
-	if (cpu_is_omap16xx())
-		if (omap_readw(OMAP1610_DMA_LCD_CCR) & OMAP_DMA_CCR_EN)
-			return 1;
-
-	for (lch = 0; lch < dma_chan_count; lch++) {
-		u16 w;
-
-		w = omap_readw(OMAP_DMA_CCR(lch));
-		if (w & OMAP_DMA_CCR_EN)
-			return 1;
-	}
-	return 0;
-}
+/*----------------------------------------------------------------------------*/
 
 static int __init omap_init_dma(void)
 {
 	int ch, r;
 
-	if (cpu_is_omap1510()) {
-		printk(KERN_INFO "DMA support for OMAP1510 initialized\n");
+	if (cpu_is_omap15xx()) {
+		printk(KERN_INFO "DMA support for OMAP15xx initialized\n");
 		dma_chan_count = 9;
 		enable_1510_mode = 1;
 	} else if (cpu_is_omap16xx() || cpu_is_omap730()) {
 		printk(KERN_INFO "OMAP DMA hardware version %d\n",
 		       omap_readw(OMAP_DMA_HW_ID));
 		printk(KERN_INFO "DMA capabilities: %08x:%08x:%04x:%04x:%04x\n",
-		       (omap_readw(OMAP_DMA_CAPS_0_U) << 16) | omap_readw(OMAP_DMA_CAPS_0_L),
-		       (omap_readw(OMAP_DMA_CAPS_1_U) << 16) | omap_readw(OMAP_DMA_CAPS_1_L),
+		       (omap_readw(OMAP_DMA_CAPS_0_U) << 16) |
+		       omap_readw(OMAP_DMA_CAPS_0_L),
+		       (omap_readw(OMAP_DMA_CAPS_1_U) << 16) |
+		       omap_readw(OMAP_DMA_CAPS_1_L),
 		       omap_readw(OMAP_DMA_CAPS_2), omap_readw(OMAP_DMA_CAPS_3),
 		       omap_readw(OMAP_DMA_CAPS_4));
 		if (!enable_1510_mode) {
@@ -1038,6 +1288,11 @@
 			dma_chan_count = 16;
 		} else
 			dma_chan_count = 9;
+	} else if (cpu_is_omap24xx()) {
+		u8 revision = omap_readb(OMAP_DMA4_REVISION);
+		printk(KERN_INFO "OMAP DMA hardware revision %d.%d\n",
+		       revision >> 4, revision & 0xf);
+		dma_chan_count = OMAP_LOGICAL_DMA_CH_COUNT;
 	} else {
 		dma_chan_count = 0;
 		return 0;
@@ -1049,41 +1304,56 @@
 	memset(&dma_chan, 0, sizeof(dma_chan));
 
 	for (ch = 0; ch < dma_chan_count; ch++) {
+		omap_clear_dma(ch);
 		dma_chan[ch].dev_id = -1;
 		dma_chan[ch].next_lch = -1;
 
 		if (ch >= 6 && enable_1510_mode)
 			continue;
 
-		/* request_irq() doesn't like dev_id (ie. ch) being zero,
-		 * so we have to kludge around this. */
-		r = request_irq(dma_irq[ch], dma_irq_handler, 0, "DMA",
-				(void *) (ch + 1));
+		if (cpu_class_is_omap1()) {
+			/* request_irq() doesn't like dev_id (ie. ch) being
+			 * zero, so we have to kludge around this. */
+			r = request_irq(omap1_dma_irq[ch],
+					omap1_dma_irq_handler, 0, "DMA",
+					(void *) (ch + 1));
+			if (r != 0) {
+				int i;
+
+				printk(KERN_ERR "unable to request IRQ %d "
+				       "for DMA (error %d)\n",
+				       omap1_dma_irq[ch], r);
+				for (i = 0; i < ch; i++)
+					free_irq(omap1_dma_irq[i],
+						 (void *) (i + 1));
+				return r;
+			}
+		}
+	}
+
+	if (cpu_is_omap24xx())
+		setup_irq(INT_24XX_SDMA_IRQ0, &omap24xx_dma_irq);
+
+	/* FIXME: Update LCD DMA to work on 24xx */
+	if (cpu_class_is_omap1()) {
+		r = request_irq(INT_DMA_LCD, lcd_dma_irq_handler, 0,
+				"LCD DMA", NULL);
 		if (r != 0) {
 			int i;
 
-			printk(KERN_ERR "unable to request IRQ %d for DMA (error %d)\n",
-			       dma_irq[ch], r);
-			for (i = 0; i < ch; i++)
-				free_irq(dma_irq[i], (void *) (i + 1));
+			printk(KERN_ERR "unable to request IRQ for LCD DMA "
+			       "(error %d)\n", r);
+			for (i = 0; i < dma_chan_count; i++)
+				free_irq(omap1_dma_irq[i], (void *) (i + 1));
 			return r;
 		}
 	}
-	r = request_irq(INT_DMA_LCD, lcd_dma_irq_handler, 0, "LCD DMA", NULL);
-	if (r != 0) {
-		int i;
 
-		printk(KERN_ERR "unable to request IRQ for LCD DMA (error %d)\n", r);
-		for (i = 0; i < dma_chan_count; i++)
-			free_irq(dma_irq[i], (void *) (i + 1));
-		return r;
-	}
 	return 0;
 }
 
 arch_initcall(omap_init_dma);
 
-
 EXPORT_SYMBOL(omap_get_dma_src_pos);
 EXPORT_SYMBOL(omap_get_dma_dst_pos);
 EXPORT_SYMBOL(omap_get_dma_src_addr_counter);
@@ -1109,6 +1379,8 @@
 EXPORT_SYMBOL(omap_set_dma_dest_data_pack);
 EXPORT_SYMBOL(omap_set_dma_dest_burst_mode);
 
+EXPORT_SYMBOL(omap_set_dma_params);
+
 EXPORT_SYMBOL(omap_dma_link_lch);
 EXPORT_SYMBOL(omap_dma_unlink_lch);
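The reworked dma.c above extends the channel setup calls with OMAP2-specific arguments: omap_set_dma_transfer_params() gains a DMA request trigger and a source/destination synchronization flag, and the source/destination setup calls gain element and frame index parameters (existing OMAP1 callers such as mcbsp.c below simply pass 0 for all of them). A minimal usage sketch under those signatures; the device id, register address, buffer and callback are placeholders, and the callback prototype is assumed to match the (lch, ch_status, data) invocation used by the interrupt handlers above:

/* Illustrative sketch only; example_dev_id, dev_data_reg and buf are
 * placeholders rather than values defined by this patch. */
static void example_dma_callback(int lch, u16 ch_status, void *data)
{
	/* runs from the DMA interrupt handler when the block completes */
}

static int example_start_tx(int example_dev_id, dma_addr_t buf,
			    unsigned long dev_data_reg, int words)
{
	int lch, ret;

	ret = omap_request_dma(example_dev_id, "example dma",
			       example_dma_callback, NULL, &lch);
	if (ret)
		return ret;

	/* The two trailing zeros are the new trigger and src_or_dst_synch
	 * arguments, only interpreted on OMAP2. */
	omap_set_dma_transfer_params(lch, OMAP_DMA_DATA_TYPE_S16,
				     words, 1, OMAP_DMA_SYNC_ELEMENT, 0, 0);
	omap_set_dma_src_params(lch, OMAP_DMA_PORT_EMIFF,
				OMAP_DMA_AMODE_POST_INC, buf, 0, 0);
	omap_set_dma_dest_params(lch, OMAP_DMA_PORT_TIPB,
				 OMAP_DMA_AMODE_CONSTANT, dev_data_reg, 0, 0);

	omap_start_dma(lch);
	return lch;
}

Progress can be polled with omap_get_dma_src_pos() or omap_get_dma_dst_pos(), but only with interrupts disabled around the call as their comments require, and the channel is torn down again with omap_stop_dma() followed by omap_free_dma().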
 
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c
index 55059a2..76f721d 100644
--- a/arch/arm/plat-omap/gpio.c
+++ b/arch/arm/plat-omap/gpio.c
@@ -140,7 +140,7 @@
 };
 #endif
 
-#ifdef CONFIG_ARCH_OMAP1510
+#ifdef CONFIG_ARCH_OMAP15XX
 static struct gpio_bank gpio_bank_1510[2] = {
 	{ OMAP_MPUIO_BASE,    INT_MPUIO,      IH_MPUIO_BASE, METHOD_MPUIO },
 	{ OMAP1510_GPIO_BASE, INT_GPIO_BANK1, IH_GPIO_BASE,  METHOD_GPIO_1510 }
@@ -173,7 +173,7 @@
 
 static inline struct gpio_bank *get_gpio_bank(int gpio)
 {
-#ifdef CONFIG_ARCH_OMAP1510
+#ifdef CONFIG_ARCH_OMAP15XX
 	if (cpu_is_omap1510()) {
 		if (OMAP_GPIO_IS_MPUIO(gpio))
 			return &gpio_bank[0];
@@ -222,7 +222,7 @@
 			return -1;
 		return 0;
 	}
-#ifdef CONFIG_ARCH_OMAP1510
+#ifdef CONFIG_ARCH_OMAP15XX
 	if (cpu_is_omap1510() && gpio < 16)
 		return 0;
 #endif
@@ -654,7 +654,7 @@
 	/* Set trigger to none. You need to enable the trigger after request_irq */
 	_set_gpio_triggering(bank, get_gpio_index(gpio), IRQT_NOEDGE);
 
-#ifdef CONFIG_ARCH_OMAP1510
+#ifdef CONFIG_ARCH_OMAP15XX
 	if (bank->method == METHOD_GPIO_1510) {
 		void __iomem *reg;
 
@@ -739,7 +739,7 @@
 	bank = (struct gpio_bank *) desc->data;
 	if (bank->method == METHOD_MPUIO)
 		isr_reg = bank->base + OMAP_MPUIO_GPIO_INT;
-#ifdef CONFIG_ARCH_OMAP1510
+#ifdef CONFIG_ARCH_OMAP15XX
 	if (bank->method == METHOD_GPIO_1510)
 		isr_reg = bank->base + OMAP1510_GPIO_INT_STATUS;
 #endif
@@ -774,7 +774,7 @@
 			d = irq_desc + gpio_irq;
 			desc_handle_irq(gpio_irq, d, regs);
 		}
-        }
+	}
 }
 
 static void gpio_ack_irq(unsigned int irq)
@@ -837,8 +837,9 @@
 	.unmask = mpuio_unmask_irq
 };
 
-static int initialized = 0;
-static struct clk * gpio_ck = NULL;
+static int initialized;
+static struct clk * gpio_ick;
+static struct clk * gpio_fck;
 
 static int __init _omap_gpio_init(void)
 {
@@ -848,14 +849,26 @@
 	initialized = 1;
 
 	if (cpu_is_omap1510()) {
-		gpio_ck = clk_get(NULL, "arm_gpio_ck");
-		if (IS_ERR(gpio_ck))
+		gpio_ick = clk_get(NULL, "arm_gpio_ck");
+		if (IS_ERR(gpio_ick))
 			printk("Could not get arm_gpio_ck\n");
 		else
-			clk_use(gpio_ck);
+			clk_use(gpio_ick);
+	}
+	if (cpu_is_omap24xx()) {
+		gpio_ick = clk_get(NULL, "gpios_ick");
+		if (IS_ERR(gpio_ick))
+			printk("Could not get gpios_ick\n");
+		else
+			clk_use(gpio_ick);
+		gpio_fck = clk_get(NULL, "gpios_fck");
+		if (IS_ERR(gpio_fck))
+			printk("Could not get gpios_fck\n");
+		else
+			clk_use(gpio_fck);
 	}
 
-#ifdef CONFIG_ARCH_OMAP1510
+#ifdef CONFIG_ARCH_OMAP15XX
 	if (cpu_is_omap1510()) {
 		printk(KERN_INFO "OMAP1510 GPIO hardware\n");
 		gpio_bank_count = 2;
@@ -901,7 +914,7 @@
 		if (bank->method == METHOD_MPUIO) {
 			omap_writew(0xFFFF, OMAP_MPUIO_BASE + OMAP_MPUIO_GPIO_MASKIT);
 		}
-#ifdef CONFIG_ARCH_OMAP1510
+#ifdef CONFIG_ARCH_OMAP15XX
 		if (bank->method == METHOD_GPIO_1510) {
 			__raw_writew(0xffff, bank->base + OMAP1510_GPIO_INT_MASK);
 			__raw_writew(0x0000, bank->base + OMAP1510_GPIO_INT_STATUS);
@@ -1038,6 +1051,7 @@
 
 /*
  * This may get called early from board specific init
+ * for boards that have interrupts routed via FPGA.
  */
 int omap_gpio_init(void)
 {
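The gpio.c changes above move from a single arm_gpio_ck to separate interface and functional clocks on 24xx, using the clk_get()/IS_ERR()/clk_use() pattern. A small sketch of that pattern in isolation, reusing the "gpios_ick" name the code above requests:

/* Minimal sketch of the clock lookup-and-enable pattern used in
 * _omap_gpio_init() above. */
static struct clk *example_ick;

static void example_enable_iclk(void)
{
	example_ick = clk_get(NULL, "gpios_ick");
	if (IS_ERR(example_ick)) {
		printk("Could not get gpios_ick\n");
		return;
	}
	clk_use(example_ick);	/* mark the clock in use, as the code above does */
}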
diff --git a/arch/arm/plat-omap/mcbsp.c b/arch/arm/plat-omap/mcbsp.c
index 9c9b7df..ea9475c 100644
--- a/arch/arm/plat-omap/mcbsp.c
+++ b/arch/arm/plat-omap/mcbsp.c
@@ -491,17 +491,20 @@
 	omap_set_dma_transfer_params(mcbsp[id].dma_tx_lch,
 				     OMAP_DMA_DATA_TYPE_S16,
 				     length >> 1, 1,
-				     OMAP_DMA_SYNC_ELEMENT);
+				     OMAP_DMA_SYNC_ELEMENT,
+				     0, 0);
 
 	omap_set_dma_dest_params(mcbsp[id].dma_tx_lch,
 				 OMAP_DMA_PORT_TIPB,
 				 OMAP_DMA_AMODE_CONSTANT,
-				 mcbsp[id].io_base + OMAP_MCBSP_REG_DXR1);
+				 mcbsp[id].io_base + OMAP_MCBSP_REG_DXR1,
+				 0, 0);
 
 	omap_set_dma_src_params(mcbsp[id].dma_tx_lch,
 				OMAP_DMA_PORT_EMIFF,
 				OMAP_DMA_AMODE_POST_INC,
-				buffer);
+				buffer,
+				0, 0);
 
 	omap_start_dma(mcbsp[id].dma_tx_lch);
 	wait_for_completion(&(mcbsp[id].tx_dma_completion));
@@ -531,17 +534,20 @@
 	omap_set_dma_transfer_params(mcbsp[id].dma_rx_lch,
 				     OMAP_DMA_DATA_TYPE_S16,
 				     length >> 1, 1,
-				     OMAP_DMA_SYNC_ELEMENT);
+				     OMAP_DMA_SYNC_ELEMENT,
+				     0, 0);
 
 	omap_set_dma_src_params(mcbsp[id].dma_rx_lch,
 				OMAP_DMA_PORT_TIPB,
 				OMAP_DMA_AMODE_CONSTANT,
-				mcbsp[id].io_base + OMAP_MCBSP_REG_DRR1);
+				mcbsp[id].io_base + OMAP_MCBSP_REG_DRR1,
+				0, 0);
 
 	omap_set_dma_dest_params(mcbsp[id].dma_rx_lch,
 				 OMAP_DMA_PORT_EMIFF,
 				 OMAP_DMA_AMODE_POST_INC,
-				 buffer);
+				 buffer,
+				 0, 0);
 
 	omap_start_dma(mcbsp[id].dma_rx_lch);
 	wait_for_completion(&(mcbsp[id].rx_dma_completion));
@@ -643,7 +649,7 @@
 };
 #endif
 
-#ifdef CONFIG_ARCH_OMAP1510
+#ifdef CONFIG_ARCH_OMAP15XX
 static const struct omap_mcbsp_info mcbsp_1510[] = {
 	[0] = { .virt_base = OMAP1510_MCBSP1_BASE,
 		.dma_rx_sync = OMAP_DMA_MCBSP1_RX,
@@ -712,7 +718,7 @@
 		mcbsp_count = ARRAY_SIZE(mcbsp_730);
 	}
 #endif
-#ifdef CONFIG_ARCH_OMAP1510
+#ifdef CONFIG_ARCH_OMAP15XX
 	if (cpu_is_omap1510()) {
 		mcbsp_info = mcbsp_1510;
 		mcbsp_count = ARRAY_SIZE(mcbsp_1510);
diff --git a/arch/arm/plat-omap/mux.c b/arch/arm/plat-omap/mux.c
index 6448204..8c1c016 100644
--- a/arch/arm/plat-omap/mux.c
+++ b/arch/arm/plat-omap/mux.c
@@ -3,7 +3,7 @@
  *
  * Utility to set the Omap MUX and PULL_DWN registers from a table in mux.h
  *
- * Copyright (C) 2003 Nokia Corporation
+ * Copyright (C) 2003 - 2005 Nokia Corporation
  *
  * Written by Tony Lindgren <tony.lindgren@nokia.com>
  *
@@ -25,38 +25,74 @@
 #include <linux/config.h>
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/kernel.h>
 #include <asm/system.h>
 #include <asm/io.h>
 #include <linux/spinlock.h>
-
-#define __MUX_C__
 #include <asm/arch/mux.h>
 
 #ifdef CONFIG_OMAP_MUX
 
+#define OMAP24XX_L4_BASE	0x48000000
+#define OMAP24XX_PULL_ENA	(1 << 3)
+#define OMAP24XX_PULL_UP	(1 << 4)
+
+static struct pin_config * pin_table;
+static unsigned long pin_table_sz;
+
+extern struct pin_config * omap730_pins;
+extern struct pin_config * omap1xxx_pins;
+extern struct pin_config * omap24xx_pins;
+
+int __init omap_mux_register(struct pin_config * pins, unsigned long size)
+{
+	pin_table = pins;
+	pin_table_sz = size;
+
+	return 0;
+}
+
 /*
  * Sets the Omap MUX and PULL_DWN registers based on the table
  */
-int __init_or_module
-omap_cfg_reg(const reg_cfg_t reg_cfg)
+int __init_or_module omap_cfg_reg(const unsigned long index)
 {
 	static DEFINE_SPINLOCK(mux_spin_lock);
 
 	unsigned long flags;
-	reg_cfg_set *cfg;
+	struct pin_config *cfg;
 	unsigned int reg_orig = 0, reg = 0, pu_pd_orig = 0, pu_pd = 0,
 		pull_orig = 0, pull = 0;
 	unsigned int mask, warn = 0;
 
-	if (cpu_is_omap7xx())
-		return 0;
+	if (!pin_table)
+		BUG();
 
-	if (reg_cfg > ARRAY_SIZE(reg_cfg_table)) {
-		printk(KERN_ERR "MUX: reg_cfg %d\n", reg_cfg);
-		return -EINVAL;
+	if (index >= pin_table_sz) {
+		printk(KERN_ERR "Invalid pin mux index: %lu (%lu)\n",
+		       index, pin_table_sz);
+		dump_stack();
+		return -ENODEV;
 	}
 
-	cfg = (reg_cfg_set *)&reg_cfg_table[reg_cfg];
+	cfg = (struct pin_config *)&pin_table[index];
+	if (cpu_is_omap24xx()) {
+		u8 reg = 0;
+
+		reg |= cfg->mask & 0x7;
+		if (cfg->pull_val)
+			reg |= OMAP24XX_PULL_ENA;
+		if (cfg->pu_pd_val)
+			reg |= OMAP24XX_PULL_UP;
+#ifdef CONFIG_OMAP_MUX_DEBUG
+		printk("Muxing %s (0x%08x): 0x%02x -> 0x%02x\n",
+		       cfg->name, OMAP24XX_L4_BASE + cfg->mux_reg,
+		       omap_readb(OMAP24XX_L4_BASE + cfg->mux_reg), reg);
+#endif
+		omap_writeb(reg, OMAP24XX_L4_BASE + cfg->mux_reg);
+
+		return 0;
+	}
 
 	/* Check the mux register in question */
 	if (cfg->mux_reg) {
@@ -157,7 +193,8 @@
 	return 0;
 #endif
 }
-
 EXPORT_SYMBOL(omap_cfg_reg);
-
+#else
+#define omap_mux_init() do {} while(0)
+#define omap_cfg_reg(x)	do {} while(0)
 #endif	/* CONFIG_OMAP_MUX */
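For OMAP24xx, omap_cfg_reg() above packs the mux mode into bits 0-2 of a one-byte register and the pull configuration into bits 3-4, then writes it at OMAP24XX_L4_BASE + mux_reg. A worked example of that encoding; the mode, pull settings and register offset are illustrative, not taken from a real pin table:

/* Worked example of the 24xx encoding in omap_cfg_reg() above. */
u8 reg = 0;

reg |= cfg->mask & 0x7;			/* mux mode, e.g. mode 1 -> 0x01 */
if (cfg->pull_val)
	reg |= OMAP24XX_PULL_ENA;	/* (1 << 3) -> 0x08 */
if (cfg->pu_pd_val)
	reg |= OMAP24XX_PULL_UP;	/* (1 << 4) -> 0x10 */

/* Mode 1 with pull-up enabled gives reg == 0x19, written with
 * omap_writeb(reg, OMAP24XX_L4_BASE + cfg->mux_reg), e.g. to
 * 0x48000000 + 0x0108 for a hypothetical mux_reg offset of 0x0108. */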
diff --git a/arch/arm/plat-omap/pm.c b/arch/arm/plat-omap/pm.c
index e15c6c1..966cca03 100644
--- a/arch/arm/plat-omap/pm.c
+++ b/arch/arm/plat-omap/pm.c
@@ -54,11 +54,12 @@
 #include <asm/arch/tps65010.h>
 #include <asm/arch/dsp_common.h>
 
-#include "clock.h"
-#include "sram.h"
+#include <asm/arch/clock.h>
+#include <asm/arch/sram.h>
 
 static unsigned int arm_sleep_save[ARM_SLEEP_SAVE_SIZE];
 static unsigned short ulpd_sleep_save[ULPD_SLEEP_SAVE_SIZE];
+static unsigned int mpui730_sleep_save[MPUI730_SLEEP_SAVE_SIZE];
 static unsigned int mpui1510_sleep_save[MPUI1510_SLEEP_SAVE_SIZE];
 static unsigned int mpui1610_sleep_save[MPUI1610_SLEEP_SAVE_SIZE];
 
@@ -120,8 +121,8 @@
  */
 static void omap_pm_wakeup_setup(void)
 {
-	u32 level1_wake = OMAP_IRQ_BIT(INT_IH2_IRQ);
-	u32 level2_wake = OMAP_IRQ_BIT(INT_UART2) | OMAP_IRQ_BIT(INT_KEYBOARD);
+	u32 level1_wake = 0;
+	u32 level2_wake = OMAP_IRQ_BIT(INT_UART2);
 
 	/*
 	 * Turn off all interrupts except GPIO bank 1, L1-2nd level cascade,
@@ -129,19 +130,29 @@
 	 * drivers must still separately call omap_set_gpio_wakeup() to
 	 * wake up to a GPIO interrupt.
 	 */
-	if (cpu_is_omap1510() || cpu_is_omap16xx())
-		level1_wake |= OMAP_IRQ_BIT(INT_GPIO_BANK1);
-	else if (cpu_is_omap730())
-		level1_wake |= OMAP_IRQ_BIT(INT_730_GPIO_BANK1);
+	if (cpu_is_omap730())
+		level1_wake = OMAP_IRQ_BIT(INT_730_GPIO_BANK1) |
+			OMAP_IRQ_BIT(INT_730_IH2_IRQ);
+	else if (cpu_is_omap1510())
+		level1_wake = OMAP_IRQ_BIT(INT_GPIO_BANK1) |
+			OMAP_IRQ_BIT(INT_1510_IH2_IRQ);
+	else if (cpu_is_omap16xx())
+		level1_wake = OMAP_IRQ_BIT(INT_GPIO_BANK1) |
+			OMAP_IRQ_BIT(INT_1610_IH2_IRQ);
 
 	omap_writel(~level1_wake, OMAP_IH1_MIR);
 
-	if (cpu_is_omap1510())
-		omap_writel(~level2_wake,  OMAP_IH2_MIR);
-
-	/* INT_1610_WAKE_UP_REQ is needed for GPIO wakeup... */
-	if (cpu_is_omap16xx()) {
+	if (cpu_is_omap730()) {
 		omap_writel(~level2_wake, OMAP_IH2_0_MIR);
+		omap_writel(~(OMAP_IRQ_BIT(INT_730_WAKE_UP_REQ) |
+			      OMAP_IRQ_BIT(INT_730_MPUIO_KEYPAD)),
+			    OMAP_IH2_1_MIR);
+	} else if (cpu_is_omap1510()) {
+		level2_wake |= OMAP_IRQ_BIT(INT_KEYBOARD);
+		omap_writel(~level2_wake,  OMAP_IH2_MIR);
+	} else if (cpu_is_omap16xx()) {
+		level2_wake |= OMAP_IRQ_BIT(INT_KEYBOARD);
+		omap_writel(~level2_wake, OMAP_IH2_0_MIR);
+
+		/* INT_1610_WAKE_UP_REQ is needed for GPIO wakeup... */
 		omap_writel(~OMAP_IRQ_BIT(INT_1610_WAKE_UP_REQ), OMAP_IH2_1_MIR);
 		omap_writel(~0x0, OMAP_IH2_2_MIR);
 		omap_writel(~0x0, OMAP_IH2_3_MIR);
@@ -185,7 +196,17 @@
  	 * Save interrupt, MPUI, ARM and UPLD control registers.
 	 */
 
-	if (cpu_is_omap1510()) {
+	if (cpu_is_omap730()) {
+		MPUI730_SAVE(OMAP_IH1_MIR);
+		MPUI730_SAVE(OMAP_IH2_0_MIR);
+		MPUI730_SAVE(OMAP_IH2_1_MIR);
+		MPUI730_SAVE(MPUI_CTRL);
+		MPUI730_SAVE(MPUI_DSP_BOOT_CONFIG);
+		MPUI730_SAVE(MPUI_DSP_API_CONFIG);
+		MPUI730_SAVE(EMIFS_CONFIG);
+		MPUI730_SAVE(EMIFF_SDRAM_CONFIG);
+
+	} else if (cpu_is_omap1510()) {
 		MPUI1510_SAVE(OMAP_IH1_MIR);
 		MPUI1510_SAVE(OMAP_IH2_MIR);
 		MPUI1510_SAVE(MPUI_CTRL);
@@ -280,7 +301,13 @@
 	ULPD_RESTORE(ULPD_CLOCK_CTRL);
 	ULPD_RESTORE(ULPD_STATUS_REQ);
 
-	if (cpu_is_omap1510()) {
+	if (cpu_is_omap730()) {
+		MPUI730_RESTORE(EMIFS_CONFIG);
+		MPUI730_RESTORE(EMIFF_SDRAM_CONFIG);
+		MPUI730_RESTORE(OMAP_IH1_MIR);
+		MPUI730_RESTORE(OMAP_IH2_0_MIR);
+		MPUI730_RESTORE(OMAP_IH2_1_MIR);
+	} else if (cpu_is_omap1510()) {
 		MPUI1510_RESTORE(MPUI_CTRL);
 		MPUI1510_RESTORE(MPUI_DSP_BOOT_CONFIG);
 		MPUI1510_RESTORE(MPUI_DSP_API_CONFIG);
@@ -355,7 +382,14 @@
 	ULPD_SAVE(ULPD_DPLL_CTRL);
 	ULPD_SAVE(ULPD_POWER_CTRL);
 
-	if (cpu_is_omap1510()) {
+	if (cpu_is_omap730()) {
+		MPUI730_SAVE(MPUI_CTRL);
+		MPUI730_SAVE(MPUI_DSP_STATUS);
+		MPUI730_SAVE(MPUI_DSP_BOOT_CONFIG);
+		MPUI730_SAVE(MPUI_DSP_API_CONFIG);
+		MPUI730_SAVE(EMIFF_SDRAM_CONFIG);
+		MPUI730_SAVE(EMIFS_CONFIG);
+	} else if (cpu_is_omap1510()) {
 		MPUI1510_SAVE(MPUI_CTRL);
 		MPUI1510_SAVE(MPUI_DSP_STATUS);
 		MPUI1510_SAVE(MPUI_DSP_BOOT_CONFIG);
@@ -404,7 +438,21 @@
 		   ULPD_SHOW(ULPD_STATUS_REQ),
 		   ULPD_SHOW(ULPD_POWER_CTRL));
 
-		if (cpu_is_omap1510()) {
+		if (cpu_is_omap730()) {
+			my_buffer_offset += sprintf(my_base + my_buffer_offset,
+			   "MPUI730_CTRL_REG              0x%-8x \n"
+			   "MPUI730_DSP_STATUS_REG:       0x%-8x \n"
+			   "MPUI730_DSP_BOOT_CONFIG_REG:  0x%-8x \n"
+			   "MPUI730_DSP_API_CONFIG_REG:   0x%-8x \n"
+			   "MPUI730_SDRAM_CONFIG_REG:     0x%-8x \n"
+			   "MPUI730_EMIFS_CONFIG_REG:     0x%-8x \n",
+			   MPUI730_SHOW(MPUI_CTRL),
+			   MPUI730_SHOW(MPUI_DSP_STATUS),
+			   MPUI730_SHOW(MPUI_DSP_BOOT_CONFIG),
+			   MPUI730_SHOW(MPUI_DSP_API_CONFIG),
+			   MPUI730_SHOW(EMIFF_SDRAM_CONFIG),
+			   MPUI730_SHOW(EMIFS_CONFIG));
+		} else if (cpu_is_omap1510()) {
 			my_buffer_offset += sprintf(my_base + my_buffer_offset,
 			   "MPUI1510_CTRL_REG             0x%-8x \n"
 			   "MPUI1510_DSP_STATUS_REG:      0x%-8x \n"
@@ -553,7 +601,12 @@
 	 * These routines need to be in SRAM as that's the only
 	 * memory the MPU can see when it wakes up.
 	 */
-	if (cpu_is_omap1510()) {
+	if (cpu_is_omap730()) {
+		omap_sram_idle = omap_sram_push(omap730_idle_loop_suspend,
+						omap730_idle_loop_suspend_sz);
+		omap_sram_suspend = omap_sram_push(omap730_cpu_suspend,
+						   omap730_cpu_suspend_sz);
+	} else if (cpu_is_omap1510()) {
 		omap_sram_idle = omap_sram_push(omap1510_idle_loop_suspend,
 						omap1510_idle_loop_suspend_sz);
 		omap_sram_suspend = omap_sram_push(omap1510_cpu_suspend,
@@ -572,7 +625,11 @@
 
 	pm_idle = omap_pm_idle;
 
-	setup_irq(INT_1610_WAKE_UP_REQ, &omap_wakeup_irq);
+	if (cpu_is_omap730())
+		setup_irq(INT_730_WAKE_UP_REQ, &omap_wakeup_irq);
+	else if (cpu_is_omap16xx())
+		setup_irq(INT_1610_WAKE_UP_REQ, &omap_wakeup_irq);
+
 #if 0
 	/* --- BEGIN BOARD-DEPENDENT CODE --- */
 	/* Sleepx mask direction */
@@ -591,7 +648,9 @@
 	omap_writew(ULPD_POWER_CTRL_REG_VAL, ULPD_POWER_CTRL);
 
 	/* Configure IDLECT3 */
-	if (cpu_is_omap16xx())
+	if (cpu_is_omap730())
+		omap_writel(OMAP730_IDLECT3_VAL, OMAP730_IDLECT3);
+	else if (cpu_is_omap16xx())
 		omap_writel(OMAP1610_IDLECT3_VAL, OMAP1610_IDLECT3);
 
 	pm_set_ops(&omap_pm_ops);
@@ -600,8 +659,10 @@
 	omap_pm_init_proc();
 #endif
 
-	/* configure LOW_PWR pin */
-	omap_cfg_reg(T20_1610_LOW_PWR);
+	if (cpu_is_omap16xx()) {
+		/* configure LOW_PWR pin */
+		omap_cfg_reg(T20_1610_LOW_PWR);
+	}
 
 	return 0;
 }
diff --git a/arch/arm/plat-omap/sleep.S b/arch/arm/plat-omap/sleep.S
index 9f74583..4cd7d29 100644
--- a/arch/arm/plat-omap/sleep.S
+++ b/arch/arm/plat-omap/sleep.S
@@ -1,7 +1,7 @@
 /*
  * linux/arch/arm/plat-omap/sleep.S
  *
- * Low-level OMAP1510/1610 sleep/wakeUp support
+ * Low-level OMAP730/1510/1610 sleep/wakeUp support
  *
  * Initial SA1110 code:
  * Copyright (c) 2001 Cliff Brake <cbrake@accelent.com>
@@ -52,7 +52,57 @@
  *       processor specific functions here.
  */
 
-#ifdef CONFIG_ARCH_OMAP1510
+#if defined(CONFIG_ARCH_OMAP730)
+ENTRY(omap730_idle_loop_suspend)
+
+	stmfd	sp!, {r0 - r12, lr}		@ save registers on stack
+
+	@ load base address of ARM_IDLECT1 and ARM_IDLECT2
+	mov	r4, #CLKGEN_REG_ASM_BASE & 0xff000000
+	orr	r4, r4, #CLKGEN_REG_ASM_BASE & 0x00ff0000
+	orr	r4, r4, #CLKGEN_REG_ASM_BASE & 0x0000ff00
+
+	@ turn off clock domains
+	@ get ARM_IDLECT2 into r2
+	ldrh	r2, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff]
+	mov	r5, #OMAP730_IDLECT2_SLEEP_VAL & 0xff
+	orr	r5, r5, #OMAP730_IDLECT2_SLEEP_VAL & 0xff00
+	strh	r5, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff]
+
+	@ request ARM idle
+	@ get ARM_IDLECT1 into r1
+	ldrh	r1, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff]
+	orr	r3, r1, #OMAP730_IDLE_LOOP_REQUEST & 0xffff
+	strh	r3, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff]
+
+	mov	r5, #IDLE_WAIT_CYCLES & 0xff
+	orr	r5, r5, #IDLE_WAIT_CYCLES & 0xff00
+l_730:	subs	r5, r5, #1
+	bne	l_730
+/*
+ * Let's wait for the next clock tick to wake us up.
+ */
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c0, 4		@ wait for interrupt
+/*
+ * omap730_idle_loop_suspend()'s resume point.
+ *
+ * It will just start executing here, so we'll restore stuff from the
+ * stack, reset the ARM_IDLECT1 and ARM_IDLECT2.
+ */
+
+	@ restore ARM_IDLECT1 and ARM_IDLECT2 and return
+	@ r1 has ARM_IDLECT1 and r2 still has ARM_IDLECT2
+	strh	r2, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff]
+	strh	r1, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff]
+
+	ldmfd	sp!, {r0 - r12, pc}		@ restore regs and return
+
+ENTRY(omap730_idle_loop_suspend_sz)
+	.word	. - omap730_idle_loop_suspend
+#endif /* CONFIG_ARCH_OMAP730 */
+
+#ifdef CONFIG_ARCH_OMAP15XX
 ENTRY(omap1510_idle_loop_suspend)
 
 	stmfd	sp!, {r0 - r12, lr}		@ save registers on stack
@@ -100,7 +150,7 @@
 
 ENTRY(omap1510_idle_loop_suspend_sz)
 	.word	. - omap1510_idle_loop_suspend
-#endif /* CONFIG_ARCH_OMAP1510 */
+#endif /* CONFIG_ARCH_OMAP15XX */
 
 #if defined(CONFIG_ARCH_OMAP16XX)
 ENTRY(omap1610_idle_loop_suspend)
@@ -169,7 +219,86 @@
  *
  */
 
-#ifdef CONFIG_ARCH_OMAP1510
+#if defined(CONFIG_ARCH_OMAP730)
+ENTRY(omap730_cpu_suspend)
+
+	@ save registers on stack
+	stmfd	sp!, {r0 - r12, lr}
+
+	@ Drain write cache
+	mov	r4, #0
+	mcr	p15, 0, r0, c7, c10, 4
+	nop
+
+	@ load base address of Traffic Controller
+	mov	r6, #TCMIF_ASM_BASE & 0xff000000
+	orr	r6, r6, #TCMIF_ASM_BASE & 0x00ff0000
+	orr	r6, r6, #TCMIF_ASM_BASE & 0x0000ff00
+
+	@ prepare to put SDRAM into self-refresh manually
+	ldr	r7, [r6, #EMIFF_SDRAM_CONFIG_ASM_OFFSET & 0xff]
+	orr	r9, r7, #SELF_REFRESH_MODE & 0xff000000
+	orr	r9, r9, #SELF_REFRESH_MODE & 0x000000ff
+	str	r9, [r6, #EMIFF_SDRAM_CONFIG_ASM_OFFSET & 0xff]
+
+	@ prepare to put EMIFS to Sleep
+	ldr	r8, [r6, #EMIFS_CONFIG_ASM_OFFSET & 0xff]
+	orr	r9, r8, #IDLE_EMIFS_REQUEST & 0xff
+	str	r9, [r6, #EMIFS_CONFIG_ASM_OFFSET & 0xff]
+
+	@ load base address of ARM_IDLECT1 and ARM_IDLECT2
+	mov	r4, #CLKGEN_REG_ASM_BASE & 0xff000000
+	orr	r4, r4, #CLKGEN_REG_ASM_BASE & 0x00ff0000
+	orr	r4, r4, #CLKGEN_REG_ASM_BASE & 0x0000ff00
+
+	@ turn off clock domains
+	@ do not disable PERCK (0x04)
+	mov	r5, #OMAP730_IDLECT2_SLEEP_VAL & 0xff
+	orr	r5, r5, #OMAP730_IDLECT2_SLEEP_VAL & 0xff00
+	strh	r5, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff]
+
+	@ request ARM idle
+	mov	r3, #OMAP730_IDLECT1_SLEEP_VAL & 0xff
+	orr	r3, r3, #OMAP730_IDLECT1_SLEEP_VAL & 0xff00
+	strh	r3, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff]
+
+	@ disable instruction cache
+	mrc	p15, 0, r9, c1, c0, 0
+	bic	r2, r9, #0x1000
+	mcr	p15, 0, r2, c1, c0, 0
+	nop
+
+/*
+ * Let's wait for the next wake up event to wake us up. r0 can't be
+ * used here because r0 holds ARM_IDLECT1
+ */
+	mov	r2, #0
+	mcr	p15, 0, r2, c7, c0, 4		@ wait for interrupt
+/*
+ * omap730_cpu_suspend()'s resume point.
+ *
+ * It will just start executing here, so we'll restore stuff from the
+ * stack.
+ */
+	@ re-enable Icache
+	mcr	p15, 0, r9, c1, c0, 0
+
+	@ reset the ARM_IDLECT1 and ARM_IDLECT2.
+	strh	r1, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff]
+	strh	r0, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff]
+
+	@ Restore EMIFF controls
+	str	r7, [r6, #EMIFF_SDRAM_CONFIG_ASM_OFFSET & 0xff]
+	str	r8, [r6, #EMIFS_CONFIG_ASM_OFFSET & 0xff]
+
+	@ restore regs and return
+	ldmfd	sp!, {r0 - r12, pc}
+
+ENTRY(omap730_cpu_suspend_sz)
+	.word	. - omap730_cpu_suspend
+#endif /* CONFIG_ARCH_OMAP730 */
+
+#ifdef CONFIG_ARCH_OMAP15XX
 ENTRY(omap1510_cpu_suspend)
 
 	@ save registers on stack
@@ -241,7 +370,7 @@
 
 ENTRY(omap1510_cpu_suspend_sz)
 	.word	. - omap1510_cpu_suspend
-#endif /* CONFIG_ARCH_OMAP1510 */
+#endif /* CONFIG_ARCH_OMAP15XX */
 
 #if defined(CONFIG_ARCH_OMAP16XX)
 ENTRY(omap1610_cpu_suspend)
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index 7ad69f1..792f663 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -20,10 +20,13 @@
 #include <asm/io.h>
 #include <asm/cacheflush.h>
 
-#include "sram.h"
+#include <asm/arch/sram.h>
 
-#define OMAP1_SRAM_BASE		0xd0000000
-#define OMAP1_SRAM_START	0x20000000
+#define OMAP1_SRAM_PA		0x20000000
+#define OMAP1_SRAM_VA		0xd0000000
+#define OMAP2_SRAM_PA		0x40200000
+#define OMAP2_SRAM_VA		0xd0000000
+
 #define SRAM_BOOTLOADER_SZ	0x80
 
 static unsigned long omap_sram_base;
@@ -31,37 +34,40 @@
 static unsigned long omap_sram_ceil;
 
 /*
- * The amount of SRAM depends on the core type:
- * 730 = 200K, 1510 = 512K, 5912 = 256K, 1610 = 16K, 1710 = 16K
+ * The amount of SRAM depends on the core type.
  * Note that we cannot try to test for SRAM here because writes
  * to secure SRAM will hang the system. Also the SRAM is not
  * yet mapped at this point.
  */
 void __init omap_detect_sram(void)
 {
-	omap_sram_base = OMAP1_SRAM_BASE;
+	if (!cpu_is_omap24xx())
+		omap_sram_base = OMAP1_SRAM_VA;
+	else
+		omap_sram_base = OMAP2_SRAM_VA;
 
 	if (cpu_is_omap730())
-		omap_sram_size = 0x32000;
-	else if (cpu_is_omap1510())
-		omap_sram_size = 0x80000;
+		omap_sram_size = 0x32000;	/* 200K */
+	else if (cpu_is_omap15xx())
+		omap_sram_size = 0x30000;	/* 192K */
 	else if (cpu_is_omap1610() || cpu_is_omap1621() || cpu_is_omap1710())
-		omap_sram_size = 0x4000;
+		omap_sram_size = 0x4000;	/* 16K */
 	else if (cpu_is_omap1611())
-		omap_sram_size = 0x3e800;
+		omap_sram_size = 0x3e800;	/* 250K */
+	else if (cpu_is_omap2420())
+		omap_sram_size = 0xa0014;	/* 640K */
 	else {
 		printk(KERN_ERR "Could not detect SRAM size\n");
 		omap_sram_size = 0x4000;
 	}
 
-	printk(KERN_INFO "SRAM size: 0x%lx\n", omap_sram_size);
 	omap_sram_ceil = omap_sram_base + omap_sram_size;
 }
 
 static struct map_desc omap_sram_io_desc[] __initdata = {
 	{	/* .length gets filled in at runtime */
-		.virtual	= OMAP1_SRAM_BASE,
-		.pfn		= __phys_to_pfn(OMAP1_SRAM_START),
+		.virtual	= OMAP1_SRAM_VA,
+		.pfn		= __phys_to_pfn(OMAP1_SRAM_PA),
 		.type		= MT_DEVICE
 	}
 };
@@ -76,10 +82,19 @@
 	if (omap_sram_size == 0)
 		return;
 
+	if (cpu_is_omap24xx()) {
+		omap_sram_io_desc[0].virtual = OMAP2_SRAM_VA;
+		omap_sram_io_desc[0].pfn = __phys_to_pfn(OMAP2_SRAM_PA);
+	}
+
 	omap_sram_io_desc[0].length = (omap_sram_size + PAGE_SIZE-1)/PAGE_SIZE;
 	omap_sram_io_desc[0].length *= PAGE_SIZE;
 	iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc));
 
+	printk(KERN_INFO "SRAM: Mapped pa 0x%08lx to va 0x%08lx size: 0x%lx\n",
+	       __pfn_to_phys(omap_sram_io_desc[0].pfn),
+	       omap_sram_io_desc[0].virtual,
+	       omap_sram_io_desc[0].length);
+
 	/*
 	 * Looks like we need to preserve some bootloader code at the
 	 * beginning of SRAM for jumping to flash for reboot to work...
@@ -88,16 +103,6 @@
 	       omap_sram_size - SRAM_BOOTLOADER_SZ);
 }
 
-static void (*_omap_sram_reprogram_clock)(u32 dpllctl, u32 ckctl) = NULL;
-
-void omap_sram_reprogram_clock(u32 dpllctl, u32 ckctl)
-{
-	if (_omap_sram_reprogram_clock == NULL)
-		panic("Cannot use SRAM");
-
-	return _omap_sram_reprogram_clock(dpllctl, ckctl);
-}
-
 void * omap_sram_push(void * start, unsigned long size)
 {
 	if (size > (omap_sram_ceil - (omap_sram_base + SRAM_BOOTLOADER_SZ))) {
@@ -111,10 +116,94 @@
 	return (void *)omap_sram_ceil;
 }
 
-void __init omap_sram_init(void)
+static void omap_sram_error(void)
+{
+	panic("Uninitialized SRAM function\n");
+}
+
+#ifdef CONFIG_ARCH_OMAP1
+
+static void (*_omap_sram_reprogram_clock)(u32 dpllctl, u32 ckctl);
+
+void omap_sram_reprogram_clock(u32 dpllctl, u32 ckctl)
+{
+	if (!_omap_sram_reprogram_clock)
+		omap_sram_error();
+
+	return _omap_sram_reprogram_clock(dpllctl, ckctl);
+}
+
+int __init omap1_sram_init(void)
+{
+	_omap_sram_reprogram_clock = omap_sram_push(sram_reprogram_clock,
+						    sram_reprogram_clock_sz);
+
+	return 0;
+}
+
+#else
+#define omap1_sram_init()	do {} while (0)
+#endif
+
+#ifdef CONFIG_ARCH_OMAP2
+
+static void (*_omap2_sram_ddr_init)(u32 *slow_dll_ctrl, u32 fast_dll_ctrl,
+			      u32 base_cs, u32 force_unlock);
+
+void omap2_sram_ddr_init(u32 *slow_dll_ctrl, u32 fast_dll_ctrl,
+		   u32 base_cs, u32 force_unlock)
+{
+	if (!_omap2_sram_ddr_init)
+		omap_sram_error();
+
+	return _omap2_sram_ddr_init(slow_dll_ctrl, fast_dll_ctrl,
+				    base_cs, force_unlock);
+}
+
+static void (*_omap2_sram_reprogram_sdrc)(u32 perf_level, u32 dll_val,
+					  u32 mem_type);
+
+void omap2_sram_reprogram_sdrc(u32 perf_level, u32 dll_val, u32 mem_type)
+{
+	if (!_omap2_sram_reprogram_sdrc)
+		omap_sram_error();
+
+	return _omap2_sram_reprogram_sdrc(perf_level, dll_val, mem_type);
+}
+
+static u32 (*_omap2_set_prcm)(u32 dpll_ctrl_val, u32 sdrc_rfr_val, int bypass);
+
+u32 omap2_set_prcm(u32 dpll_ctrl_val, u32 sdrc_rfr_val, int bypass)
+{
+	if (!_omap2_set_prcm)
+		omap_sram_error();
+
+	return _omap2_set_prcm(dpll_ctrl_val, sdrc_rfr_val, bypass);
+}
+
+int __init omap2_sram_init(void)
+{
+	_omap2_sram_ddr_init = omap_sram_push(sram_ddr_init, sram_ddr_init_sz);
+
+	_omap2_sram_reprogram_sdrc = omap_sram_push(sram_reprogram_sdrc,
+						    sram_reprogram_sdrc_sz);
+	_omap2_set_prcm = omap_sram_push(sram_set_prcm, sram_set_prcm_sz);
+
+	return 0;
+}
+#else
+#define omap2_sram_init()	do {} while (0)
+#endif
+
+int __init omap_sram_init(void)
 {
 	omap_detect_sram();
 	omap_map_sram();
-	_omap_sram_reprogram_clock = omap_sram_push(sram_reprogram_clock,
-						    sram_reprogram_clock_sz);
+
+	if (!cpu_is_omap24xx())
+		omap1_sram_init();
+	else
+		omap2_sram_init();
+
+	return 0;
 }
diff --git a/arch/arm/plat-omap/sram.h b/arch/arm/plat-omap/sram.h
deleted file mode 100644
index 71984ef..0000000
--- a/arch/arm/plat-omap/sram.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * linux/arch/arm/plat-omap/sram.h
- *
- * Interface for functions that need to be run in internal SRAM
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __ARCH_ARM_OMAP_SRAM_H
-#define __ARCH_ARM_OMAP_SRAM_H
-
-extern void * omap_sram_push(void * start, unsigned long size);
-extern void omap_sram_reprogram_clock(u32 dpllctl, u32 ckctl);
-
-/* Do not use these */
-extern void sram_reprogram_clock(u32 ckctl, u32 dpllctl);
-extern unsigned long sram_reprogram_clock_sz;
-
-#endif
diff --git a/arch/arm/plat-omap/usb.c b/arch/arm/plat-omap/usb.c
index 205e2d0..00afc7a 100644
--- a/arch/arm/plat-omap/usb.c
+++ b/arch/arm/plat-omap/usb.c
@@ -91,6 +91,8 @@
 
 /*-------------------------------------------------------------------------*/
 
+#if defined(CONFIG_ARCH_OMAP_OTG) || defined(CONFIG_ARCH_OMAP15XX)
+
 static u32 __init omap_usb0_init(unsigned nwires, unsigned is_device)
 {
 	u32	syscon1 = 0;
@@ -271,6 +273,8 @@
 	return syscon1 << 24;
 }
 
+#endif
+
 /*-------------------------------------------------------------------------*/
 
 #if	defined(CONFIG_USB_GADGET_OMAP) || \
@@ -494,7 +498,7 @@
 
 /*-------------------------------------------------------------------------*/
 
-#ifdef	CONFIG_ARCH_OMAP1510
+#ifdef	CONFIG_ARCH_OMAP15XX
 
 #define ULPD_DPLL_CTRL_REG	__REG16(ULPD_DPLL_CTRL)
 #define DPLL_IOB		(1 << 13)
@@ -507,7 +511,6 @@
 
 static void __init omap_1510_usb_init(struct omap_usb_config *config)
 {
-	int status;
 	unsigned int val;
 
 	omap_usb0_init(config->pins[0], is_usb0_device(config));
@@ -539,6 +542,8 @@
 
 #ifdef	CONFIG_USB_GADGET_OMAP
 	if (config->register_dev) {
+		int status;
+
 		udc_device.dev.platform_data = config;
 		status = platform_device_register(&udc_device);
 		if (status)
@@ -549,6 +554,8 @@
 
 #if	defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
 	if (config->register_host) {
+		int status;
+
 		ohci_device.dev.platform_data = config;
 		status = platform_device_register(&ohci_device);
 		if (status)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 1493c78..ed31062 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -599,6 +599,10 @@
 	def_bool y
 	depends on NEED_MULTIPLE_NODES
 
+config ARCH_MEMORY_PROBE
+	def_bool y
+	depends on MEMORY_HOTPLUG
+
 # Some NUMA nodes have memory ranges that span
 # other nodes.  Even though a pfn is valid and
 # between a node's start and end pfns, it may not
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index b3ae299..c04bbd3 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -4,6 +4,7 @@
 
 ifeq ($(CONFIG_PPC64),y)
 EXTRA_CFLAGS	+= -mno-minimal-toc
+CFLAGS_ioctl32.o += -Ifs/
 endif
 ifeq ($(CONFIG_PPC32),y)
 CFLAGS_prom_init.o      += -fPIC
@@ -11,15 +12,21 @@
 endif
 
 obj-y				:= semaphore.o cputable.o ptrace.o syscalls.o \
-				   signal_32.o pmc.o
+				   irq.o signal_32.o pmc.o
 obj-$(CONFIG_PPC64)		+= setup_64.o binfmt_elf32.o sys_ppc32.o \
-				   signal_64.o ptrace32.o systbl.o
+				   signal_64.o ptrace32.o systbl.o \
+				   paca.o ioctl32.o cpu_setup_power4.o \
+				   firmware.o sysfs.o
 obj-$(CONFIG_ALTIVEC)		+= vecemu.o vector.o
 obj-$(CONFIG_POWER4)		+= idle_power4.o
 obj-$(CONFIG_PPC_OF)		+= of_device.o
-obj-$(CONFIG_PPC_RTAS)		+= rtas.o
+procfs-$(CONFIG_PPC64)		:= proc_ppc64.o
+obj-$(CONFIG_PROC_FS)		+= $(procfs-y)
+rtaspci-$(CONFIG_PPC64)		:= rtas_pci.o
+obj-$(CONFIG_PPC_RTAS)		+= rtas.o $(rtaspci-y)
 obj-$(CONFIG_RTAS_FLASH)	+= rtas_flash.o
 obj-$(CONFIG_RTAS_PROC)		+= rtas-proc.o
+obj-$(CONFIG_LPARCFG)		+= lparcfg.o
 obj-$(CONFIG_IBMVIO)		+= vio.o
 obj-$(CONFIG_GENERIC_TBSYNC)	+= smp-tbsync.o
 
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index b757572..8793102 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -106,7 +106,6 @@
 	DEFINE(ICACHEL1LINESIZE, offsetof(struct ppc64_caches, iline_size));
 	DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size));
 	DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page));
-	DEFINE(PLATFORM, offsetof(struct systemcfg, platform));
 	DEFINE(PLATFORM_LPAR, PLATFORM_LPAR);
 
 	/* paca */
diff --git a/arch/ppc64/kernel/cpu_setup_power4.S b/arch/powerpc/kernel/cpu_setup_power4.S
similarity index 99%
rename from arch/ppc64/kernel/cpu_setup_power4.S
rename to arch/powerpc/kernel/cpu_setup_power4.S
index 1fb673c5..cca942f 100644
--- a/arch/ppc64/kernel/cpu_setup_power4.S
+++ b/arch/powerpc/kernel/cpu_setup_power4.S
@@ -114,11 +114,11 @@
 
 	.data
 	.balign	L1_CACHE_BYTES,0
-cpu_state_storage:	
+cpu_state_storage:
 	.space	CS_SIZE
 	.balign	L1_CACHE_BYTES,0
 	.text
-	
+
 /* Called in normal context to backup CPU 0 state. This
  * does not include cache settings. This function is also
  * called for machine sleep. This does not include the MMU
@@ -151,7 +151,7 @@
 	std	r3,CS_HID4(r5)
 	mfspr	r3,SPRN_HID5
 	std	r3,CS_HID5(r5)
-	
+
 2:
 	mtcr	r7
 	blr
@@ -213,7 +213,7 @@
 	mtspr	SPRN_HID1,r3
 	sync
 	isync
-	
+
 	/* Restore HID4 */
 	ld	r3,CS_HID4(r5)
 	sync
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index cc4e9eb..1d85ced 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -52,6 +52,9 @@
 #define COMMON_USER		(PPC_FEATURE_32 | PPC_FEATURE_HAS_FPU | \
 				 PPC_FEATURE_HAS_MMU)
 #define COMMON_USER_PPC64	(COMMON_USER | PPC_FEATURE_64)
+#define COMMON_USER_POWER4	(COMMON_USER_PPC64 | PPC_FEATURE_POWER4)
+#define COMMON_USER_POWER5	(COMMON_USER_PPC64 | PPC_FEATURE_POWER5)
+#define COMMON_USER_POWER5_PLUS	(COMMON_USER_PPC64 | PPC_FEATURE_POWER5_PLUS)
 
 
 /* We only set the spe features if the kernel was compiled with
@@ -160,7 +163,7 @@
 		.pvr_value		= 0x00350000,
 		.cpu_name		= "POWER4 (gp)",
 		.cpu_features		= CPU_FTRS_POWER4,
-		.cpu_user_features	= COMMON_USER_PPC64,
+		.cpu_user_features	= COMMON_USER_POWER4,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
 		.num_pmcs		= 8,
@@ -175,7 +178,7 @@
 		.pvr_value		= 0x00380000,
 		.cpu_name		= "POWER4+ (gq)",
 		.cpu_features		= CPU_FTRS_POWER4,
-		.cpu_user_features	= COMMON_USER_PPC64,
+		.cpu_user_features	= COMMON_USER_POWER4,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
 		.num_pmcs		= 8,
@@ -190,7 +193,7 @@
 		.pvr_value		= 0x00390000,
 		.cpu_name		= "PPC970",
 		.cpu_features		= CPU_FTRS_PPC970,
-		.cpu_user_features	= COMMON_USER_PPC64 |
+		.cpu_user_features	= COMMON_USER_POWER4 |
 			PPC_FEATURE_HAS_ALTIVEC_COMP,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
@@ -212,7 +215,7 @@
 #else
 		.cpu_features		= CPU_FTRS_PPC970,
 #endif
-		.cpu_user_features	= COMMON_USER_PPC64 |
+		.cpu_user_features	= COMMON_USER_POWER4 |
 			PPC_FEATURE_HAS_ALTIVEC_COMP,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
@@ -230,7 +233,7 @@
 		.pvr_value		= 0x00440000,
 		.cpu_name		= "PPC970MP",
 		.cpu_features		= CPU_FTRS_PPC970,
-		.cpu_user_features	= COMMON_USER_PPC64 |
+		.cpu_user_features	= COMMON_USER_POWER4 |
 			PPC_FEATURE_HAS_ALTIVEC_COMP,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
@@ -245,7 +248,7 @@
 		.pvr_value		= 0x003a0000,
 		.cpu_name		= "POWER5 (gr)",
 		.cpu_features		= CPU_FTRS_POWER5,
-		.cpu_user_features	= COMMON_USER_PPC64,
+		.cpu_user_features	= COMMON_USER_POWER5,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
 		.num_pmcs		= 6,
@@ -260,7 +263,7 @@
 		.pvr_value		= 0x003b0000,
 		.cpu_name		= "POWER5 (gs)",
 		.cpu_features		= CPU_FTRS_POWER5,
-		.cpu_user_features	= COMMON_USER_PPC64,
+		.cpu_user_features	= COMMON_USER_POWER5_PLUS,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
 		.num_pmcs		= 6,
@@ -276,7 +279,7 @@
 		.cpu_name		= "Cell Broadband Engine",
 		.cpu_features		= CPU_FTRS_CELL,
 		.cpu_user_features	= COMMON_USER_PPC64 |
-			PPC_FEATURE_HAS_ALTIVEC_COMP,
+			PPC_FEATURE_CELL | PPC_FEATURE_HAS_ALTIVEC_COMP,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
 		.cpu_setup		= __setup_cpu_be,
diff --git a/arch/ppc64/kernel/firmware.c b/arch/powerpc/kernel/firmware.c
similarity index 97%
rename from arch/ppc64/kernel/firmware.c
rename to arch/powerpc/kernel/firmware.c
index d8432c0..65eae75 100644
--- a/arch/ppc64/kernel/firmware.c
+++ b/arch/powerpc/kernel/firmware.c
@@ -1,6 +1,4 @@
 /*
- *  arch/ppc64/kernel/firmware.c
- *
  *  Extracted from cputable.c
  *
  *  Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index 4d6001f..b780b42 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -41,20 +41,20 @@
 #ifndef CONFIG_SMP
 	LOADBASE(r3, last_task_used_math)
 	toreal(r3)
-	LDL	r4,OFF(last_task_used_math)(r3)
-	CMPI	0,r4,0
+	PPC_LL	r4,OFF(last_task_used_math)(r3)
+	PPC_LCMPI	0,r4,0
 	beq	1f
 	toreal(r4)
 	addi	r4,r4,THREAD		/* want last_task_used_math->thread */
 	SAVE_32FPRS(0, r4)
 	mffs	fr0
 	stfd	fr0,THREAD_FPSCR(r4)
-	LDL	r5,PT_REGS(r4)
+	PPC_LL	r5,PT_REGS(r4)
 	toreal(r5)
-	LDL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	PPC_LL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
 	li	r10,MSR_FP|MSR_FE0|MSR_FE1
 	andc	r4,r4,r10		/* disable FP for previous task */
-	STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	PPC_STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
 1:
 #endif /* CONFIG_SMP */
 	/* enable use of FP after return */
@@ -77,7 +77,7 @@
 #ifndef CONFIG_SMP
 	subi	r4,r5,THREAD
 	fromreal(r4)
-	STL	r4,OFF(last_task_used_math)(r3)
+	PPC_STL	r4,OFF(last_task_used_math)(r3)
 #endif /* CONFIG_SMP */
 	/* restore registers and return */
 	/* we haven't used ctr or xer or lr */
@@ -97,24 +97,24 @@
 	MTMSRD(r5)			/* enable use of fpu now */
 	SYNC_601
 	isync
-	CMPI	0,r3,0
+	PPC_LCMPI	0,r3,0
 	beqlr-				/* if no previous owner, done */
 	addi	r3,r3,THREAD	        /* want THREAD of task */
-	LDL	r5,PT_REGS(r3)
-	CMPI	0,r5,0
+	PPC_LL	r5,PT_REGS(r3)
+	PPC_LCMPI	0,r5,0
 	SAVE_32FPRS(0, r3)
 	mffs	fr0
 	stfd	fr0,THREAD_FPSCR(r3)
 	beq	1f
-	LDL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	PPC_LL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
 	li	r3,MSR_FP|MSR_FE0|MSR_FE1
 	andc	r4,r4,r3		/* disable FP for previous task */
-	STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	PPC_STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
 1:
 #ifndef CONFIG_SMP
 	li	r5,0
 	LOADBASE(r4,last_task_used_math)
-	STL	r5,OFF(last_task_used_math)(r4)
+	PPC_STL	r5,OFF(last_task_used_math)(r4)
 #endif /* CONFIG_SMP */
 	blr
 
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 16ab40d..8a8bf79 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -28,7 +28,6 @@
 #include <asm/reg.h>
 #include <asm/page.h>
 #include <asm/mmu.h>
-#include <asm/systemcfg.h>
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
 #include <asm/bug.h>
@@ -1697,25 +1696,14 @@
  *   SPRG3 = paca virtual address
  */
 _GLOBAL(__secondary_start)
+	/* Set thread priority to MEDIUM */
+	HMT_MEDIUM
 
-	HMT_MEDIUM			/* Set thread priority to MEDIUM */
-
+	/* Load TOC */
 	ld	r2,PACATOC(r13)
-	li	r6,0
-	stb	r6,PACAPROCENABLED(r13)
 
-#ifndef CONFIG_PPC_ISERIES
-	/* Initialize the page table pointer register. */
-	LOADADDR(r6,_SDR1)
-	ld	r6,0(r6)		/* get the value of _SDR1	 */
-	mtspr	SPRN_SDR1,r6			/* set the htab location	 */
-#endif
-	/* Initialize the first segment table (or SLB) entry		 */
-	ld	r3,PACASTABVIRT(r13)	/* get addr of segment table	 */
-BEGIN_FTR_SECTION
-	bl	.stab_initialize
-END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
-	bl	.slb_initialize
+	/* Do early setup for that CPU (stab, slb, hash table pointer) */
+	bl	.early_setup_secondary
 
 	/* Initialize the kernel stack.  Just a repeat for iSeries.	 */
 	LOADADDR(r3,current_set)
@@ -1724,37 +1712,7 @@
 	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
 	std	r1,PACAKSAVE(r13)
 
-	ld	r3,PACASTABREAL(r13)	/* get raddr of segment table	 */
-	ori	r4,r3,1			/* turn on valid bit		 */
-
-#ifdef CONFIG_PPC_ISERIES
-	li	r0,-1			/* hypervisor call */
-	li	r3,1
-	sldi	r3,r3,63		/* 0x8000000000000000 */
-	ori	r3,r3,4			/* 0x8000000000000004 */
-	sc				/* HvCall_setASR */
-#else
-	/* set the ASR */
-	ld	r3,systemcfg@got(r2)	/* r3 = ptr to systemcfg	 */
-	ld	r3,0(r3)
-	lwz	r3,PLATFORM(r3)		/* r3 = platform flags		 */
-	andi.	r3,r3,PLATFORM_LPAR	/* Test if bit 0 is set (LPAR bit) */
-	beq	98f			/* branch if result is 0  */
-	mfspr	r3,SPRN_PVR
-	srwi	r3,r3,16
-	cmpwi	r3,0x37			/* SStar  */
-	beq	97f
-	cmpwi	r3,0x36			/* IStar  */
-	beq	97f
-	cmpwi	r3,0x34			/* Pulsar */
-	bne	98f
-97:	li	r3,H_SET_ASR		/* hcall = H_SET_ASR */
-	HVSC				/* Invoking hcall */
-	b	99f
-98:					/* !(rpa hypervisor) || !(star)  */
-	mtasr	r4			/* set the stab location	 */
-99:
-#endif
+	/* Clear backchain so we get nice backtraces */
 	li	r7,0
 	mtlr	r7
 
@@ -1777,6 +1735,7 @@
 	li	r3,0
 	std	r3,0(r1)		/* Zero the stack frame pointer	*/
 	bl	.start_secondary
+	b	.
 #endif
 
 /*
@@ -1896,40 +1855,6 @@
 	mr	r3,r31
  	bl	.early_setup
 
-	/* set the ASR */
-	ld	r3,PACASTABREAL(r13)
-	ori	r4,r3,1			/* turn on valid bit		 */
-	ld	r3,systemcfg@got(r2)	/* r3 = ptr to systemcfg */
-	ld	r3,0(r3)
-	lwz	r3,PLATFORM(r3)		/* r3 = platform flags */
-	andi.	r3,r3,PLATFORM_LPAR	/* Test if bit 0 is set (LPAR bit) */
-	beq	98f			/* branch if result is 0  */
-	mfspr	r3,SPRN_PVR
-	srwi	r3,r3,16
-	cmpwi	r3,0x37			/* SStar */
-	beq	97f
-	cmpwi	r3,0x36			/* IStar  */
-	beq	97f
-	cmpwi	r3,0x34			/* Pulsar */
-	bne	98f
-97:	li	r3,H_SET_ASR		/* hcall = H_SET_ASR */
-	HVSC				/* Invoking hcall */
-	b	99f
-98:					/* !(rpa hypervisor) || !(star) */
-	mtasr	r4			/* set the stab location	*/
-99:
-	/* Set SDR1 (hash table pointer) */
-	ld	r3,systemcfg@got(r2)	/* r3 = ptr to systemcfg */
-	ld	r3,0(r3)
-	lwz	r3,PLATFORM(r3)		/* r3 = platform flags */
-	/* Test if bit 0 is set (LPAR bit) */
-	andi.	r3,r3,PLATFORM_LPAR
-	bne	98f			/* branch if result is !0  */
-	LOADADDR(r6,_SDR1)		/* Only if NOT LPAR */
-	add	r6,r6,r26
-	ld	r6,0(r6)		/* get the value of _SDR1 */
-	mtspr	SPRN_SDR1,r6			/* set the htab location  */
-98: 
 	LOADADDR(r3,.start_here_common)
 	SET_REG_TO_CONST(r4, MSR_KERNEL)
 	mtspr	SPRN_SRR0,r3
diff --git a/arch/ppc64/kernel/ioctl32.c b/arch/powerpc/kernel/ioctl32.c
similarity index 99%
rename from arch/ppc64/kernel/ioctl32.c
rename to arch/powerpc/kernel/ioctl32.c
index ba4a899..3fa6a93 100644
--- a/arch/ppc64/kernel/ioctl32.c
+++ b/arch/powerpc/kernel/ioctl32.c
@@ -1,6 +1,6 @@
-/* 
+/*
  * ioctl32.c: Conversion between 32bit and 64bit native ioctls.
- * 
+ *
  * Based on sparc64 ioctl32.c by:
  *
  * Copyright (C) 1997-2000  Jakub Jelinek  (jakub@redhat.com)
diff --git a/arch/ppc64/kernel/irq.c b/arch/powerpc/kernel/irq.c
similarity index 70%
rename from arch/ppc64/kernel/irq.c
rename to arch/powerpc/kernel/irq.c
index 8747458..4b79406 100644
--- a/arch/ppc64/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -5,12 +5,12 @@
  *    Copyright (C) 1992 Linus Torvalds
  *  Adapted from arch/i386 by Gary Thomas
  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
- *  Updated and modified by Cort Dougan (cort@cs.nmt.edu)
- *    Copyright (C) 1996 Cort Dougan
+ *  Updated and modified by Cort Dougan <cort@fsmlabs.com>
+ *    Copyright (C) 1996-2001 Cort Dougan
  *  Adapted for Power Macintosh by Paul Mackerras
  *    Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
  *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
- * 
+ *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version
@@ -21,6 +21,14 @@
  * instead of just grabbing them. Thus setups with different IRQ numbers
  * shouldn't result in any weird surprises, and installing new handlers
  * should be easier.
+ *
+ * The MPC8xx has an interrupt mask in the SIU.  If a bit is set, the
+ * interrupt is _enabled_.  As expected, IRQ0 is bit 0 in the 32-bit
+ * mask register (of which only 16 are defined), hence the weird shifting
+ * and complement of the cached_irq_mask.  I want to be able to stuff
+ * this right into the SIU SMASK register.
+ * Many of the prep/chrp functions are conditional compiled on CONFIG_8xx
+ * to reduce code space and undefined function references.
  */
 
 #include <linux/errno.h>
@@ -29,6 +37,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/signal.h>
 #include <linux/sched.h>
+#include <linux/ptrace.h>
 #include <linux/ioport.h>
 #include <linux/interrupt.h>
 #include <linux/timex.h>
@@ -40,9 +49,13 @@
 #include <linux/irq.h>
 #include <linux/proc_fs.h>
 #include <linux/random.h>
-#include <linux/kallsyms.h>
+#include <linux/seq_file.h>
+#include <linux/cpumask.h>
 #include <linux/profile.h>
 #include <linux/bitops.h>
+#ifdef CONFIG_PPC64
+#include <linux/kallsyms.h>
+#endif
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
@@ -52,35 +65,54 @@
 #include <asm/cache.h>
 #include <asm/prom.h>
 #include <asm/ptrace.h>
-#include <asm/iseries/it_lp_queue.h>
 #include <asm/machdep.h>
+#ifdef CONFIG_PPC64
+#include <asm/iseries/it_lp_queue.h>
 #include <asm/paca.h>
-
-#ifdef CONFIG_SMP
-extern void iSeries_smp_message_recv( struct pt_regs * );
 #endif
 
-extern irq_desc_t irq_desc[NR_IRQS];
+static int ppc_spurious_interrupts;
+
+#if defined(CONFIG_PPC_ISERIES) && defined(CONFIG_SMP)
+extern void iSeries_smp_message_recv(struct pt_regs *);
+#endif
+
+#ifdef CONFIG_PPC32
+#define NR_MASK_WORDS	((NR_IRQS + 31) / 32)
+
+unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
+atomic_t ppc_n_lost_interrupts;
+
+#ifdef CONFIG_TAU_INT
+extern int tau_initialized;
+extern int tau_interrupts(int);
+#endif
+
+#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
+extern atomic_t ipi_recv;
+extern atomic_t ipi_sent;
+#endif
+#endif /* CONFIG_PPC32 */
+
+#ifdef CONFIG_PPC64
 EXPORT_SYMBOL(irq_desc);
 
 int distribute_irqs = 1;
 int __irq_offset_value;
-int ppc_spurious_interrupts;
 u64 ppc64_interrupt_controller;
+#endif /* CONFIG_PPC64 */
 
 int show_interrupts(struct seq_file *p, void *v)
 {
-	int i = *(loff_t *) v, j;
-	struct irqaction * action;
+	int i = *(loff_t *)v, j;
+	struct irqaction *action;
 	irq_desc_t *desc;
 	unsigned long flags;
 
 	if (i == 0) {
-		seq_printf(p, "           ");
-		for (j=0; j<NR_CPUS; j++) {
-			if (cpu_online(j))
-				seq_printf(p, "CPU%d       ",j);
-		}
+		seq_puts(p, "           ");
+		for_each_online_cpu(j)
+			seq_printf(p, "CPU%d       ", j);
 		seq_putc(p, '\n');
 	}
 
@@ -92,26 +124,41 @@
 			goto skip;
 		seq_printf(p, "%3d: ", i);
 #ifdef CONFIG_SMP
-		for (j = 0; j < NR_CPUS; j++) {
-			if (cpu_online(j))
-				seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
-		}
+		for_each_online_cpu(j)
+			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 #else
 		seq_printf(p, "%10u ", kstat_irqs(i));
 #endif /* CONFIG_SMP */
 		if (desc->handler)
-			seq_printf(p, " %s ", desc->handler->typename );
+			seq_printf(p, " %s ", desc->handler->typename);
 		else
-			seq_printf(p, "  None      ");
+			seq_puts(p, "  None      ");
 		seq_printf(p, "%s", (desc->status & IRQ_LEVEL) ? "Level " : "Edge  ");
-		seq_printf(p, "    %s",action->name);
-		for (action=action->next; action; action = action->next)
+		seq_printf(p, "    %s", action->name);
+		for (action = action->next; action; action = action->next)
 			seq_printf(p, ", %s", action->name);
 		seq_putc(p, '\n');
 skip:
 		spin_unlock_irqrestore(&desc->lock, flags);
-	} else if (i == NR_IRQS)
+	} else if (i == NR_IRQS) {
+#ifdef CONFIG_PPC32
+#ifdef CONFIG_TAU_INT
+		if (tau_initialized) {
+			seq_puts(p, "TAU: ");
+			for (j = 0; j < NR_CPUS; j++)
+				if (cpu_online(j))
+					seq_printf(p, "%10u ", tau_interrupts(j));
+			seq_puts(p, "  PowerPC             Thermal Assist (cpu temp)\n");
+		}
+#endif
+#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
+		/* should this be per processor send/receive? */
+		seq_printf(p, "IPI (recv/sent): %10u/%u\n",
+				atomic_read(&ipi_recv), atomic_read(&ipi_sent));
+#endif
+#endif /* CONFIG_PPC32 */
 		seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
+	}
 	return 0;
 }
 
@@ -144,126 +191,6 @@
 }
 #endif
 
-extern int noirqdebug;
-
-/*
- * Eventually, this should take an array of interrupts and an array size
- * so it can dispatch multiple interrupts.
- */
-void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
-{
-	int status;
-	struct irqaction *action;
-	int cpu = smp_processor_id();
-	irq_desc_t *desc = get_irq_desc(irq);
-	irqreturn_t action_ret;
-#ifdef CONFIG_IRQSTACKS
-	struct thread_info *curtp, *irqtp;
-#endif
-
-	kstat_cpu(cpu).irqs[irq]++;
-
-	if (desc->status & IRQ_PER_CPU) {
-		/* no locking required for CPU-local interrupts: */
-		ack_irq(irq);
-		action_ret = handle_IRQ_event(irq, regs, desc->action);
-		desc->handler->end(irq);
-		return;
-	}
-
-	spin_lock(&desc->lock);
-	ack_irq(irq);	
-	/*
-	   REPLAY is when Linux resends an IRQ that was dropped earlier
-	   WAITING is used by probe to mark irqs that are being tested
-	   */
-	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
-	status |= IRQ_PENDING; /* we _want_ to handle it */
-
-	/*
-	 * If the IRQ is disabled for whatever reason, we cannot
-	 * use the action we have.
-	 */
-	action = NULL;
-	if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
-		action = desc->action;
-		if (!action || !action->handler) {
-			ppc_spurious_interrupts++;
-			printk(KERN_DEBUG "Unhandled interrupt %x, disabled\n", irq);
-			/* We can't call disable_irq here, it would deadlock */
-			if (!desc->depth)
-				desc->depth = 1;
-			desc->status |= IRQ_DISABLED;
-			/* This is not a real spurrious interrupt, we
-			 * have to eoi it, so we jump to out
-			 */
-			mask_irq(irq);
-			goto out;
-		}
-		status &= ~IRQ_PENDING; /* we commit to handling */
-		status |= IRQ_INPROGRESS; /* we are handling it */
-	}
-	desc->status = status;
-
-	/*
-	 * If there is no IRQ handler or it was disabled, exit early.
-	   Since we set PENDING, if another processor is handling
-	   a different instance of this same irq, the other processor
-	   will take care of it.
-	 */
-	if (unlikely(!action))
-		goto out;
-
-	/*
-	 * Edge triggered interrupts need to remember
-	 * pending events.
-	 * This applies to any hw interrupts that allow a second
-	 * instance of the same irq to arrive while we are in do_IRQ
-	 * or in the handler. But the code here only handles the _second_
-	 * instance of the irq, not the third or fourth. So it is mostly
-	 * useful for irq hardware that does not mask cleanly in an
-	 * SMP environment.
-	 */
-	for (;;) {
-		spin_unlock(&desc->lock);
-
-#ifdef CONFIG_IRQSTACKS
-		/* Switch to the irq stack to handle this */
-		curtp = current_thread_info();
-		irqtp = hardirq_ctx[smp_processor_id()];
-		if (curtp != irqtp) {
-			irqtp->task = curtp->task;
-			irqtp->flags = 0;
-			action_ret = call_handle_IRQ_event(irq, regs, action, irqtp);
-			irqtp->task = NULL;
-			if (irqtp->flags)
-				set_bits(irqtp->flags, &curtp->flags);
-		} else
-#endif
-			action_ret = handle_IRQ_event(irq, regs, action);
-
-		spin_lock(&desc->lock);
-		if (!noirqdebug)
-			note_interrupt(irq, desc, action_ret, regs);
-		if (likely(!(desc->status & IRQ_PENDING)))
-			break;
-		desc->status &= ~IRQ_PENDING;
-	}
-out:
-	desc->status &= ~IRQ_INPROGRESS;
-	/*
-	 * The ->end() handler has to deal with interrupts which got
-	 * disabled while the handler was running.
-	 */
-	if (desc->handler) {
-		if (desc->handler->end)
-			desc->handler->end(irq);
-		else if (desc->handler->enable)
-			desc->handler->enable(irq);
-	}
-	spin_unlock(&desc->lock);
-}
-
 #ifdef CONFIG_PPC_ISERIES
 void do_IRQ(struct pt_regs *regs)
 {
@@ -310,8 +237,11 @@
 void do_IRQ(struct pt_regs *regs)
 {
 	int irq;
+#ifdef CONFIG_IRQSTACKS
+	struct thread_info *curtp, *irqtp;
+#endif
 
 	irq_enter();
 
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
 	/* Debugging check for stack overflow: is there less than 2KB free? */
@@ -328,20 +258,44 @@
 	}
 #endif
 
+	/*
+	 * Every platform is required to implement ppc_md.get_irq.
+	 * This function will either return an irq number or -1 to
+	 * indicate there are no more pending.
+	 * The value -2 is for buggy hardware and means that this IRQ
+	 * has already been handled. -- Tom
+	 */
 	irq = ppc_md.get_irq(regs);
 
-	if (irq >= 0)
-		ppc_irq_dispatch_handler(regs, irq);
-	else
-		/* That's not SMP safe ... but who cares ? */
-		ppc_spurious_interrupts++;
-
-	irq_exit();
+	if (irq >= 0) {
+#ifdef CONFIG_IRQSTACKS
+		/* Switch to the irq stack to handle this */
+		curtp = current_thread_info();
+		irqtp = hardirq_ctx[smp_processor_id()];
+		if (curtp != irqtp) {
+			irqtp->task = curtp->task;
+			irqtp->flags = 0;
+			call___do_IRQ(irq, regs, irqtp);
+			irqtp->task = NULL;
+			if (irqtp->flags)
+				set_bits(irqtp->flags, &curtp->flags);
+		} else
+#endif
+			__do_IRQ(irq, regs);
+	} else
+#ifdef CONFIG_PPC32
+		if (irq != -2)
+#endif
+			/* That's not SMP safe ... but who cares ? */
+			ppc_spurious_interrupts++;
+	irq_exit();
 }
+
 #endif	/* CONFIG_PPC_ISERIES */
 
 void __init init_IRQ(void)
 {
+#ifdef CONFIG_PPC64
 	static int once = 0;
 
 	if (once)
@@ -349,10 +303,14 @@
 
 	once++;
 
+#endif
 	ppc_md.init_IRQ();
+#ifdef CONFIG_PPC64
 	irq_ctx_init();
+#endif
 }
 
+#ifdef CONFIG_PPC64
 #ifndef CONFIG_PPC_ISERIES
 /*
  * Virtual IRQ mapping code, used on systems with XICS interrupt controllers.
@@ -517,3 +475,4 @@
 }
 
 __setup("noirqdistrib", setup_noirqdistrib);
+#endif /* CONFIG_PPC64 */
diff --git a/arch/ppc64/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
similarity index 98%
rename from arch/ppc64/kernel/lparcfg.c
rename to arch/powerpc/kernel/lparcfg.c
index 3e7b2f2..5e954fa 100644
--- a/arch/ppc64/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -35,6 +35,7 @@
 #include <asm/time.h>
 #include <asm/iseries/it_exp_vpd_panel.h>
 #include <asm/prom.h>
+#include <asm/systemcfg.h>
 
 #define MODULE_VERS "1.6"
 #define MODULE_NAME "lparcfg"
@@ -96,7 +97,7 @@
 
 #define lparcfg_write NULL
 
-/* 
+/*
  * Methods used to fetch LPAR data when running on an iSeries platform.
  */
 static int lparcfg_data(struct seq_file *m, void *v)
@@ -168,7 +169,7 @@
 #endif				/* CONFIG_PPC_ISERIES */
 
 #ifdef CONFIG_PPC_PSERIES
-/* 
+/*
  * Methods used to fetch LPAR data when running on a pSeries platform.
  */
 
@@ -177,7 +178,7 @@
  *  entitled_capacity,unallocated_capacity,
  *  aggregation, resource_capability).
  *
- *  R4 = Entitled Processor Capacity Percentage. 
+ *  R4 = Entitled Processor Capacity Percentage.
  *  R5 = Unallocated Processor Capacity Percentage.
  *  R6 (AABBCCDDEEFFGGHH).
  *      XXXX - reserved (0)
@@ -190,7 +191,7 @@
  *          XX - variable processor Capacity Weight
  *            XX - Unallocated Variable Processor Capacity Weight.
  *              XXXX - Active processors in Physical Processor Pool.
- *                  XXXX  - Processors active on platform. 
+ *                  XXXX  - Processors active on platform.
  */
 static unsigned int h_get_ppp(unsigned long *entitled,
 			      unsigned long *unallocated,
@@ -273,7 +274,7 @@
 		if (!workbuffer) {
 			printk(KERN_ERR "%s %s kmalloc failure at line %d \n",
 			       __FILE__, __FUNCTION__, __LINE__);
-			kfree(local_buffer);			
+			kfree(local_buffer);
 			return;
 		}
 #ifdef LPARCFG_DEBUG
@@ -371,7 +372,7 @@
 	lrdrp = (int *)get_property(rtas_node, "ibm,lrdr-capacity", NULL);
 
 	if (lrdrp == NULL) {
-		partition_potential_processors = systemcfg->processorCount;
+		partition_potential_processors = _systemcfg->processorCount;
 	} else {
 		partition_potential_processors = *(lrdrp + 4);
 	}
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 3bedb53..f6d84a7 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -519,7 +519,7 @@
  *
  * flush_icache_range(unsigned long start, unsigned long stop)
  */
-_GLOBAL(flush_icache_range)
+_GLOBAL(__flush_icache_range)
 BEGIN_FTR_SECTION
 	blr				/* for 601, do nothing */
 END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
@@ -607,27 +607,6 @@
 	sync				/* wait for dcbi's to get to ram */
 	blr
 
-#ifdef CONFIG_NOT_COHERENT_CACHE
-/*
- * 40x cores have 8K or 16K dcache and 32 byte line size.
- * 44x has a 32K dcache and 32 byte line size.
- * 8xx has 1, 2, 4, 8K variants.
- * For now, cover the worst case of the 44x.
- * Must be called with external interrupts disabled.
- */
-#define CACHE_NWAYS	64
-#define CACHE_NLINES	16
-
-_GLOBAL(flush_dcache_all)
-	li	r4, (2 * CACHE_NWAYS * CACHE_NLINES)
-	mtctr	r4
-	lis     r5, KERNELBASE@h
-1:	lwz	r3, 0(r5)		/* Load one word from every line */
-	addi	r5, r5, L1_CACHE_BYTES
-	bdnz    1b
-	blr
-#endif /* CONFIG_NOT_COHERENT_CACHE */
-
 /*
  * Flush a particular page from the data cache to RAM.
  * Note: this is necessary because the instruction cache does *not*
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index ae1433d..ae48a00 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -89,12 +89,12 @@
 	mtlr	r0
 	blr
 
-_GLOBAL(call_handle_IRQ_event)
+_GLOBAL(call___do_IRQ)
 	mflr	r0
 	std	r0,16(r1)
-	stdu	r1,THREAD_SIZE-112(r6)
-	mr	r1,r6
-	bl	.handle_IRQ_event
+	stdu	r1,THREAD_SIZE-112(r5)
+	mr	r1,r5
+	bl	.__do_IRQ
 	ld	r1,0(r1)
 	ld	r0,16(r1)
 	mtlr	r0
diff --git a/arch/ppc64/kernel/pacaData.c b/arch/powerpc/kernel/paca.c
similarity index 97%
rename from arch/ppc64/kernel/pacaData.c
rename to arch/powerpc/kernel/paca.c
index 3133c72..3cf2517 100644
--- a/arch/ppc64/kernel/pacaData.c
+++ b/arch/powerpc/kernel/paca.c
@@ -15,7 +15,7 @@
 #include <asm/processor.h>
 #include <asm/ptrace.h>
 #include <asm/page.h>
-
+#include <asm/systemcfg.h>
 #include <asm/lppaca.h>
 #include <asm/iseries/it_lp_queue.h>
 #include <asm/paca.h>
@@ -24,15 +24,14 @@
 	struct systemcfg	data;
 	u8			page[PAGE_SIZE];
 } systemcfg_store __attribute__((__section__(".data.page.aligned")));
-struct systemcfg *systemcfg = &systemcfg_store.data;
-EXPORT_SYMBOL(systemcfg);
+struct systemcfg *_systemcfg = &systemcfg_store.data;
 
 
 /* This symbol is provided by the linker - let it fill in the paca
  * field correctly */
 extern unsigned long __toc_start;
 
-/* The Paca is an array with one entry per processor.  Each contains an 
+/* The Paca is an array with one entry per processor.  Each contains an
  * lppaca, which contains the information shared between the
  * hypervisor and Linux.  Each also contains an ItLpRegSave area which
  * is used by the hypervisor to save registers.
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 47d6f7e..5dcf4ba 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -44,6 +44,7 @@
 #include <asm/cputable.h>
 #include <asm/btext.h>
 #include <asm/div64.h>
+#include <asm/signal.h>
 
 #ifdef  CONFIG_8xx
 #include <asm/commproc.h>
@@ -56,7 +57,6 @@
 extern void alignment_exception(struct pt_regs *regs);
 extern void program_check_exception(struct pt_regs *regs);
 extern void single_step_exception(struct pt_regs *regs);
-extern int do_signal(sigset_t *, struct pt_regs *);
 extern int pmac_newworld;
 extern int sys_sigreturn(struct pt_regs *regs);
 
@@ -188,9 +188,6 @@
 EXPORT_SYMBOL(cuda_request);
 EXPORT_SYMBOL(cuda_poll);
 #endif /* CONFIG_ADB_CUDA */
-#if defined(CONFIG_PPC_MULTIPLATFORM) && defined(CONFIG_PPC32)
-EXPORT_SYMBOL(_machine);
-#endif
 #ifdef CONFIG_PPC_PMAC
 EXPORT_SYMBOL(sys_ctrler);
 #endif
diff --git a/arch/ppc64/kernel/proc_ppc64.c b/arch/powerpc/kernel/proc_ppc64.c
similarity index 95%
rename from arch/ppc64/kernel/proc_ppc64.c
rename to arch/powerpc/kernel/proc_ppc64.c
index 24e955ee..a1c1950 100644
--- a/arch/ppc64/kernel/proc_ppc64.c
+++ b/arch/powerpc/kernel/proc_ppc64.c
@@ -1,18 +1,16 @@
 /*
- * arch/ppc64/kernel/proc_ppc64.c
- *
  * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen IBM Corporation
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
  * (at your option) any later version.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
@@ -53,7 +51,7 @@
 	if (!root)
 		return 1;
 
-	if (!(systemcfg->platform & (PLATFORM_PSERIES | PLATFORM_CELL)))
+	if (!(platform_is_pseries() || _machine == PLATFORM_CELL))
 		return 0;
 
 	if (!proc_mkdir("rtas", root))
@@ -74,7 +72,7 @@
 	if (!pde)
 		return 1;
 	pde->nlink = 1;
-	pde->data = systemcfg;
+	pde->data = _systemcfg;
 	pde->size = PAGE_SIZE;
 	pde->proc_fops = &page_map_fops;
 
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index f645adb..6a5b468 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -48,9 +48,6 @@
 #include <asm/machdep.h>
 #include <asm/pSeries_reconfig.h>
 #include <asm/pci-bridge.h>
-#ifdef CONFIG_PPC64
-#include <asm/systemcfg.h>
-#endif
 
 #ifdef DEBUG
 #define DBG(fmt...) printk(KERN_ERR fmt)
@@ -74,10 +71,6 @@
 typedef int interpret_func(struct device_node *, unsigned long *,
 			   int, int, int);
 
-extern struct rtas_t rtas;
-extern struct lmb lmb;
-extern unsigned long klimit;
-
 static int __initdata dt_root_addr_cells;
 static int __initdata dt_root_size_cells;
 
@@ -391,7 +384,7 @@
 
 #ifdef CONFIG_PPC64
 		/* We offset irq numbers for the u3 MPIC by 128 in PowerMac */
-		if (systemcfg->platform == PLATFORM_POWERMAC && ic && ic->parent) {
+		if (_machine == PLATFORM_POWERMAC && ic && ic->parent) {
 			char *name = get_property(ic->parent, "name", NULL);
 			if (name && !strcmp(name, "u3"))
 				np->intrs[intrcount].line += 128;
@@ -1087,9 +1080,9 @@
 static int __init early_init_dt_scan_cpus(unsigned long node,
 					  const char *uname, int depth, void *data)
 {
-	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
 	u32 *prop;
-	unsigned long size = 0;
+	unsigned long size;
+	char *type = of_get_flat_dt_prop(node, "device_type", &size);
 
 	/* We are scanning "cpu" nodes only */
 	if (type == NULL || strcmp(type, "cpu") != 0)
@@ -1115,7 +1108,7 @@
 
 #ifdef CONFIG_ALTIVEC
 	/* Check if we have a VMX and eventually update CPU features */
-	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,vmx", &size);
+	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,vmx", NULL);
 	if (prop && (*prop) > 0) {
 		cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
 		cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
@@ -1161,13 +1154,9 @@
 	prop = (u32 *)of_get_flat_dt_prop(node, "linux,platform", NULL);
 	if (prop == NULL)
 		return 0;
-#ifdef CONFIG_PPC64
-	systemcfg->platform = *prop;
-#else
 #ifdef CONFIG_PPC_MULTIPLATFORM
 	_machine = *prop;
 #endif
-#endif
 
 #ifdef CONFIG_PPC64
 	/* check if iommu is forced on or off */
@@ -1264,7 +1253,14 @@
 	unsigned long l;
 
 	/* We are scanning "memory" nodes only */
-	if (type == NULL || strcmp(type, "memory") != 0)
+	if (type == NULL) {
+		/*
+		 * The longtrail doesn't have a device_type on the
+		 * /memory node, so look for the node called /memory@0.
+		 */
+		if (depth != 1 || strcmp(uname, "memory@0") != 0)
+			return 0;
+	} else if (strcmp(type, "memory") != 0)
 		return 0;
 
 	reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l);
@@ -1339,9 +1335,6 @@
 	of_scan_flat_dt(early_init_dt_scan_memory, NULL);
 	lmb_enforce_memory_limit(memory_limit);
 	lmb_analyze();
-#ifdef CONFIG_PPC64
-	systemcfg->physicalMemorySize = lmb_phys_mem_size();
-#endif
 	lmb_reserve(0, __pa(klimit));
 
 	DBG("Phys. mem: %lx\n", lmb_phys_mem_size());
@@ -1908,7 +1901,7 @@
 	/* We don't support that function on PowerMac, at least
 	 * not yet
 	 */
-	if (systemcfg->platform == PLATFORM_POWERMAC)
+	if (_machine == PLATFORM_POWERMAC)
 		return -ENODEV;
 
 	/* fix up new node's linux_phandle field */
@@ -1992,9 +1985,11 @@
 	*next = prop;
 	write_unlock(&devtree_lock);
 
+#ifdef CONFIG_PROC_DEVICETREE
 	/* try to add to proc as well if it was initialized */
 	if (np->pde)
 		proc_device_tree_add_prop(np->pde, prop);
+#endif /* CONFIG_PROC_DEVICETREE */
 
 	return 0;
 }
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 6dc33d1..4ce0105 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -94,11 +94,17 @@
 #ifdef CONFIG_PPC64
 #define RELOC(x)        (*PTRRELOC(&(x)))
 #define ADDR(x)		(u32) add_reloc_offset((unsigned long)(x))
+#define OF_WORKAROUNDS	0
 #else
 #define RELOC(x)	(x)
 #define ADDR(x)		(u32) (x)
+#define OF_WORKAROUNDS	of_workarounds
+int of_workarounds;
 #endif
 
+#define OF_WA_CLAIM	1	/* do phys/virt claim separately, then map */
+#define OF_WA_LONGTRAIL	2	/* work around longtrail bugs */
+
 #define PROM_BUG() do {						\
         prom_printf("kernel BUG at %s line 0x%x!\n",		\
 		    RELOC(__FILE__), __LINE__);			\
@@ -111,11 +117,6 @@
 #define prom_debug(x...)
 #endif
 
-#ifdef CONFIG_PPC32
-#define PLATFORM_POWERMAC	_MACH_Pmac
-#define PLATFORM_CHRP		_MACH_chrp
-#endif
-
 
 typedef u32 prom_arg_t;
 
@@ -128,10 +129,11 @@
 
 struct prom_t {
 	ihandle root;
-	ihandle chosen;
+	phandle chosen;
 	int cpu;
 	ihandle stdout;
 	ihandle mmumap;
+	ihandle memory;
 };
 
 struct mem_map_entry {
@@ -360,16 +362,36 @@
 static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
 				unsigned long align)
 {
-	int ret;
 	struct prom_t *_prom = &RELOC(prom);
 
-	ret = call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size,
-			(prom_arg_t)align);
-	if (ret != -1 && _prom->mmumap != 0)
-		/* old pmacs need us to map as well */
+	if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) {
+		/*
+		 * Old OF requires we claim physical and virtual separately
+		 * and then map explicitly (assuming virtual mode)
+		 */
+		int ret;
+		prom_arg_t result;
+
+		ret = call_prom_ret("call-method", 5, 2, &result,
+				    ADDR("claim"), _prom->memory,
+				    align, size, virt);
+		if (ret != 0 || result == -1)
+			return -1;
+		ret = call_prom_ret("call-method", 5, 2, &result,
+				    ADDR("claim"), _prom->mmumap,
+				    align, size, virt);
+		if (ret != 0) {
+			call_prom("call-method", 4, 1, ADDR("release"),
+				  _prom->memory, size, virt);
+			return -1;
+		}
+		/* the 0x12 is M (coherence) + PP == read/write */
 		call_prom("call-method", 6, 1,
-			  ADDR("map"), _prom->mmumap, 0, size, virt, virt);
-	return ret;
+			  ADDR("map"), _prom->mmumap, 0x12, size, virt, virt);
+		return virt;
+	}
+	return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size,
+			 (prom_arg_t)align);
 }
 
 static void __init __attribute__((noreturn)) prom_panic(const char *reason)
@@ -415,11 +437,52 @@
 	return call_prom("getproplen", 2, 1, node, ADDR(pname));
 }
 
-static int inline prom_setprop(phandle node, const char *pname,
-			       void *value, size_t valuelen)
+static void add_string(char **str, const char *q)
 {
-	return call_prom("setprop", 4, 1, node, ADDR(pname),
-			 (u32)(unsigned long) value, (u32) valuelen);
+	char *p = *str;
+
+	while (*q)
+		*p++ = *q++;
+	*p++ = ' ';
+	*str = p;
+}
+
+static char *tohex(unsigned int x)
+{
+	static char digits[] = "0123456789abcdef";
+	static char result[9];
+	int i;
+
+	result[8] = 0;
+	i = 8;
+	do {
+		--i;
+		result[i] = digits[x & 0xf];
+		x >>= 4;
+	} while (x != 0 && i > 0);
+	return &result[i];
+}
+
+static int __init prom_setprop(phandle node, const char *nodename,
+			       const char *pname, void *value, size_t valuelen)
+{
+	char cmd[256], *p;
+
+	if (!(OF_WORKAROUNDS & OF_WA_LONGTRAIL))
+		return call_prom("setprop", 4, 1, node, ADDR(pname),
+				 (u32)(unsigned long) value, (u32) valuelen);
+
+	/* gah... setprop doesn't work on longtrail, have to use interpret */
+	p = cmd;
+	add_string(&p, "dev");
+	add_string(&p, nodename);
+	add_string(&p, tohex((u32)(unsigned long) value));
+	add_string(&p, tohex(valuelen));
+	add_string(&p, tohex(ADDR(pname)));
+	add_string(&p, tohex(strlen(RELOC(pname))));
+	add_string(&p, "property");
+	*p = 0;
+	return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd);
 }
 
 /* We can't use the standard versions because of RELOC headaches. */
@@ -980,7 +1043,7 @@
 
 	rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
 	if (!IHANDLE_VALID(rtas_inst)) {
-		prom_printf("opening rtas package failed");
+		prom_printf("opening rtas package failed (%x)\n", rtas_inst);
 		return;
 	}
 
@@ -988,7 +1051,7 @@
 
 	if (call_prom_ret("call-method", 3, 2, &entry,
 			  ADDR("instantiate-rtas"),
-			  rtas_inst, base) == PROM_ERROR
+			  rtas_inst, base) != 0
 	    || entry == 0) {
 		prom_printf(" failed\n");
 		return;
@@ -997,8 +1060,10 @@
 
 	reserve_mem(base, size);
 
-	prom_setprop(rtas_node, "linux,rtas-base", &base, sizeof(base));
-	prom_setprop(rtas_node, "linux,rtas-entry", &entry, sizeof(entry));
+	prom_setprop(rtas_node, "/rtas", "linux,rtas-base",
+		     &base, sizeof(base));
+	prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
+		     &entry, sizeof(entry));
 
 	prom_debug("rtas base     = 0x%x\n", base);
 	prom_debug("rtas entry    = 0x%x\n", entry);
@@ -1089,10 +1154,6 @@
 		if (base < local_alloc_bottom)
 			local_alloc_bottom = base;
 
-		/* Save away the TCE table attributes for later use. */
-		prom_setprop(node, "linux,tce-base", &base, sizeof(base));
-		prom_setprop(node, "linux,tce-size", &minsize, sizeof(minsize));
-
 		/* It seems OF doesn't null-terminate the path :-( */
 		memset(path, 0, sizeof(path));
 		/* Call OF to setup the TCE hardware */
@@ -1101,6 +1162,10 @@
 			prom_printf("package-to-path failed\n");
 		}
 
+		/* Save away the TCE table attributes for later use. */
+		prom_setprop(node, path, "linux,tce-base", &base, sizeof(base));
+		prom_setprop(node, path, "linux,tce-size", &minsize, sizeof(minsize));
+
 		prom_debug("TCE table: %s\n", path);
 		prom_debug("\tnode = 0x%x\n", node);
 		prom_debug("\tbase = 0x%x\n", base);
@@ -1342,6 +1407,7 @@
 /*
  * For really old powermacs, we need to map things we claim.
  * For that, we need the ihandle of the mmu.
+ * Also, on the longtrail, we need to work around other bugs.
  */
 static void __init prom_find_mmu(void)
 {
@@ -1355,12 +1421,19 @@
 	if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0)
 		return;
 	version[sizeof(version) - 1] = 0;
-	prom_printf("OF version is '%s'\n", version);
 	/* XXX might need to add other versions here */
-	if (strcmp(version, "Open Firmware, 1.0.5") != 0)
+	if (strcmp(version, "Open Firmware, 1.0.5") == 0)
+		of_workarounds = OF_WA_CLAIM;
+	else if (strncmp(version, "FirmWorks,3.", 12) == 0) {
+		of_workarounds = OF_WA_CLAIM | OF_WA_LONGTRAIL;
+		call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim");
+	} else
 		return;
+	_prom->memory = call_prom("open", 1, 1, ADDR("/memory"));
 	prom_getprop(_prom->chosen, "mmu", &_prom->mmumap,
 		     sizeof(_prom->mmumap));
+	if (!IHANDLE_VALID(_prom->memory) || !IHANDLE_VALID(_prom->mmumap))
+		of_workarounds &= ~OF_WA_CLAIM;		/* hmmm */
 }
 #else
 #define prom_find_mmu()
@@ -1382,16 +1455,17 @@
 	memset(path, 0, 256);
 	call_prom("instance-to-path", 3, 1, _prom->stdout, path, 255);
 	val = call_prom("instance-to-package", 1, 1, _prom->stdout);
-	prom_setprop(_prom->chosen, "linux,stdout-package", &val, sizeof(val));
+	prom_setprop(_prom->chosen, "/chosen", "linux,stdout-package",
+		     &val, sizeof(val));
 	prom_printf("OF stdout device is: %s\n", RELOC(of_stdout_device));
-	prom_setprop(_prom->chosen, "linux,stdout-path",
-		     RELOC(of_stdout_device), strlen(RELOC(of_stdout_device))+1);
+	prom_setprop(_prom->chosen, "/chosen", "linux,stdout-path",
+		     path, strlen(path) + 1);
 
 	/* If it's a display, note it */
 	memset(type, 0, sizeof(type));
 	prom_getprop(val, "device_type", type, sizeof(type));
 	if (strcmp(type, RELOC("display")) == 0)
-		prom_setprop(val, "linux,boot-display", NULL, 0);
+		prom_setprop(val, path, "linux,boot-display", NULL, 0);
 }
 
 static void __init prom_close_stdin(void)
@@ -1514,7 +1588,7 @@
 
 		/* Success */
 		prom_printf("done\n");
-		prom_setprop(node, "linux,opened", NULL, 0);
+		prom_setprop(node, path, "linux,opened", NULL, 0);
 
 		/* Setup a usable color table when the appropriate
 		 * method is available. Should update this to set-colors */
@@ -1884,9 +1958,11 @@
 	/* interrupt on this revision of u3 is number 0 and level */
 	interrupts[0] = 0;
 	interrupts[1] = 1;
-	prom_setprop(i2c, "interrupts", &interrupts, sizeof(interrupts));
+	prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupts",
+		     &interrupts, sizeof(interrupts));
 	parent = (u32)mpic;
-	prom_setprop(i2c, "interrupt-parent", &parent, sizeof(parent));
+	prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent",
+		     &parent, sizeof(parent));
 #endif
 }
 
@@ -1922,11 +1998,11 @@
 		RELOC(prom_initrd_end) = RELOC(prom_initrd_start) + r4;
 
 		val = RELOC(prom_initrd_start);
-		prom_setprop(_prom->chosen, "linux,initrd-start", &val,
-			     sizeof(val));
+		prom_setprop(_prom->chosen, "/chosen", "linux,initrd-start",
+			     &val, sizeof(val));
 		val = RELOC(prom_initrd_end);
-		prom_setprop(_prom->chosen, "linux,initrd-end", &val,
-			     sizeof(val));
+		prom_setprop(_prom->chosen, "/chosen", "linux,initrd-end",
+			     &val, sizeof(val));
 
 		reserve_mem(RELOC(prom_initrd_start),
 			    RELOC(prom_initrd_end) - RELOC(prom_initrd_start));
@@ -1969,16 +2045,17 @@
 	prom_init_client_services(pp);
 
 	/*
+	 * See if this OF is old enough that we need to do explicit maps
+	 * and other workarounds
+	 */
+	prom_find_mmu();
+
+	/*
 	 * Init prom stdout device
 	 */
 	prom_init_stdout();
 
 	/*
-	 * See if this OF is old enough that we need to do explicit maps
-	 */
-	prom_find_mmu();
-
-	/*
 	 * Check for an initrd
 	 */
 	prom_check_initrd(r3, r4);
@@ -1989,14 +2066,15 @@
 	 */
 	RELOC(of_platform) = prom_find_machine_type();
 	getprop_rval = RELOC(of_platform);
-	prom_setprop(_prom->chosen, "linux,platform",
+	prom_setprop(_prom->chosen, "/chosen", "linux,platform",
 		     &getprop_rval, sizeof(getprop_rval));
 
 #ifdef CONFIG_PPC_PSERIES
 	/*
 	 * On pSeries, inform the firmware about our capabilities
 	 */
-	if (RELOC(of_platform) & PLATFORM_PSERIES)
+	if (RELOC(of_platform) == PLATFORM_PSERIES ||
+	    RELOC(of_platform) == PLATFORM_PSERIES_LPAR)
 		prom_send_capabilities();
 #endif
 
@@ -2050,21 +2128,23 @@
 	 * Fill in some infos for use by the kernel later on
 	 */
 	if (RELOC(prom_memory_limit))
-		prom_setprop(_prom->chosen, "linux,memory-limit",
+		prom_setprop(_prom->chosen, "/chosen", "linux,memory-limit",
 			     &RELOC(prom_memory_limit),
 			     sizeof(prom_memory_limit));
 #ifdef CONFIG_PPC64
 	if (RELOC(ppc64_iommu_off))
-		prom_setprop(_prom->chosen, "linux,iommu-off", NULL, 0);
+		prom_setprop(_prom->chosen, "/chosen", "linux,iommu-off",
+			     NULL, 0);
 
 	if (RELOC(iommu_force_on))
-		prom_setprop(_prom->chosen, "linux,iommu-force-on", NULL, 0);
+		prom_setprop(_prom->chosen, "/chosen", "linux,iommu-force-on",
+			     NULL, 0);
 
 	if (RELOC(prom_tce_alloc_start)) {
-		prom_setprop(_prom->chosen, "linux,tce-alloc-start",
+		prom_setprop(_prom->chosen, "/chosen", "linux,tce-alloc-start",
 			     &RELOC(prom_tce_alloc_start),
 			     sizeof(prom_tce_alloc_start));
-		prom_setprop(_prom->chosen, "linux,tce-alloc-end",
+		prom_setprop(_prom->chosen, "/chosen", "linux,tce-alloc-end",
 			     &RELOC(prom_tce_alloc_end),
 			     sizeof(prom_tce_alloc_end));
 	}
@@ -2081,8 +2161,13 @@
 	prom_printf("copying OF device tree ...\n");
 	flatten_device_tree();
 
-	/* in case stdin is USB and still active on IBM machines... */
-	prom_close_stdin();
+	/*
+	 * in case stdin is USB and still active on IBM machines...
+	 * Unfortunately quiesce crashes on some powermacs if we have
+	 * closed stdin already (in particular the powerbook 101).
+	 */
+	if (RELOC(of_platform) != PLATFORM_POWERMAC)
+		prom_close_stdin();
 
 	/*
 	 * Call OF "quiesce" method to shut down pending DMA's from
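
The LongTrail firmware (FirmWorks 3.x, detected in prom_find_mmu() above) has no usable "setprop" client call, so the new prom_setprop() assembles a Forth command and hands it to "interpret" instead. As a minimal stand-alone sketch (not kernel code; the buffer addresses are made-up examples), the command built for one of the calls above looks like this:

	/* Sketch: the "interpret" command prom_setprop() generates on LongTrail.
	 * add_string() and tohex() mirror the helpers added above; value_addr
	 * and name_addr are hypothetical numbers, not real OF addresses.
	 */
	#include <stdio.h>
	#include <string.h>

	static void add_string(char **str, const char *q)
	{
		char *p = *str;

		while (*q)
			*p++ = *q++;
		*p++ = ' ';
		*str = p;
	}

	static char *tohex(unsigned int x)
	{
		static char digits[] = "0123456789abcdef";
		static char result[9];
		int i;

		result[8] = 0;
		i = 8;
		do {
			--i;
			result[i] = digits[x & 0xf];
			x >>= 4;
		} while (x != 0 && i > 0);
		return &result[i];
	}

	int main(void)
	{
		char cmd[256], *p = cmd;
		unsigned int value_addr = 0x012f0000;	/* hypothetical */
		unsigned int name_addr  = 0x01300000;	/* hypothetical */

		/* "dev <node> <value> <len> <name-addr> <name-len> property" */
		add_string(&p, "dev");
		add_string(&p, "/rtas");
		add_string(&p, tohex(value_addr));
		add_string(&p, tohex(4));
		add_string(&p, tohex(name_addr));
		add_string(&p, tohex(strlen("linux,rtas-base")));
		add_string(&p, "property");
		*p = 0;
		printf("%s\n", cmd);	/* dev /rtas 12f0000 4 1300000 f property */
		return 0;
	}

tohex() reuses one static buffer, which is safe only because add_string() copies the digits into cmd[] before the next tohex() call; the kernel version relies on the same ordering.
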
diff --git a/arch/powerpc/kernel/rtas-proc.c b/arch/powerpc/kernel/rtas-proc.c
index 5bdd5b0..ae1a364 100644
--- a/arch/powerpc/kernel/rtas-proc.c
+++ b/arch/powerpc/kernel/rtas-proc.c
@@ -259,7 +259,7 @@
 {
 	struct proc_dir_entry *entry;
 
-	if (!(systemcfg->platform & PLATFORM_PSERIES))
+	if (_machine != PLATFORM_PSERIES && _machine != PLATFORM_PSERIES_LPAR)
 		return 1;
 
 	rtas_node = of_find_node_by_name(NULL, "rtas");
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 9d4e07f..4283fa3 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -29,9 +29,6 @@
 #include <asm/delay.h>
 #include <asm/uaccess.h>
 #include <asm/lmb.h>
-#ifdef CONFIG_PPC64
-#include <asm/systemcfg.h>
-#endif
 
 struct rtas_t rtas = {
 	.lock = SPIN_LOCK_UNLOCKED
@@ -671,7 +668,7 @@
 	 * the stop-self token if any
 	 */
 #ifdef CONFIG_PPC64
-	if (systemcfg->platform == PLATFORM_PSERIES_LPAR)
+	if (_machine == PLATFORM_PSERIES_LPAR)
 		rtas_region = min(lmb.rmo_size, RTAS_INSTANTIATE_MAX);
 #endif
 	rtas_rmo_buf = lmb_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE, rtas_region);
diff --git a/arch/ppc64/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
similarity index 92%
rename from arch/ppc64/kernel/rtas_pci.c
rename to arch/powerpc/kernel/rtas_pci.c
index 3c3f191..0e5a8e1 100644
--- a/arch/ppc64/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -5,19 +5,19 @@
  * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
  *
  * RTAS specific routines for PCI.
- * 
+ *
  * Based on code from pci.c, chrp_pci.c and pSeries_pci.c
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
  * (at your option) any later version.
- *    
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
@@ -47,7 +47,7 @@
 static int ibm_read_pci_config;
 static int ibm_write_pci_config;
 
-static int config_access_valid(struct pci_dn *dn, int where)
+static inline int config_access_valid(struct pci_dn *dn, int where)
 {
 	if (where < 256)
 		return 1;
@@ -72,16 +72,14 @@
         return 0;
 }
 
-static int rtas_read_config(struct device_node *dn, int where, int size, u32 *val)
+static int rtas_read_config(struct pci_dn *pdn, int where, int size, u32 *val)
 {
 	int returnval = -1;
 	unsigned long buid, addr;
 	int ret;
-	struct pci_dn *pdn;
 
-	if (!dn || !dn->data)
+	if (!pdn)
 		return PCIBIOS_DEVICE_NOT_FOUND;
-	pdn = dn->data;
 	if (!config_access_valid(pdn, where))
 		return PCIBIOS_BAD_REGISTER_NUMBER;
 
@@ -90,7 +88,7 @@
 	buid = pdn->phb->buid;
 	if (buid) {
 		ret = rtas_call(ibm_read_pci_config, 4, 2, &returnval,
-				addr, buid >> 32, buid & 0xffffffff, size);
+				addr, BUID_HI(buid), BUID_LO(buid), size);
 	} else {
 		ret = rtas_call(read_pci_config, 2, 2, &returnval, addr, size);
 	}
@@ -100,7 +98,7 @@
 		return PCIBIOS_DEVICE_NOT_FOUND;
 
 	if (returnval == EEH_IO_ERROR_VALUE(size) &&
-	    eeh_dn_check_failure (dn, NULL))
+	    eeh_dn_check_failure (pdn->node, NULL))
 		return PCIBIOS_DEVICE_NOT_FOUND;
 
 	return PCIBIOS_SUCCESSFUL;
@@ -118,23 +116,23 @@
 		busdn = bus->sysdata;	/* must be a phb */
 
 	/* Search only direct children of the bus */
-	for (dn = busdn->child; dn; dn = dn->sibling)
-		if (dn->data && PCI_DN(dn)->devfn == devfn
+	for (dn = busdn->child; dn; dn = dn->sibling) {
+		struct pci_dn *pdn = PCI_DN(dn);
+		if (pdn && pdn->devfn == devfn
 		    && of_device_available(dn))
-			return rtas_read_config(dn, where, size, val);
+			return rtas_read_config(pdn, where, size, val);
+	}
 
 	return PCIBIOS_DEVICE_NOT_FOUND;
 }
 
-int rtas_write_config(struct device_node *dn, int where, int size, u32 val)
+int rtas_write_config(struct pci_dn *pdn, int where, int size, u32 val)
 {
 	unsigned long buid, addr;
 	int ret;
-	struct pci_dn *pdn;
 
-	if (!dn || !dn->data)
+	if (!pdn)
 		return PCIBIOS_DEVICE_NOT_FOUND;
-	pdn = dn->data;
 	if (!config_access_valid(pdn, where))
 		return PCIBIOS_BAD_REGISTER_NUMBER;
 
@@ -142,7 +140,8 @@
 		(pdn->devfn << 8) | (where & 0xff);
 	buid = pdn->phb->buid;
 	if (buid) {
-		ret = rtas_call(ibm_write_pci_config, 5, 1, NULL, addr, buid >> 32, buid & 0xffffffff, size, (ulong) val);
+		ret = rtas_call(ibm_write_pci_config, 5, 1, NULL, addr,
+			BUID_HI(buid), BUID_LO(buid), size, (ulong) val);
 	} else {
 		ret = rtas_call(write_pci_config, 3, 1, NULL, addr, size, (ulong)val);
 	}
@@ -165,10 +164,12 @@
 		busdn = bus->sysdata;	/* must be a phb */
 
 	/* Search only direct children of the bus */
-	for (dn = busdn->child; dn; dn = dn->sibling)
-		if (dn->data && PCI_DN(dn)->devfn == devfn
+	for (dn = busdn->child; dn; dn = dn->sibling) {
+		struct pci_dn *pdn = PCI_DN(dn);
+		if (pdn && pdn->devfn == devfn
 		    && of_device_available(dn))
-			return rtas_write_config(dn, where, size, val);
+			return rtas_write_config(pdn, where, size, val);
+	}
 	return PCIBIOS_DEVICE_NOT_FOUND;
 }
 
@@ -221,7 +222,7 @@
 	/* Python's register file is 1 MB in size. */
 	chip_regs = ioremap(reg_struct.address & ~(0xfffffUL), 0x100000);
 
-	/* 
+	/*
 	 * Firmware doesn't always clear this bit which is critical
 	 * for good performance - Anton
 	 */
@@ -292,7 +293,7 @@
 	if (bus_range == NULL || len < 2 * sizeof(int)) {
 		return 1;
  	}
- 
+
 	phb->first_busno =  bus_range[0];
 	phb->last_busno  =  bus_range[1];
 
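
The BUID_HI()/BUID_LO() pairs introduced above replace the open-coded "buid >> 32, buid & 0xffffffff" arguments. The same macros are removed from eeh.c later in this patch, so they have presumably moved to a shared header that this hunk does not show; judging from the code they replace, the definitions amount to:

	/* Presumed shared definitions (the header itself is not visible here):
	 * split a 64-bit Bus Unit ID into the two 32-bit halves that the
	 * ibm,read-pci-config / ibm,write-pci-config RTAS calls expect.
	 */
	#define BUID_HI(buid)	((buid) >> 32)
	#define BUID_LO(buid)	((buid) & 0xffffffff)
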
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index e22856e..bae4bff 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -33,6 +33,7 @@
 #include <asm/io.h>
 #include <asm/prom.h>
 #include <asm/processor.h>
+#include <asm/systemcfg.h>
 #include <asm/pgtable.h>
 #include <asm/smp.h>
 #include <asm/elf.h>
@@ -51,6 +52,9 @@
 #include <asm/page.h>
 #include <asm/mmu.h>
 #include <asm/lmb.h>
+#include <asm/xmon.h>
+
+#include "setup.h"
 
 #undef DEBUG
 
@@ -60,6 +64,13 @@
 #define DBG(fmt...)
 #endif
 
+#ifdef CONFIG_PPC_MULTIPLATFORM
+int _machine = 0;
+EXPORT_SYMBOL(_machine);
+#endif
+
+unsigned long klimit = (unsigned long) _end;
+
 /*
  * This still seems to be needed... -- paulus
  */ 
@@ -510,8 +521,8 @@
 	 * On pSeries LPAR, we need to know how many cpus
 	 * could possibly be added to this partition.
 	 */
-	if (systemcfg->platform == PLATFORM_PSERIES_LPAR &&
-				(dn = of_find_node_by_path("/rtas"))) {
+	if (_machine == PLATFORM_PSERIES_LPAR &&
+	    (dn = of_find_node_by_path("/rtas"))) {
 		int num_addr_cell, num_size_cell, maxcpus;
 		unsigned int *ireg;
 
@@ -555,7 +566,27 @@
 			cpu_set(cpu ^ 0x1, cpu_sibling_map[cpu]);
 	}
 
-	systemcfg->processorCount = num_present_cpus();
+	_systemcfg->processorCount = num_present_cpus();
 #endif /* CONFIG_PPC64 */
 }
 #endif /* CONFIG_SMP */
+
+#ifdef CONFIG_XMON
+static int __init early_xmon(char *p)
+{
+	/* ensure xmon is enabled */
+	if (p) {
+		if (strncmp(p, "on", 2) == 0)
+			xmon_init(1);
+		if (strncmp(p, "off", 3) == 0)
+			xmon_init(0);
+		if (strncmp(p, "early", 5) != 0)
+			return 0;
+	}
+	xmon_init(1);
+	debugger(NULL);
+
+	return 0;
+}
+early_param("xmon", early_xmon);
+#endif
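
Moving early_xmon() here from setup_64.c (the matching removal appears further down) gives 32-bit kernels the same "xmon=" early parameter. A stand-alone sketch of the resulting decision table, with descriptive strings standing in for xmon_init()/debugger():

	/* Sketch of the "xmon=" handling above; not kernel code. */
	#include <stdio.h>
	#include <string.h>

	static const char *xmon_effect(const char *p)
	{
		if (p) {
			if (strncmp(p, "on", 2) == 0)
				return "xmon enabled";
			if (strncmp(p, "off", 3) == 0)
				return "xmon disabled";
			if (strncmp(p, "early", 5) != 0)
				return "unrecognised value, xmon state left unchanged";
		}
		return "xmon enabled, debugger entered immediately";
	}

	int main(void)
	{
		printf("xmon=on    -> %s\n", xmon_effect("on"));
		printf("xmon=off   -> %s\n", xmon_effect("off"));
		printf("xmon=early -> %s\n", xmon_effect("early"));
		printf("xmon       -> %s\n", xmon_effect(NULL));
		return 0;
	}
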
diff --git a/arch/powerpc/kernel/setup.h b/arch/powerpc/kernel/setup.h
new file mode 100644
index 0000000..2ebba75
--- /dev/null
+++ b/arch/powerpc/kernel/setup.h
@@ -0,0 +1,6 @@
+#ifndef _POWERPC_KERNEL_SETUP_H
+#define _POWERPC_KERNEL_SETUP_H
+
+void check_for_initrd(void);
+
+#endif /* _POWERPC_KERNEL_SETUP_H */
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 3af2631..c98cfcc 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -40,6 +40,8 @@
 #include <asm/xmon.h>
 #include <asm/time.h>
 
+#include "setup.h"
+
 #define DBG(fmt...)
 
 #if defined CONFIG_KGDB
@@ -70,8 +72,6 @@
 int have_of = 1;
 
 #ifdef CONFIG_PPC_MULTIPLATFORM
-int _machine = 0;
-
 extern void prep_init(void);
 extern void pmac_init(void);
 extern void chrp_init(void);
@@ -279,7 +279,6 @@
 /* Warning, IO base is not yet inited */
 void __init setup_arch(char **cmdline_p)
 {
-	extern char *klimit;
 	extern void do_init_bootmem(void);
 
 	/* so udelay does something sensible, assume <= 1000 bogomips */
@@ -303,14 +302,9 @@
 		pmac_feature_init();	/* New cool way */
 #endif
 
-#ifdef CONFIG_XMON
-	xmon_map_scc();
-	if (strstr(cmd_line, "xmon")) {
-		xmon_init(1);
-		debugger(NULL);
-	}
-#endif /* CONFIG_XMON */
-	if ( ppc_md.progress ) ppc_md.progress("setup_arch: enter", 0x3eab);
+#ifdef CONFIG_XMON_DEFAULT
+	xmon_init(1);
+#endif
 
 #if defined(CONFIG_KGDB)
 	if (ppc_md.kgdb_map_scc)
@@ -343,7 +337,7 @@
 	init_mm.start_code = PAGE_OFFSET;
 	init_mm.end_code = (unsigned long) _etext;
 	init_mm.end_data = (unsigned long) _edata;
-	init_mm.brk = (unsigned long) klimit;
+	init_mm.brk = klimit;
 
 	/* Save unparsed command line copy for /proc/cmdline */
 	strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 0471e84..6791668 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -61,6 +61,8 @@
 #include <asm/xmon.h>
 #include <asm/udbg.h>
 
+#include "setup.h"
+
 #ifdef DEBUG
 #define DBG(fmt...) udbg_printf(fmt)
 #else
@@ -94,15 +96,6 @@
 	do { udbg_putc = call_rtas_display_status_delay; } while(0)
 #endif
 
-/* extern void *stab; */
-extern unsigned long klimit;
-
-extern void mm_init_ppc64(void);
-extern void stab_initialize(unsigned long stab);
-extern void htab_initialize(void);
-extern void early_init_devtree(void *flat_dt);
-extern void unflatten_device_tree(void);
-
 int have_of = 1;
 int boot_cpuid = 0;
 int boot_cpuid_phys = 0;
@@ -254,11 +247,10 @@
 	 * Iterate all ppc_md structures until we find the proper
 	 * one for the current machine type
 	 */
-	DBG("Probing machine type for platform %x...\n",
-	    systemcfg->platform);
+	DBG("Probing machine type for platform %x...\n", _machine);
 
 	for (mach = machines; *mach; mach++) {
-		if ((*mach)->probe(systemcfg->platform))
+		if ((*mach)->probe(_machine))
 			break;
 	}
 	/* What can we do if we didn't find ? */
@@ -290,6 +282,28 @@
 	DBG(" <- early_setup()\n");
 }
 
+#ifdef CONFIG_SMP
+void early_setup_secondary(void)
+{
+	struct paca_struct *lpaca = get_paca();
+
+	/* Mark enabled in PACA */
+	lpaca->proc_enabled = 0;
+
+	/* Initialize hash table for that CPU */
+	htab_initialize_secondary();
+
+	/* Initialize STAB/SLB. We use a virtual address as it works
+	 * in real mode on pSeries and we want a virtual address on
+	 * iSeries anyway
+	 */
+	if (cpu_has_feature(CPU_FTR_SLB))
+		slb_initialize();
+	else
+		stab_initialize(lpaca->stab_addr);
+}
+
+#endif /* CONFIG_SMP */
 
 #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
 void smp_release_cpus(void)
@@ -315,7 +329,8 @@
 #endif /* CONFIG_SMP || CONFIG_KEXEC */
 
 /*
- * Initialize some remaining members of the ppc64_caches and systemcfg structures
+ * Initialize some remaining members of the ppc64_caches and systemcfg
+ * structures
  * (at least until we get rid of them completely). This is mostly some
  * cache informations about the CPU that will be used by cache flush
  * routines and/or provided to userland
@@ -340,7 +355,7 @@
 			const char *dc, *ic;
 
 			/* Then read cache informations */
-			if (systemcfg->platform == PLATFORM_POWERMAC) {
+			if (_machine == PLATFORM_POWERMAC) {
 				dc = "d-cache-block-size";
 				ic = "i-cache-block-size";
 			} else {
@@ -360,8 +375,8 @@
 				DBG("Argh, can't find dcache properties ! "
 				    "sizep: %p, lsizep: %p\n", sizep, lsizep);
 
-			systemcfg->dcache_size = ppc64_caches.dsize = size;
-			systemcfg->dcache_line_size =
+			_systemcfg->dcache_size = ppc64_caches.dsize = size;
+			_systemcfg->dcache_line_size =
 				ppc64_caches.dline_size = lsize;
 			ppc64_caches.log_dline_size = __ilog2(lsize);
 			ppc64_caches.dlines_per_page = PAGE_SIZE / lsize;
@@ -378,8 +393,8 @@
 				DBG("Argh, can't find icache properties ! "
 				    "sizep: %p, lsizep: %p\n", sizep, lsizep);
 
-			systemcfg->icache_size = ppc64_caches.isize = size;
-			systemcfg->icache_line_size =
+			_systemcfg->icache_size = ppc64_caches.isize = size;
+			_systemcfg->icache_line_size =
 				ppc64_caches.iline_size = lsize;
 			ppc64_caches.log_iline_size = __ilog2(lsize);
 			ppc64_caches.ilines_per_page = PAGE_SIZE / lsize;
@@ -387,10 +402,12 @@
 	}
 
 	/* Add an eye catcher and the systemcfg layout version number */
-	strcpy(systemcfg->eye_catcher, "SYSTEMCFG:PPC64");
-	systemcfg->version.major = SYSTEMCFG_MAJOR;
-	systemcfg->version.minor = SYSTEMCFG_MINOR;
-	systemcfg->processor = mfspr(SPRN_PVR);
+	strcpy(_systemcfg->eye_catcher, "SYSTEMCFG:PPC64");
+	_systemcfg->version.major = SYSTEMCFG_MAJOR;
+	_systemcfg->version.minor = SYSTEMCFG_MINOR;
+	_systemcfg->processor = mfspr(SPRN_PVR);
+	_systemcfg->platform = _machine;
+	_systemcfg->physicalMemorySize = lmb_phys_mem_size();
 
 	DBG(" <- initialize_cache_info()\n");
 }
@@ -479,10 +496,10 @@
 	printk("-----------------------------------------------------\n");
 	printk("ppc64_pft_size                = 0x%lx\n", ppc64_pft_size);
 	printk("ppc64_interrupt_controller    = 0x%ld\n", ppc64_interrupt_controller);
-	printk("systemcfg                     = 0x%p\n", systemcfg);
-	printk("systemcfg->platform           = 0x%x\n", systemcfg->platform);
-	printk("systemcfg->processorCount     = 0x%lx\n", systemcfg->processorCount);
-	printk("systemcfg->physicalMemorySize = 0x%lx\n", systemcfg->physicalMemorySize);
+	printk("systemcfg                     = 0x%p\n", _systemcfg);
+	printk("systemcfg->platform           = 0x%x\n", _systemcfg->platform);
+	printk("systemcfg->processorCount     = 0x%lx\n", _systemcfg->processorCount);
+	printk("systemcfg->physicalMemorySize = 0x%lx\n", _systemcfg->physicalMemorySize);
 	printk("ppc64_caches.dcache_line_size = 0x%x\n",
 			ppc64_caches.dline_size);
 	printk("ppc64_caches.icache_line_size = 0x%x\n",
@@ -564,12 +581,12 @@
 	for (i = 0; i < __NR_syscalls; i++) {
 		if (sys_call_table[i*2] != sys_ni_syscall) {
 			count64++;
-			systemcfg->syscall_map_64[i >> 5] |=
+			_systemcfg->syscall_map_64[i >> 5] |=
 				0x80000000UL >> (i & 0x1f);
 		}
 		if (sys_call_table[i*2+1] != sys_ni_syscall) {
 			count32++;
-			systemcfg->syscall_map_32[i >> 5] |=
+			_systemcfg->syscall_map_32[i >> 5] |=
 				0x80000000UL >> (i & 0x1f);
 		}
 	}
@@ -858,26 +875,6 @@
 }
 EXPORT_SYMBOL(check_legacy_ioport);
 
-#ifdef CONFIG_XMON
-static int __init early_xmon(char *p)
-{
-	/* ensure xmon is enabled */
-	if (p) {
-		if (strncmp(p, "on", 2) == 0)
-			xmon_init(1);
-		if (strncmp(p, "off", 3) == 0)
-			xmon_init(0);
-		if (strncmp(p, "early", 5) != 0)
-			return 0;
-	}
-	xmon_init(1);
-	debugger(NULL);
-
-	return 0;
-}
-early_param("xmon", early_xmon);
-#endif
-
 void cpu_die(void)
 {
 	if (ppc_md.cpu_die)
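
The syscall-map loop above (now writing into the _systemcfg copy) packs one bit per syscall number, 32 to a word, most significant bit first. A small stand-alone illustration of the index and bit arithmetic (syscall number 45 is an arbitrary example):

	/* Sketch of the syscall_map bit layout used above; not kernel code. */
	#include <stdio.h>

	int main(void)
	{
		unsigned int i = 45;				/* example syscall number */
		unsigned int word = i >> 5;			/* 45 / 32 -> word 1 */
		unsigned long bit = 0x80000000UL >> (i & 0x1f);	/* 45 % 32 = 13 -> 0x40000 */

		printf("syscall %u -> syscall_map[%u] |= %#lx\n", i, word, bit);
		return 0;
	}
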
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 081d931..a7c4515 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -42,6 +42,7 @@
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
+#include <asm/sigcontext.h>
 #ifdef CONFIG_PPC64
 #include "ppc32.h"
 #include <asm/unistd.h>
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 5c330c3..e28a139 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -44,6 +44,7 @@
 #include <asm/cputable.h>
 #include <asm/system.h>
 #include <asm/mpic.h>
+#include <asm/systemcfg.h>
 #ifdef CONFIG_PPC64
 #include <asm/paca.h>
 #endif
@@ -368,9 +369,11 @@
 	if (cpu == boot_cpuid)
 		return -EBUSY;
 
-	systemcfg->processorCount--;
 	cpu_clear(cpu, cpu_online_map);
+#ifdef CONFIG_PPC64
+	_systemcfg->processorCount--;
 	fixup_irqs(cpu_online_map);
+#endif
 	return 0;
 }
 
@@ -388,9 +391,11 @@
 	while (!cpu_online(cpu))
 		cpu_relax();
 
+#ifdef CONFIG_PPC64
 	fixup_irqs(cpu_online_map);
 	/* counter the irq disable in fixup_irqs */
 	local_irq_enable();
+#endif
 	return 0;
 }
 
@@ -419,7 +424,9 @@
 	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
 		cpu_relax();
 
+#ifdef CONFIG_PPC64
 	flush_tlb_pending();
+#endif
 	cpu_set(cpu, cpu_online_map);
 	local_irq_enable();
 }
@@ -510,6 +517,7 @@
 
 	smp_store_cpu_info(cpu);
 	set_dec(tb_ticks_per_jiffy);
+	preempt_disable();
 	cpu_callin_map[cpu] = 1;
 
 	smp_ops->setup_cpu(cpu);
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
index a8210ed..9c921d1 100644
--- a/arch/powerpc/kernel/sys_ppc32.c
+++ b/arch/powerpc/kernel/sys_ppc32.c
@@ -52,7 +52,6 @@
 #include <asm/semaphore.h>
 #include <asm/time.h>
 #include <asm/mmu_context.h>
-#include <asm/systemcfg.h>
 #include <asm/ppc-pci.h>
 
 /* readdir & getdents */
diff --git a/arch/ppc64/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
similarity index 99%
rename from arch/ppc64/kernel/sysfs.c
rename to arch/powerpc/kernel/sysfs.c
index e99ec62..850af19 100644
--- a/arch/ppc64/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -232,7 +232,7 @@
 		sysdev_create_file(s, &attr_pmc7);
 	if (cur_cpu_spec->num_pmcs >= 8)
 		sysdev_create_file(s, &attr_pmc8);
-  
+
 	if (cpu_has_feature(CPU_FTR_SMT))
 		sysdev_create_file(s, &attr_purr);
 }
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index a6282b6..260b6ec 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -271,13 +271,13 @@
 	 * tb_to_xs and stamp_xsec values are consistent.  If not, then it
 	 * loops back and reads them again until this criteria is met.
 	 */
-	++(systemcfg->tb_update_count);
+	++(_systemcfg->tb_update_count);
 	smp_wmb();
-	systemcfg->tb_orig_stamp = new_tb_stamp;
-	systemcfg->stamp_xsec = new_stamp_xsec;
-	systemcfg->tb_to_xs = new_tb_to_xs;
+	_systemcfg->tb_orig_stamp = new_tb_stamp;
+	_systemcfg->stamp_xsec = new_stamp_xsec;
+	_systemcfg->tb_to_xs = new_tb_to_xs;
 	smp_wmb();
-	++(systemcfg->tb_update_count);
+	++(_systemcfg->tb_update_count);
 #endif
 }
 
@@ -357,8 +357,9 @@
 				do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
 				tb_to_xs = divres.result_low;
 				do_gtod.varp->tb_to_xs = tb_to_xs;
-				systemcfg->tb_ticks_per_sec = tb_ticks_per_sec;
-				systemcfg->tb_to_xs = tb_to_xs;
+				_systemcfg->tb_ticks_per_sec =
+					tb_ticks_per_sec;
+				_systemcfg->tb_to_xs = tb_to_xs;
 			}
 			else {
 				printk( "Titan recalibrate: FAILED (difference > 4 percent)\n"
@@ -483,6 +484,8 @@
 	unsigned long offset = tb_ticks_per_jiffy / max_cpus;
 	unsigned long previous_tb = per_cpu(last_jiffy, boot_cpuid);
 
+	/* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */
+	previous_tb -= tb_ticks_per_jiffy;
 	for_each_cpu(i) {
 		if (i != boot_cpuid) {
 			previous_tb += offset;
@@ -559,8 +562,8 @@
 	update_gtod(tb_last_jiffy, new_xsec, do_gtod.varp->tb_to_xs);
 
 #ifdef CONFIG_PPC64
-	systemcfg->tz_minuteswest = sys_tz.tz_minuteswest;
-	systemcfg->tz_dsttime = sys_tz.tz_dsttime;
+	_systemcfg->tz_minuteswest = sys_tz.tz_minuteswest;
+	_systemcfg->tz_dsttime = sys_tz.tz_dsttime;
 #endif
 
 	write_sequnlock_irqrestore(&xtime_lock, flags);
@@ -711,11 +714,11 @@
 	do_gtod.varp->tb_to_xs = tb_to_xs;
 	do_gtod.tb_to_us = tb_to_us;
 #ifdef CONFIG_PPC64
-	systemcfg->tb_orig_stamp = tb_last_jiffy;
-	systemcfg->tb_update_count = 0;
-	systemcfg->tb_ticks_per_sec = tb_ticks_per_sec;
-	systemcfg->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC;
-	systemcfg->tb_to_xs = tb_to_xs;
+	_systemcfg->tb_orig_stamp = tb_last_jiffy;
+	_systemcfg->tb_update_count = 0;
+	_systemcfg->tb_ticks_per_sec = tb_ticks_per_sec;
+	_systemcfg->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC;
+	_systemcfg->tb_to_xs = tb_to_xs;
 #endif
 
 	time_freq = 0;
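
The paired increments of tb_update_count in update_gtod() above form a seqcount-style protocol: a lock-free reader snapshots the counter, copies tb_orig_stamp, stamp_xsec and tb_to_xs, then re-checks the counter and retries if it changed or is odd. A hedged C sketch of that reader side (the real consumer is the vDSO/legacy gettimeofday path, which uses proper SMP read barriers rather than the plain compiler barrier shown here):

	/* Reader-side sketch for the tb_update_count protocol; not kernel code. */
	#define barrier() __asm__ __volatile__("" ::: "memory")

	struct gtod_vars {
		volatile unsigned long tb_update_count;
		unsigned long tb_orig_stamp;
		unsigned long stamp_xsec;
		unsigned long tb_to_xs;
	};

	void read_gtod_snapshot(const struct gtod_vars *v, unsigned long *orig_stamp,
				unsigned long *xsec, unsigned long *to_xs)
	{
		unsigned long count;

		do {
			count = v->tb_update_count;	/* odd while an update is in flight */
			barrier();
			*orig_stamp = v->tb_orig_stamp;
			*xsec = v->stamp_xsec;
			*to_xs = v->tb_to_xs;
			barrier();
		} while ((count & 1) || count != v->tb_update_count);
	}
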
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 0578f83..2020bb7 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -129,7 +129,7 @@
 	nl = 1;
 #endif
 #ifdef CONFIG_PPC64
-	switch (systemcfg->platform) {
+	switch (_machine) {
 	case PLATFORM_PSERIES:
 		printk("PSERIES ");
 		nl = 1;
diff --git a/arch/powerpc/lib/bitops.c b/arch/powerpc/lib/bitops.c
index b67ce30..f68ad71 100644
--- a/arch/powerpc/lib/bitops.c
+++ b/arch/powerpc/lib/bitops.c
@@ -41,7 +41,7 @@
 	tmp = *p;
 
 found_first:
-	tmp &= (~0UL >> (64 - size));
+	tmp &= (~0UL >> (BITS_PER_LONG - size));
 	if (tmp == 0UL)		/* Are any bits set? */
 		return result + size;	/* Nope. */
 found_middle:
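
The BITS_PER_LONG change above matters for 32-bit builds: the tail of the bitmap must be masked with ~0UL >> (BITS_PER_LONG - size) so that only bits inside the bitmap survive, and the old hard-coded 64 makes the shift count too large on a 32-bit build, which is undefined in C and in practice yields the wrong mask. A tiny stand-alone illustration:

	/* Tail-mask illustration for find_next_bit() above; not kernel code. */
	#include <stdio.h>

	#define BITS_PER_LONG	(8 * (int)sizeof(unsigned long))

	int main(void)
	{
		unsigned long tmp = ~0UL;	/* pretend the final word is all ones */
		int size = 5;			/* only 5 of its bits belong to the bitmap */

		tmp &= (~0UL >> (BITS_PER_LONG - size));
		printf("%#lx\n", tmp);		/* 0x1f on both 32-bit and 64-bit */
		return 0;
	}
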
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 22e4748..706e8a6 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -84,10 +84,11 @@
 extern unsigned long dart_tablebase;
 #endif /* CONFIG_U3_DART */
 
+static unsigned long _SDR1;
+struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
+
 hpte_t *htab_address;
 unsigned long htab_hash_mask;
-unsigned long _SDR1;
-struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
 int mmu_linear_psize = MMU_PAGE_4K;
 int mmu_virtual_psize = MMU_PAGE_4K;
 #ifdef CONFIG_HUGETLB_PAGE
@@ -165,7 +166,7 @@
 		 * normal insert callback here.
 		 */
 #ifdef CONFIG_PPC_ISERIES
-		if (systemcfg->platform == PLATFORM_ISERIES_LPAR)
+		if (_machine == PLATFORM_ISERIES_LPAR)
 			ret = iSeries_hpte_insert(hpteg, va,
 						  virt_to_abs(paddr),
 						  tmp_mode,
@@ -174,7 +175,7 @@
 		else
 #endif
 #ifdef CONFIG_PPC_PSERIES
-		if (systemcfg->platform & PLATFORM_LPAR)
+		if (_machine & PLATFORM_LPAR)
 			ret = pSeries_lpar_hpte_insert(hpteg, va,
 						       virt_to_abs(paddr),
 						       tmp_mode,
@@ -293,7 +294,7 @@
 	 * Not in the device-tree, let's fallback on known size
 	 * list for 16M capable GP & GR
 	 */
-	if ((systemcfg->platform != PLATFORM_ISERIES_LPAR) &&
+	if ((_machine != PLATFORM_ISERIES_LPAR) &&
 	    cpu_has_feature(CPU_FTR_16M_PAGE))
 		memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
 		       sizeof(mmu_psize_defaults_gp));
@@ -364,7 +365,7 @@
 
 static unsigned long __init htab_get_table_size(void)
 {
-	unsigned long rnd_mem_size, pteg_count;
+	unsigned long mem_size, rnd_mem_size, pteg_count;
 
 	/* If hash size isn't already provided by the platform, we try to
 	 * retreive it from the device-tree. If it's not there neither, we
@@ -376,8 +377,9 @@
 		return 1UL << ppc64_pft_size;
 
 	/* round mem_size up to next power of 2 */
-	rnd_mem_size = 1UL << __ilog2(systemcfg->physicalMemorySize);
-	if (rnd_mem_size < systemcfg->physicalMemorySize)
+	mem_size = lmb_phys_mem_size();
+	rnd_mem_size = 1UL << __ilog2(mem_size);
+	if (rnd_mem_size < mem_size)
 		rnd_mem_size <<= 1;
 
 	/* # pages / 2 */
@@ -386,6 +388,15 @@
 	return pteg_count << 7;
 }
 
+#ifdef CONFIG_MEMORY_HOTPLUG
+void create_section_mapping(unsigned long start, unsigned long end)
+{
+		BUG_ON(htab_bolt_mapping(start, end, start,
+			_PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX,
+			mmu_linear_psize));
+}
+#endif /* CONFIG_MEMORY_HOTPLUG */
+
 void __init htab_initialize(void)
 {
 	unsigned long table, htab_size_bytes;
@@ -410,7 +421,7 @@
 
 	htab_hash_mask = pteg_count - 1;
 
-	if (systemcfg->platform & PLATFORM_LPAR) {
+	if (platform_is_lpar()) {
 		/* Using a hypervisor which owns the htab */
 		htab_address = NULL;
 		_SDR1 = 0; 
@@ -431,6 +442,9 @@
 
 		/* Initialize the HPT with no entries */
 		memset((void *)table, 0, htab_size_bytes);
+
+		/* Set SDR1 */
+		mtspr(SPRN_SDR1, _SDR1);
 	}
 
 	mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;
@@ -500,6 +514,12 @@
 #undef KB
 #undef MB
 
+void __init htab_initialize_secondary(void)
+{
+	if (!platform_is_lpar())
+		mtspr(SPRN_SDR1, _SDR1);
+}
+
 /*
  * Called by asm hashtable.S for doing lazy icache flush
  */
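
htab_get_table_size() now sizes the hash table from lmb_phys_mem_size() rather than the systemcfg copy of the memory size. A quick stand-alone walk-through of the rounding step visible in the hunk (768 MB is an arbitrary example; ilog2_ul() is a stand-in for the kernel's __ilog2()):

	/* Power-of-two rounding as in htab_get_table_size(); not kernel code. */
	#include <stdio.h>

	static int ilog2_ul(unsigned long x)	/* stand-in for __ilog2() */
	{
		int i = -1;

		while (x) {
			x >>= 1;
			i++;
		}
		return i;
	}

	int main(void)
	{
		unsigned long mem_size = 768UL * 1024 * 1024;
		unsigned long rnd_mem_size = 1UL << ilog2_ul(mem_size);

		if (rnd_mem_size < mem_size)
			rnd_mem_size <<= 1;	/* 768 MB rounds up to 1 GB */
		printf("%#lx -> %#lx\n", mem_size, rnd_mem_size);
		return 0;
	}

The final "return pteg_count << 7" then converts the PTEG count into a table size in bytes, since each PTE group is 128 bytes.
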
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 4612a79..7d4b8b5 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -84,9 +84,6 @@
 /* XXX should be in current.h  -- paulus */
 extern struct task_struct *current_set[NR_CPUS];
 
-char *klimit = _end;
-struct device_node *memory_node;
-
 extern int init_bootmem_done;
 
 /*
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index ce974c8..1134f70 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -20,6 +20,8 @@
  *
  */
 
+#undef DEBUG
+
 #include <linux/config.h>
 #include <linux/signal.h>
 #include <linux/sched.h>
@@ -64,6 +66,12 @@
 #include <asm/vdso.h>
 #include <asm/imalloc.h>
 
+#ifdef DEBUG
+#define DBG(fmt...) printk(fmt)
+#else
+#define DBG(fmt...)
+#endif
+
 #if PGTABLE_RANGE > USER_VSID_RANGE
 #warning Limited user VSID range means pagetable space is wasted
 #endif
@@ -72,8 +80,6 @@
 #warning TASK_SIZE is smaller than it needs to be.
 #endif
 
-unsigned long klimit = (unsigned long)_end;
-
 /* max amount of RAM to use */
 unsigned long __max_memory;
 
@@ -188,14 +194,14 @@
 }
 
 #ifdef CONFIG_PPC_64K_PAGES
-static const int pgtable_cache_size[2] = {
-	PTE_TABLE_SIZE, PGD_TABLE_SIZE
+static const unsigned int pgtable_cache_size[3] = {
+	PTE_TABLE_SIZE, PMD_TABLE_SIZE, PGD_TABLE_SIZE
 };
 static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
-	"pte_pmd_cache", "pgd_cache",
+	"pte_pmd_cache", "pmd_cache", "pgd_cache",
 };
 #else
-static const int pgtable_cache_size[2] = {
+static const unsigned int pgtable_cache_size[2] = {
 	PTE_TABLE_SIZE, PMD_TABLE_SIZE
 };
 static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
@@ -213,6 +219,8 @@
 		int size = pgtable_cache_size[i];
 		const char *name = pgtable_cache_name[i];
 
+		DBG("Allocating page table cache %s (#%d) "
+		    "for size: %08x...\n", name, i, size);
 		pgtable_cache[i] = kmem_cache_create(name,
 						     size, size,
 						     SLAB_HWCACHE_ALIGN |
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 6f55efd..1dd3cc6 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -110,6 +110,7 @@
 void online_page(struct page *page)
 {
 	ClearPageReserved(page);
+	set_page_count(page, 0);
 	free_cold_page(page);
 	totalram_pages++;
 	num_physpages++;
@@ -127,6 +128,9 @@
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 
+	start += KERNELBASE;
+	create_section_mapping(start, start + size);
+
 	/* this should work for most non-highmem platforms */
 	zone = pgdata->node_zones;
 
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 9008424..c7f7bb6 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -122,8 +122,11 @@
 		 *
 		 */
 		if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
-				      mmu_virtual_psize))
-			panic("Can't map bolted IO mapping");
+				      mmu_virtual_psize)) {
+			printk(KERN_ERR "Failed to do bolted mapping IO "
+			       "memory at %016lx !\n", pa);
+			return -ENOMEM;
+		}
 	}
 	return 0;
 }
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index fa325db..cfbb4e1 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -20,6 +20,7 @@
 #include <asm/cputable.h>
 #include <asm/lmb.h>
 #include <asm/abs_addr.h>
+#include <asm/firmware.h>
 
 struct stab_entry {
 	unsigned long esid_data;
@@ -256,7 +257,7 @@
 
 		paca[cpu].stab_addr = newstab;
 		paca[cpu].stab_real = virt_to_abs(newstab);
-		printk(KERN_DEBUG "Segment table for CPU %d at 0x%lx "
+		printk(KERN_INFO "Segment table for CPU %d at 0x%lx "
 		       "virtual, 0x%lx absolute\n",
 		       cpu, paca[cpu].stab_addr, paca[cpu].stab_real);
 	}
@@ -270,10 +271,28 @@
 void stab_initialize(unsigned long stab)
 {
 	unsigned long vsid = get_kernel_vsid(KERNELBASE);
+	unsigned long stabreal;
 
 	asm volatile("isync; slbia; isync":::"memory");
 	make_ste(stab, GET_ESID(KERNELBASE), vsid);
 
 	/* Order update */
 	asm volatile("sync":::"memory");
+
+	/* Set ASR */
+	stabreal = get_paca()->stab_real | 0x1ul;
+
+#ifdef CONFIG_PPC_ISERIES
+	if (firmware_has_feature(FW_FEATURE_ISERIES)) {
+		HvCall1(HvCallBaseSetASR, stabreal);
+		return;
+	}
+#endif /* CONFIG_PPC_ISERIES */
+#ifdef CONFIG_PPC_PSERIES
+	if (platform_is_lpar()) {
+		plpar_hcall_norets(H_SET_ASR, stabreal);
+		return;
+	}
+#endif
+	mtspr(SPRN_ASR, stabreal);
 }
diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c
index c4ee547..e3a024e 100644
--- a/arch/powerpc/oprofile/op_model_power4.c
+++ b/arch/powerpc/oprofile/op_model_power4.c
@@ -233,8 +233,7 @@
 	mmcra = mfspr(SPRN_MMCRA);
 
 	/* Were we in the hypervisor? */
-	if ((systemcfg->platform == PLATFORM_PSERIES_LPAR) &&
-	    (mmcra & MMCRA_SIHV))
+	if (platform_is_lpar() && (mmcra & MMCRA_SIHV))
 		/* function descriptor madness */
 		return *((unsigned long *)hypervisor_bucket);
 
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index ecd32d5..4099dda 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -361,7 +361,9 @@
 	printk(KERN_INFO "OpenPIC at %lx\n", opaddr);
 
 	irq_count = NR_IRQS - NUM_ISA_INTERRUPTS - 4; /* leave room for IPIs */
-	prom_get_irq_senses(init_senses, NUM_8259_INTERRUPTS, NR_IRQS - 4);
+	prom_get_irq_senses(init_senses, NUM_ISA_INTERRUPTS, NR_IRQS - 4);
+	/* i8259 cascade is always positive level */
+	init_senses[0] = IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE;
 
 	iranges = (unsigned int *) get_property(np, "interrupt-ranges", &len);
 	if (iranges == NULL)
diff --git a/arch/powerpc/platforms/iseries/irq.c b/arch/powerpc/platforms/iseries/irq.c
index a06603d..01090e9 100644
--- a/arch/powerpc/platforms/iseries/irq.c
+++ b/arch/powerpc/platforms/iseries/irq.c
@@ -103,6 +103,9 @@
 		struct pt_regs *regsParm)
 {
 	int irq;
+#ifdef CONFIG_IRQSTACKS
+	struct thread_info *curtp, *irqtp;
+#endif
 
 	++Pci_Interrupt_Count;
 
@@ -110,7 +113,20 @@
 	case XmPciLpEvent_SlotInterrupt:
 		irq = eventParm->hvLpEvent.xCorrelationToken;
 		/* Dispatch the interrupt handlers for this irq */
-		ppc_irq_dispatch_handler(regsParm, irq);
+#ifdef CONFIG_IRQSTACKS
+		/* Switch to the irq stack to handle this */
+		curtp = current_thread_info();
+		irqtp = hardirq_ctx[smp_processor_id()];
+		if (curtp != irqtp) {
+			irqtp->task = curtp->task;
+			irqtp->flags = 0;
+			call___do_IRQ(irq, regsParm, irqtp);
+			irqtp->task = NULL;
+			if (irqtp->flags)
+				set_bits(irqtp->flags, &curtp->flags);
+		} else
+#endif
+			__do_IRQ(irq, regsParm);
 		HvCallPci_eoi(eventParm->eventData.slotInterrupt.busNumber,
 			eventParm->eventData.slotInterrupt.subBusNumber,
 			eventParm->eventData.slotInterrupt.deviceId);
@@ -310,10 +326,8 @@
 }
 
 /*
- * Need to define this so ppc_irq_dispatch_handler will NOT call
- * enable_IRQ at the end of interrupt handling.  However, this does
- * nothing because there is not enough information provided to do
- * the EOI HvCall.  This is done by XmPciLpEvent.c
+ * This does nothing because there is not enough information
+ * provided to do the EOI HvCall.  This is done by XmPciLpEvent.c
  */
 static void iSeries_end_IRQ(unsigned int irq)
 {
diff --git a/arch/powerpc/platforms/iseries/misc.S b/arch/powerpc/platforms/iseries/misc.S
index 09f1452..dfe7aa1 100644
--- a/arch/powerpc/platforms/iseries/misc.S
+++ b/arch/powerpc/platforms/iseries/misc.S
@@ -15,6 +15,7 @@
 
 #include <asm/processor.h>
 #include <asm/asm-offsets.h>
+#include <asm/ppc_asm.h>
 
 	.text
 
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
index 7f8f0cd..6a29f30 100644
--- a/arch/powerpc/platforms/iseries/setup.c
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -39,7 +39,8 @@
 #include <asm/sections.h>
 #include <asm/iommu.h>
 #include <asm/firmware.h>
-
+#include <asm/systemcfg.h>
+#include <asm/system.h>
 #include <asm/time.h>
 #include <asm/paca.h>
 #include <asm/cache.h>
@@ -71,7 +72,7 @@
 #endif
 
 /* Function Prototypes */
-static void build_iSeries_Memory_Map(void);
+static unsigned long build_iSeries_Memory_Map(void);
 static void iseries_shared_idle(void);
 static void iseries_dedicated_idle(void);
 #ifdef CONFIG_PCI
@@ -84,7 +85,6 @@
 int piranha_simulator;
 
 extern int rd_size;		/* Defined in drivers/block/rd.c */
-extern unsigned long klimit;
 extern unsigned long embedded_sysmap_start;
 extern unsigned long embedded_sysmap_end;
 
@@ -403,9 +403,11 @@
  * a table used to translate Linux's physical addresses to these
  * absolute addresses.  Absolute addresses are needed when
  * communicating with the hypervisor (e.g. to build HPT entries)
+ *
+ * Returns the physical memory size
  */
 
-static void __init build_iSeries_Memory_Map(void)
+static unsigned long __init build_iSeries_Memory_Map(void)
 {
 	u32 loadAreaFirstChunk, loadAreaLastChunk, loadAreaSize;
 	u32 nextPhysChunk;
@@ -538,7 +540,7 @@
 	 * which should be equal to
 	 *   nextPhysChunk
 	 */
-	systemcfg->physicalMemorySize = chunk_to_addr(nextPhysChunk);
+	return chunk_to_addr(nextPhysChunk);
 }
 
 /*
@@ -564,8 +566,8 @@
 	printk("Max physical processors = %d\n",
 			itVpdAreas.xSlicMaxPhysicalProcs);
 
-	systemcfg->processor = xIoHriProcessorVpd[procIx].xPVR;
-	printk("Processor version = %x\n", systemcfg->processor);
+	_systemcfg->processor = xIoHriProcessorVpd[procIx].xPVR;
+	printk("Processor version = %x\n", _systemcfg->processor);
 }
 
 static void iSeries_show_cpuinfo(struct seq_file *m)
@@ -702,7 +704,6 @@
 
 static void iseries_dedicated_idle(void)
 {
-	long oldval;
 	set_thread_flag(TIF_POLLING_NRFLAG);
 
 	while (1) {
@@ -929,7 +930,7 @@
 	dt_end_node(dt);
 }
 
-void build_flat_dt(struct iseries_flat_dt *dt)
+void build_flat_dt(struct iseries_flat_dt *dt, unsigned long phys_mem_size)
 {
 	u64 tmp[2];
 
@@ -945,7 +946,7 @@
 	dt_prop_str(dt, "name", "memory");
 	dt_prop_str(dt, "device_type", "memory");
 	tmp[0] = 0;
-	tmp[1] = systemcfg->physicalMemorySize;
+	tmp[1] = phys_mem_size;
 	dt_prop_u64_list(dt, "reg", tmp, 2);
 	dt_end_node(dt);
 
@@ -965,13 +966,15 @@
 
 void * __init iSeries_early_setup(void)
 {
+	unsigned long phys_mem_size;
+
 	iSeries_fixup_klimit();
 
 	/*
 	 * Initialize the table which translate Linux physical addresses to
 	 * AS/400 absolute addresses
 	 */
-	build_iSeries_Memory_Map();
+	phys_mem_size = build_iSeries_Memory_Map();
 
 	iSeries_get_cmdline();
 
@@ -981,7 +984,7 @@
 	/* Parse early parameters, in particular mem=x */
 	parse_early_param();
 
-	build_flat_dt(&iseries_dt);
+	build_flat_dt(&iseries_dt, phys_mem_size);
 
 	return (void *) __pa(&iseries_dt);
 }
diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c
index 340c21c..895aeb3 100644
--- a/arch/powerpc/platforms/maple/pci.c
+++ b/arch/powerpc/platforms/maple/pci.c
@@ -380,9 +380,6 @@
 	for_each_pci_dev(dev)
 		pci_read_irq_line(dev);
 
-	/* Do the mapping of the IO space */
-	phbs_remap_io();
-
 	DBG(" <- maple_pcibios_fixup\n");
 }
 
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index 8f818d0..dfd41b9 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -918,9 +918,6 @@
 			PCI_DN(np)->busno = 0xf0;
 	}
 
-	/* map in PCI I/O space */
-	phbs_remap_io();
-
 	/* pmac_check_ht_link(); */
 
 	/* Tell pci.c to not use the common resource allocation mechanism */
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 83a49e8..90040c4 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -74,6 +74,9 @@
 #define GATWICK_IRQ_POOL_SIZE        10
 static struct interrupt_info gatwick_int_pool[GATWICK_IRQ_POOL_SIZE];
 
+#define NR_MASK_WORDS	((NR_IRQS + 31) / 32)
+static unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
+
 /*
  * Mark an irq as "lost".  This is only used on the pmac
  * since it can lose interrupts (see pmac_set_irq_mask).
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index e1f9443..957b091 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -305,9 +305,19 @@
 	psurge_start = ioremap(PSURGE_START, 4);
 	psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4);
 
-	/* this is not actually strictly necessary -- paulus. */
-	for (i = 1; i < ncpus; ++i)
-		smp_hw_index[i] = i;
+	/*
+	 * This is necessary because OF doesn't know about the
+	 * secondary cpu(s), and thus there aren't nodes in the
+	 * device tree for them, and smp_setup_cpu_maps hasn't
+	 * set their bits in cpu_possible_map and cpu_present_map.
+	 */
+	if (ncpus > NR_CPUS)
+		ncpus = NR_CPUS;
+	for (i = 1; i < ncpus ; ++i) {
+		cpu_set(i, cpu_present_map);
+		cpu_set(i, cpu_possible_map);
+		set_hard_smp_processor_id(i, i);
+	}
 
 	if (ppc_md.progress) ppc_md.progress("smp_psurge_probe - done", 0x352);
 
@@ -348,6 +358,7 @@
 	int t;
 
 	set_dec(tb_ticks_per_jiffy);
+	/* XXX fixme */
 	set_tb(0, 0);
 	last_jiffy_stamp(cpu_nr) = 0;
 
@@ -363,8 +374,6 @@
 
 	/* now interrupt the secondary, starting both TBs */
 	psurge_set_ipi(1);
-
-	smp_tb_synchronized = 1;
 }
 
 static struct irqaction psurge_irqaction = {
@@ -625,9 +634,8 @@
 	for (t = 100000; t > 0 && sec_tb_reset; --t)
 		udelay(10);
 	if (sec_tb_reset)
+		/* XXX BUG_ON here? */
 		printk(KERN_WARNING "Timeout waiting sync(2) on second CPU\n");
-	else
-		smp_tb_synchronized = 1;
 
 	/* Now, restart the timebase by leaving the GPIO to an open collector */
        	pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 0);
@@ -810,19 +818,9 @@
 }
 
 
-/* Core99 Macs (dual G4s and G5s) */
-struct smp_ops_t core99_smp_ops = {
-	.message_pass	= smp_mpic_message_pass,
-	.probe		= smp_core99_probe,
-	.kick_cpu	= smp_core99_kick_cpu,
-	.setup_cpu	= smp_core99_setup_cpu,
-	.give_timebase	= smp_core99_give_timebase,
-	.take_timebase	= smp_core99_take_timebase,
-};
-
 #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32)
 
-int __cpu_disable(void)
+int smp_core99_cpu_disable(void)
 {
 	cpu_clear(smp_processor_id(), cpu_online_map);
 
@@ -846,7 +844,7 @@
 	low_cpu_die();
 }
 
-void __cpu_die(unsigned int cpu)
+void smp_core99_cpu_die(unsigned int cpu)
 {
 	int timeout;
 
@@ -858,8 +856,21 @@
 		}
 		msleep(1);
 	}
-	cpu_callin_map[cpu] = 0;
 	cpu_dead[cpu] = 0;
 }
 
 #endif
+
+/* Core99 Macs (dual G4s and G5s) */
+struct smp_ops_t core99_smp_ops = {
+	.message_pass	= smp_mpic_message_pass,
+	.probe		= smp_core99_probe,
+	.kick_cpu	= smp_core99_kick_cpu,
+	.setup_cpu	= smp_core99_setup_cpu,
+	.give_timebase	= smp_core99_give_timebase,
+	.take_timebase	= smp_core99_take_timebase,
+#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32)
+	.cpu_disable	= smp_core99_cpu_disable,
+	.cpu_die	= smp_core99_cpu_die,
+#endif
+};
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index b9938fe..e7ca5b1 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -3,3 +3,5 @@
 obj-$(CONFIG_SMP)	+= smp.o
 obj-$(CONFIG_IBMVIO)	+= vio.o
 obj-$(CONFIG_XICS)	+= xics.o
+obj-$(CONFIG_SCANLOG)	+= scanlog.o
+obj-$(CONFIG_EEH)    += eeh.o eeh_event.o
diff --git a/arch/ppc64/kernel/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
similarity index 63%
rename from arch/ppc64/kernel/eeh.c
rename to arch/powerpc/platforms/pseries/eeh.c
index 035d1b1..79de231 100644
--- a/arch/ppc64/kernel/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -1,39 +1,37 @@
 /*
  * eeh.c
  * Copyright (C) 2001 Dave Engebretsen & Todd Inglett IBM Corporation
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
  * (at your option) any later version.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
  */
 
-#include <linux/bootmem.h>
+#include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/list.h>
-#include <linux/mm.h>
-#include <linux/notifier.h>
 #include <linux/pci.h>
 #include <linux/proc_fs.h>
 #include <linux/rbtree.h>
 #include <linux/seq_file.h>
 #include <linux/spinlock.h>
+#include <asm/atomic.h>
 #include <asm/eeh.h>
+#include <asm/eeh_event.h>
 #include <asm/io.h>
 #include <asm/machdep.h>
-#include <asm/rtas.h>
-#include <asm/atomic.h>
-#include <asm/systemcfg.h>
 #include <asm/ppc-pci.h>
+#include <asm/rtas.h>
 
 #undef DEBUG
 
@@ -49,8 +47,8 @@
  *  were "empty": all reads return 0xff's and all writes are silently
  *  ignored.  EEH slot isolation events can be triggered by parity
  *  errors on the address or data busses (e.g. during posted writes),
- *  which in turn might be caused by dust, vibration, humidity,
- *  radioactivity or plain-old failed hardware.
+ *  which in turn might be caused by low voltage on the bus, dust,
+ *  vibration, humidity, radioactivity or plain-old failed hardware.
  *
  *  Note, however, that one of the leading causes of EEH slot
  *  freeze events are buggy device drivers, buggy device microcode,
@@ -71,26 +69,15 @@
  *  and sent out for processing.
  */
 
-/** Bus Unit ID macros; get low and hi 32-bits of the 64-bit BUID */
-#define BUID_HI(buid) ((buid) >> 32)
-#define BUID_LO(buid) ((buid) & 0xffffffff)
-
-/* EEH event workqueue setup. */
-static DEFINE_SPINLOCK(eeh_eventlist_lock);
-LIST_HEAD(eeh_eventlist);
-static void eeh_event_handler(void *);
-DECLARE_WORK(eeh_event_wq, eeh_event_handler, NULL);
-
-static struct notifier_block *eeh_notifier_chain;
-
-/*
- * If a device driver keeps reading an MMIO register in an interrupt
+/* If a device driver keeps reading an MMIO register in an interrupt
  * handler after a slot isolation event has occurred, we assume it
  * is broken and panic.  This sets the threshold for how many read
  * attempts we allow before panicking.
  */
-#define EEH_MAX_FAILS	1000
-static atomic_t eeh_fail_count;
+#define EEH_MAX_FAILS	100000
+
+/* Misc forward declarations */
+static void eeh_save_bars(struct pci_dev * pdev, struct pci_dn *pdn);
 
 /* RTAS tokens */
 static int ibm_set_eeh_option;
@@ -101,12 +88,19 @@
 
 static int eeh_subsystem_enabled;
 
+/* Lock to avoid races due to multiple reports of an error */
+static DEFINE_SPINLOCK(confirm_error_lock);
+
 /* Buffer for reporting slot-error-detail rtas calls */
 static unsigned char slot_errbuf[RTAS_ERROR_LOG_MAX];
 static DEFINE_SPINLOCK(slot_errbuf_lock);
 static int eeh_error_buf_size;
 
 /* System monitoring statistics */
+static DEFINE_PER_CPU(unsigned long, no_device);
+static DEFINE_PER_CPU(unsigned long, no_dn);
+static DEFINE_PER_CPU(unsigned long, no_cfg_addr);
+static DEFINE_PER_CPU(unsigned long, ignored_check);
 static DEFINE_PER_CPU(unsigned long, total_mmio_ffs);
 static DEFINE_PER_CPU(unsigned long, false_positives);
 static DEFINE_PER_CPU(unsigned long, ignored_failures);
@@ -224,9 +218,9 @@
 	while (*p) {
 		parent = *p;
 		piar = rb_entry(parent, struct pci_io_addr_range, rb_node);
-		if (alo < piar->addr_lo) {
+		if (ahi < piar->addr_lo) {
 			p = &parent->rb_left;
-		} else if (ahi > piar->addr_hi) {
+		} else if (alo > piar->addr_hi) {
 			p = &parent->rb_right;
 		} else {
 			if (dev != piar->pcidev ||
@@ -245,6 +239,11 @@
 	piar->pcidev = dev;
 	piar->flags = flags;
 
+#ifdef DEBUG
+	printk(KERN_DEBUG "PIAR: insert range=[%lx:%lx] dev=%s\n",
+	                  alo, ahi, pci_name (dev));
+#endif
+
 	rb_link_node(&piar->rb_node, parent, p);
 	rb_insert_color(&piar->rb_node, &pci_io_addr_cache_root.rb_root);
 
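
The interval test two hunks above (the ahi < piar->addr_lo / alo > piar->addr_hi checks) is the heart of this cache: descend left only when the new range lies entirely below the node, descend right only when it lies entirely above, and treat everything else as an overlap. The old alo/ahi comparisons let partially overlapping ranges slip past the overlap branch. A stand-alone sketch of the corrected predicate (the example ranges are made up):

	/* Interval classification as in the rb-tree insert above; not kernel code. */
	#include <stdio.h>

	static const char *classify(unsigned long alo, unsigned long ahi,
				    unsigned long node_lo, unsigned long node_hi)
	{
		if (ahi < node_lo)
			return "go left (entirely below the node)";
		if (alo > node_hi)
			return "go right (entirely above the node)";
		return "overlap (reuse or reject)";
	}

	int main(void)
	{
		/* node covers [0x1000, 0x1fff] */
		printf("%s\n", classify(0x0100, 0x0fff, 0x1000, 0x1fff));
		printf("%s\n", classify(0x2000, 0x2fff, 0x1000, 0x1fff));
		printf("%s\n", classify(0x0f00, 0x10ff, 0x1000, 0x1fff));
		return 0;
	}
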
@@ -260,18 +259,17 @@
 
 	dn = pci_device_to_OF_node(dev);
 	if (!dn) {
-		printk(KERN_WARNING "PCI: no pci dn found for dev=%s\n",
-			pci_name(dev));
+		printk(KERN_WARNING "PCI: no pci dn found for dev=%s\n", pci_name(dev));
 		return;
 	}
 
 	/* Skip any devices for which EEH is not enabled. */
-	pdn = dn->data;
+	pdn = PCI_DN(dn);
 	if (!(pdn->eeh_mode & EEH_MODE_SUPPORTED) ||
 	    pdn->eeh_mode & EEH_MODE_NOCHECK) {
 #ifdef DEBUG
-		printk(KERN_INFO "PCI: skip building address cache for=%s\n",
-		       pci_name(dev));
+		printk(KERN_INFO "PCI: skip building address cache for=%s - %s\n",
+		       pci_name(dev), pdn->node->full_name);
 #endif
 		return;
 	}
@@ -307,7 +305,7 @@
  * we maintain a cache of devices that can be quickly searched.
  * This routine adds a device to that cache.
  */
-void pci_addr_cache_insert_device(struct pci_dev *dev)
+static void pci_addr_cache_insert_device(struct pci_dev *dev)
 {
 	unsigned long flags;
 
@@ -350,7 +348,7 @@
  * the tree multiple times (once per resource).
  * But so what; device removal doesn't need to be that fast.
  */
-void pci_addr_cache_remove_device(struct pci_dev *dev)
+static void pci_addr_cache_remove_device(struct pci_dev *dev)
 {
 	unsigned long flags;
 
@@ -370,8 +368,12 @@
  */
 void __init pci_addr_cache_build(void)
 {
+	struct device_node *dn;
 	struct pci_dev *dev = NULL;
 
+	if (!eeh_subsystem_enabled)
+		return;
+
 	spin_lock_init(&pci_io_addr_cache_root.piar_lock);
 
 	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
@@ -380,6 +382,10 @@
 			continue;
 		}
 		pci_addr_cache_insert_device(dev);
+
+		/* Save the BAR's; firmware doesn't restore these after EEH reset */
+		dn = pci_device_to_OF_node(dev);
+		eeh_save_bars(dev, PCI_DN(dn));
 	}
 
 #ifdef DEBUG
@@ -391,22 +397,26 @@
 /* --------------------------------------------------------------- */
 /* Above lies the PCI Address Cache. Below lies the EEH event infrastructure */
 
-/**
- * eeh_register_notifier - Register to find out about EEH events.
- * @nb: notifier block to callback on events
- */
-int eeh_register_notifier(struct notifier_block *nb)
+void eeh_slot_error_detail (struct pci_dn *pdn, int severity)
 {
-	return notifier_chain_register(&eeh_notifier_chain, nb);
-}
+	unsigned long flags;
+	int rc;
 
-/**
- * eeh_unregister_notifier - Unregister to an EEH event notifier.
- * @nb: notifier block to callback on events
- */
-int eeh_unregister_notifier(struct notifier_block *nb)
-{
-	return notifier_chain_unregister(&eeh_notifier_chain, nb);
+	/* Log the error with the rtas logger */
+	spin_lock_irqsave(&slot_errbuf_lock, flags);
+	memset(slot_errbuf, 0, eeh_error_buf_size);
+
+	rc = rtas_call(ibm_slot_error_detail,
+	               8, 1, NULL, pdn->eeh_config_addr,
+	               BUID_HI(pdn->phb->buid),
+	               BUID_LO(pdn->phb->buid), NULL, 0,
+	               virt_to_phys(slot_errbuf),
+	               eeh_error_buf_size,
+	               severity);
+
+	if (rc == 0)
+		log_error(slot_errbuf, ERR_TYPE_RTAS_LOG, 0);
+	spin_unlock_irqrestore(&slot_errbuf_lock, flags);
 }
 
 /**
@@ -414,16 +424,16 @@
  * @dn: device node to read
  * @rets: array to return results in
  */
-static int read_slot_reset_state(struct device_node *dn, int rets[])
+static int read_slot_reset_state(struct pci_dn *pdn, int rets[])
 {
 	int token, outputs;
-	struct pci_dn *pdn = dn->data;
 
 	if (ibm_read_slot_reset_state2 != RTAS_UNKNOWN_SERVICE) {
 		token = ibm_read_slot_reset_state2;
 		outputs = 4;
 	} else {
 		token = ibm_read_slot_reset_state;
+		rets[2] = 0; /* fake PE Unavailable info */
 		outputs = 3;
 	}
 
@@ -432,75 +442,8 @@
 }
 
 /**
- * eeh_panic - call panic() for an eeh event that cannot be handled.
- * The philosophy of this routine is that it is better to panic and
- * halt the OS than it is to risk possible data corruption by
- * oblivious device drivers that don't know better.
- *
- * @dev pci device that had an eeh event
- * @reset_state current reset state of the device slot
- */
-static void eeh_panic(struct pci_dev *dev, int reset_state)
-{
-	/*
-	 * XXX We should create a separate sysctl for this.
-	 *
-	 * Since the panic_on_oops sysctl is used to halt the system
-	 * in light of potential corruption, we can use it here.
-	 */
-	if (panic_on_oops)
-		panic("EEH: MMIO failure (%d) on device:%s\n", reset_state,
-		      pci_name(dev));
-	else {
-		__get_cpu_var(ignored_failures)++;
-		printk(KERN_INFO "EEH: Ignored MMIO failure (%d) on device:%s\n",
-		       reset_state, pci_name(dev));
-	}
-}
-
-/**
- * eeh_event_handler - dispatch EEH events.  The detection of a frozen
- * slot can occur inside an interrupt, where it can be hard to do
- * anything about it.  The goal of this routine is to pull these
- * detection events out of the context of the interrupt handler, and
- * re-dispatch them for processing at a later time in a normal context.
- *
- * @dummy - unused
- */
-static void eeh_event_handler(void *dummy)
-{
-	unsigned long flags;
-	struct eeh_event	*event;
-
-	while (1) {
-		spin_lock_irqsave(&eeh_eventlist_lock, flags);
-		event = NULL;
-		if (!list_empty(&eeh_eventlist)) {
-			event = list_entry(eeh_eventlist.next, struct eeh_event, list);
-			list_del(&event->list);
-		}
-		spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
-		if (event == NULL)
-			break;
-
-		printk(KERN_INFO "EEH: MMIO failure (%d), notifiying device "
-		       "%s\n", event->reset_state,
-		       pci_name(event->dev));
-
-		atomic_set(&eeh_fail_count, 0);
-		notifier_call_chain (&eeh_notifier_chain,
-				     EEH_NOTIFY_FREEZE, event);
-
-		__get_cpu_var(slot_resets)++;
-
-		pci_dev_put(event->dev);
-		kfree(event);
-	}
-}
-
-/**
  * eeh_token_to_phys - convert EEH address token to phys address
- * @token i/o token, should be address in the form 0xE....
+ * @token i/o token, should be address in the form 0xA....
  */
 static inline unsigned long eeh_token_to_phys(unsigned long token)
 {
@@ -515,6 +458,70 @@
 	return pa | (token & (PAGE_SIZE-1));
 }
 
+/** 
+ * Return the "partitionable endpoint" (pe) under which this device lies
+ */
+static struct device_node * find_device_pe(struct device_node *dn)
+{
+	while ((dn->parent) && PCI_DN(dn->parent) &&
+	      (PCI_DN(dn->parent)->eeh_mode & EEH_MODE_SUPPORTED)) {
+		dn = dn->parent;
+	}
+	return dn;
+}
+
+/** Mark all devices that are peers of this device as failed.
+ *  Mark the device driver too, so that it can see the failure
+ *  immediately; this is critical, since some drivers poll
+ *  status registers in interrupts ... If a driver is polling,
+ *  and the slot is frozen, then the driver can deadlock in
+ *  an interrupt context, which is bad.
+ */
+
+static void __eeh_mark_slot (struct device_node *dn, int mode_flag)
+{
+	while (dn) {
+		if (PCI_DN(dn)) {
+			PCI_DN(dn)->eeh_mode |= mode_flag;
+
+			if (dn->child)
+				__eeh_mark_slot (dn->child, mode_flag);
+		}
+		dn = dn->sibling;
+	}
+}
+
+void eeh_mark_slot (struct device_node *dn, int mode_flag)
+{
+	dn = find_device_pe (dn);
+	PCI_DN(dn)->eeh_mode |= mode_flag;
+	__eeh_mark_slot (dn->child, mode_flag);
+}
+
+static void __eeh_clear_slot (struct device_node *dn, int mode_flag)
+{
+	while (dn) {
+		if (PCI_DN(dn)) {
+			PCI_DN(dn)->eeh_mode &= ~mode_flag;
+			PCI_DN(dn)->eeh_check_count = 0;
+			if (dn->child)
+				__eeh_clear_slot (dn->child, mode_flag);
+		}
+		dn = dn->sibling;
+	}
+}
+
+void eeh_clear_slot (struct device_node *dn, int mode_flag)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&confirm_error_lock, flags);
+	dn = find_device_pe (dn);
+	PCI_DN(dn)->eeh_mode &= ~mode_flag;
+	PCI_DN(dn)->eeh_check_count = 0;
+	__eeh_clear_slot (dn->child, mode_flag);
+	spin_unlock_irqrestore(&confirm_error_lock, flags);
+}
+
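Editorial note: eeh_mark_slot() and eeh_clear_slot() bracket an isolation episode. The detection path marks every function under the partitionable endpoint so that repeated MMIO failures are reported only once, and the reset path clears the flag again just before the #RST line is dropped. A compressed, hypothetical sketch of that pairing, using only calls defined elsewhere in this file (the wrapper function itself is not part of the patch):

    /* Hypothetical illustration of the mark/clear pairing; not part of
     * this patch.  Both callees are defined in this file. */
    static void example_isolate_and_reset(struct device_node *dn,
                                          struct pci_dn *pdn)
    {
            /* detection side: flag every function under this PE as
             * isolated, so sibling functions do not each queue an event */
            eeh_mark_slot(dn, EEH_MODE_ISOLATED);

            /* ... an eeh_event is queued and handled in process context ... */

            /* recovery side: rtas_set_slot_reset() clears EEH_MODE_ISOLATED
             * itself, just before it drops the #RST line */
            rtas_set_slot_reset(pdn);
    }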
 /**
  * eeh_dn_check_failure - check if all 1's data is due to EEH slot freeze
  * @dn device node
@@ -526,7 +533,7 @@
  * will query firmware for the EEH status.
  *
  * Returns 0 if there has not been an EEH error; otherwise returns
- * a non-zero value and queues up a solt isolation event notification.
+ * a non-zero value and queues up a slot isolation event notification.
  *
  * It is safe to call this routine in an interrupt context.
  */
@@ -535,42 +542,59 @@
 	int ret;
 	int rets[3];
 	unsigned long flags;
-	int rc, reset_state;
-	struct eeh_event  *event;
 	struct pci_dn *pdn;
+	int rc = 0;
 
 	__get_cpu_var(total_mmio_ffs)++;
 
 	if (!eeh_subsystem_enabled)
 		return 0;
 
-	if (!dn)
+	if (!dn) {
+		__get_cpu_var(no_dn)++;
 		return 0;
-	pdn = dn->data;
+	}
+	pdn = PCI_DN(dn);
 
 	/* Access to IO BARs might get this far and still not want checking. */
-	if (!pdn->eeh_capable || !(pdn->eeh_mode & EEH_MODE_SUPPORTED) ||
+	if (!(pdn->eeh_mode & EEH_MODE_SUPPORTED) ||
 	    pdn->eeh_mode & EEH_MODE_NOCHECK) {
+		__get_cpu_var(ignored_check)++;
+#ifdef DEBUG
+		printk ("EEH:ignored check (%x) for %s %s\n", 
+		        pdn->eeh_mode, pci_name (dev), dn->full_name);
+#endif
 		return 0;
 	}
 
 	if (!pdn->eeh_config_addr) {
+		__get_cpu_var(no_cfg_addr)++;
 		return 0;
 	}
 
-	/*
-	 * If we already have a pending isolation event for this
-	 * slot, we know it's bad already, we don't need to check...
+	/* If we already have a pending isolation event for this
+	 * slot, we know it's bad already, we don't need to check.
+	 * Do this checking under a lock; as multiple PCI devices
+	 * in one slot might report errors simultaneously, and we
+	 * only want one error recovery routine running.
 	 */
+	spin_lock_irqsave(&confirm_error_lock, flags);
+	rc = 1;
 	if (pdn->eeh_mode & EEH_MODE_ISOLATED) {
-		atomic_inc(&eeh_fail_count);
-		if (atomic_read(&eeh_fail_count) >= EEH_MAX_FAILS) {
+		pdn->eeh_check_count ++;
+		if (pdn->eeh_check_count >= EEH_MAX_FAILS) {
+			printk (KERN_ERR "EEH: Device driver ignored %d bad reads, panicking\n",
+			        pdn->eeh_check_count);
+			dump_stack();
+			
 			/* re-read the slot reset state */
-			if (read_slot_reset_state(dn, rets) != 0)
+			if (read_slot_reset_state(pdn, rets) != 0)
 				rets[0] = -1;	/* reset state unknown */
-			eeh_panic(dev, rets[0]);
+
+			/* If we are here, then we hit an infinite loop. Stop. */
+			panic("EEH: MMIO halt (%d) on device:%s\n", rets[0], pci_name(dev));
 		}
-		return 0;
+		goto dn_unlock;
 	}
 
 	/*
@@ -580,66 +604,69 @@
 	 * function zero of a multi-function device.
 	 * In any case they must share a common PHB.
 	 */
-	ret = read_slot_reset_state(dn, rets);
-	if (!(ret == 0 && rets[1] == 1 && (rets[0] == 2 || rets[0] == 4))) {
+	ret = read_slot_reset_state(pdn, rets);
+
+	/* If the call to firmware failed, punt */
+	if (ret != 0) {
+		printk(KERN_WARNING "EEH: read_slot_reset_state() failed; rc=%d dn=%s\n",
+		       ret, dn->full_name);
 		__get_cpu_var(false_positives)++;
-		return 0;
+		rc = 0;
+		goto dn_unlock;
 	}
 
-	/* prevent repeated reports of this failure */
-	pdn->eeh_mode |= EEH_MODE_ISOLATED;
+	/* If EEH is not supported on this device, punt. */
+	if (rets[1] != 1) {
+		printk(KERN_WARNING "EEH: event on unsupported device, rc=%d dn=%s\n",
+		       ret, dn->full_name);
+		__get_cpu_var(false_positives)++;
+		rc = 0;
+		goto dn_unlock;
+	}
 
-	reset_state = rets[0];
+	/* If not the kind of error we know about, punt. */
+	if (rets[0] != 2 && rets[0] != 4 && rets[0] != 5) {
+		__get_cpu_var(false_positives)++;
+		rc = 0;
+		goto dn_unlock;
+	}
 
-	spin_lock_irqsave(&slot_errbuf_lock, flags);
-	memset(slot_errbuf, 0, eeh_error_buf_size);
+	/* Note that config-io to empty slots may fail;
+	 * we recognize empty slots because they have no children. */
+	if ((rets[0] == 5) && (dn->child == NULL)) {
+		__get_cpu_var(false_positives)++;
+		rc = 0;
+		goto dn_unlock;
+	}
 
-	rc = rtas_call(ibm_slot_error_detail,
-	               8, 1, NULL, pdn->eeh_config_addr,
-	               BUID_HI(pdn->phb->buid),
-	               BUID_LO(pdn->phb->buid), NULL, 0,
-	               virt_to_phys(slot_errbuf),
-	               eeh_error_buf_size,
-	               1 /* Temporary Error */);
+	__get_cpu_var(slot_resets)++;
+ 
+	/* Avoid repeated reports of this failure, including problems
+	 * with other functions on this device, and functions under
+	 * bridges. */
+	eeh_mark_slot (dn, EEH_MODE_ISOLATED);
+	spin_unlock_irqrestore(&confirm_error_lock, flags);
 
-	if (rc == 0)
-		log_error(slot_errbuf, ERR_TYPE_RTAS_LOG, 0);
-	spin_unlock_irqrestore(&slot_errbuf_lock, flags);
-
-	printk(KERN_INFO "EEH: MMIO failure (%d) on device: %s %s\n",
-	       rets[0], dn->name, dn->full_name);
-	event = kmalloc(sizeof(*event), GFP_ATOMIC);
-	if (event == NULL) {
-		eeh_panic(dev, reset_state);
-		return 1;
- 	}
-
-	event->dev = dev;
-	event->dn = dn;
-	event->reset_state = reset_state;
-
-	/* We may or may not be called in an interrupt context */
-	spin_lock_irqsave(&eeh_eventlist_lock, flags);
-	list_add(&event->list, &eeh_eventlist);
-	spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
-
+	eeh_send_failure_event (dn, dev, rets[0], rets[2]);
+	
 	/* Most EEH events are due to device driver bugs.  Having
 	 * a stack trace will help the device-driver authors figure
 	 * out what happened.  So print that out. */
-	dump_stack();
-	schedule_work(&eeh_event_wq);
+	if (rets[0] != 5) dump_stack();
+	return 1;
 
-	return 0;
+dn_unlock:
+	spin_unlock_irqrestore(&confirm_error_lock, flags);
+	return rc;
 }
 
-EXPORT_SYMBOL(eeh_dn_check_failure);
+EXPORT_SYMBOL_GPL(eeh_dn_check_failure);
 
 /**
  * eeh_check_failure - check if all 1's data is due to EEH slot freeze
  * @token i/o token, should be address in the form 0xA....
  * @val value, should be all 1's (XXX why do we need this arg??)
  *
- * Check for an eeh failure at the given token address.
  * Check for an EEH failure at the given token address.  Call this
  * routine if the result of a read was all 0xff's and you want to
  * find out if this is due to an EEH slot freeze event.  This routine
@@ -656,8 +683,10 @@
 	/* Finding the phys addr + pci device; this is pretty quick. */
 	addr = eeh_token_to_phys((unsigned long __force) token);
 	dev = pci_get_device_by_addr(addr);
-	if (!dev)
+	if (!dev) {
+		__get_cpu_var(no_device)++;
 		return val;
+	}
 
 	dn = pci_device_to_OF_node(dev);
 	eeh_dn_check_failure (dn, dev);
@@ -668,6 +697,217 @@
 
 EXPORT_SYMBOL(eeh_check_failure);
 
+/* ------------------------------------------------------------- */
+/* The code below deals with error recovery */
+
+/** Return a negative value on a permanent error, else return
+ * the number of milliseconds to wait until the PCI slot is
+ * ready to be used.
+ */
+static int
+eeh_slot_availability(struct pci_dn *pdn)
+{
+	int rc;
+	int rets[3];
+
+	rc = read_slot_reset_state(pdn, rets);
+
+	if (rc) return rc;
+
+	if (rets[1] == 0) return -1;  /* EEH is not supported */
+	if (rets[0] == 0)  return 0;  /* Oll Korrect */
+	if (rets[0] == 5) {
+		if (rets[2] == 0) return -1; /* permanently unavailable */
+		return rets[2]; /* number of millisecs to wait */
+	}
+	return -1;
+}
+
+/** rtas_pci_slot_reset raises/lowers the pci #RST line
+ *  state: 1/0 to raise/lower the #RST
+ *
+ * Clear the EEH-frozen condition on a slot.  This routine
+ * asserts the PCI #RST line if the 'state' argument is '1',
+ * and drops the #RST line if 'state' is '0'.  This routine is
+ * safe to call in an interrupt context.
+ *
+ */
+
+static void
+rtas_pci_slot_reset(struct pci_dn *pdn, int state)
+{
+	int rc;
+
+	BUG_ON (pdn==NULL); 
+
+	if (!pdn->phb) {
+		printk (KERN_WARNING "EEH: in slot reset, device node %s has no phb\n",
+		        pdn->node->full_name);
+		return;
+	}
+
+	rc = rtas_call(ibm_set_slot_reset,4,1, NULL,
+	               pdn->eeh_config_addr,
+	               BUID_HI(pdn->phb->buid),
+	               BUID_LO(pdn->phb->buid),
+	               state);
+	if (rc) {
+		printk (KERN_WARNING "EEH: Unable to reset the failed slot, (%d) #RST=%d dn=%s\n", 
+		        rc, state, pdn->node->full_name);
+		return;
+	}
+}
+
+/** rtas_set_slot_reset -- assert the pci #RST line for 1/4 second
+ *  pdn -- pci device node of the slot to be reset.
+ */
+
+void
+rtas_set_slot_reset(struct pci_dn *pdn)
+{
+	int i, rc;
+
+	rtas_pci_slot_reset (pdn, 1);
+
+	/* The PCI bus requires that the reset be held high for at least
+	 * 100 milliseconds. We wait a bit longer 'just in case'.  */
+
+#define PCI_BUS_RST_HOLD_TIME_MSEC 250
+	msleep (PCI_BUS_RST_HOLD_TIME_MSEC);
+	
+	/* We might get hit with another EEH freeze as soon as the 
+	 * pci slot reset line is dropped. Make sure we don't miss
+	 * these, and clear the flag now. */
+	eeh_clear_slot (pdn->node, EEH_MODE_ISOLATED);
+
+	rtas_pci_slot_reset (pdn, 0);
+
+	/* After a PCI slot has been reset, the PCI Express spec requires
+	 * a 1.5 second idle time for the bus to stabilize, before starting
+	 * up traffic. */
+#define PCI_BUS_SETTLE_TIME_MSEC 1800
+	msleep (PCI_BUS_SETTLE_TIME_MSEC);
+
+	/* Now double check with the firmware to make sure the device is
+	 * ready to be used; if not, wait for recovery. */
+	for (i=0; i<10; i++) {
+		rc = eeh_slot_availability (pdn);
+		if (rc <= 0) break;
+
+		msleep (rc+100);
+	}
+}
+
+/* ------------------------------------------------------- */
+/** Save and restore of PCI BARs
+ *
+ * Although firmware will set up BARs during boot, it doesn't
+ * set up device BARs after a device reset (it will, if requested,
+ * set up bridge configuration). Thus, we need to configure the
+ * PCI devices ourselves.
+ */
+
+/**
+ * __restore_bars - Restore the Base Address Registers
+ * Loads the PCI configuration space base address registers,
+ * the expansion ROM base address, the latency timer, etc.
+ * from the saved values in the device node.
+ */
+static inline void __restore_bars (struct pci_dn *pdn)
+{
+	int i;
+
+	if (NULL==pdn->phb) return;
+	for (i=4; i<10; i++) {
+		rtas_write_config(pdn, i*4, 4, pdn->config_space[i]);
+	}
+
+	/* 12 == Expansion ROM Address */
+	rtas_write_config(pdn, 12*4, 4, pdn->config_space[12]);
+
+#define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF))
+#define SAVED_BYTE(OFF) (((u8 *)(pdn->config_space))[BYTE_SWAP(OFF)])
+
+	rtas_write_config (pdn, PCI_CACHE_LINE_SIZE, 1,
+	            SAVED_BYTE(PCI_CACHE_LINE_SIZE));
+
+	rtas_write_config (pdn, PCI_LATENCY_TIMER, 1,
+	            SAVED_BYTE(PCI_LATENCY_TIMER));
+
+	/* max latency, min grant, interrupt pin and line */
+	rtas_write_config(pdn, 15*4, 4, pdn->config_space[15]);
+}
+
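Editorial note: the BYTE_SWAP()/SAVED_BYTE() macros above are easy to misread. pci_read_config_dword() stores each 32-bit config register in CPU byte order, so on a big-endian host (as pSeries is) the PCI byte at config offset OFF, which is little-endian within its dword, lands at byte (3 - OFF%4) of the stored u32. A minimal, self-contained userspace sketch of the index arithmetic (illustrative only, not part of this patch):

    #include <stdio.h>

    /* same macro as in __restore_bars() above */
    #define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF))

    int main(void)
    {
            /* PCI_CACHE_LINE_SIZE is config offset 0x0C: dword 3 of the
             * saved config_space[], byte 3 of that big-endian-stored u32,
             * i.e. byte-array index 15. */
            printf("BYTE_SWAP(0x0C) = %d\n", BYTE_SWAP(0x0C)); /* 15 */

            /* PCI_LATENCY_TIMER is config offset 0x0D -> index 14 */
            printf("BYTE_SWAP(0x0D) = %d\n", BYTE_SWAP(0x0D)); /* 14 */
            return 0;
    }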
+/**
+ * eeh_restore_bars - restore the PCI config space info
+ *
+ * This routine performs a recursive walk to the children
+ * of this device as well.
+ */
+void eeh_restore_bars(struct pci_dn *pdn)
+{
+	struct device_node *dn;
+	if (!pdn) 
+		return;
+	
+	if (! pdn->eeh_is_bridge)
+		__restore_bars (pdn);
+
+	dn = pdn->node->child;
+	while (dn) {
+		eeh_restore_bars (PCI_DN(dn));
+		dn = dn->sibling;
+	}
+}
+
+/**
+ * eeh_save_bars - save device BARs
+ *
+ * Save the values of the device BARs. Unlike the restore
+ * routine, this routine is *not* recursive. This is because
+ * PCI devices are added individually, whereas the restore
+ * resets an entire slot at a time.
+ */
+static void eeh_save_bars(struct pci_dev * pdev, struct pci_dn *pdn)
+{
+	int i;
+
+	if (!pdev || !pdn )
+		return;
+	
+	for (i = 0; i < 16; i++)
+		pci_read_config_dword(pdev, i * 4, &pdn->config_space[i]);
+
+	if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
+		pdn->eeh_is_bridge = 1;
+}
+
+void
+rtas_configure_bridge(struct pci_dn *pdn)
+{
+	int token = rtas_token ("ibm,configure-bridge");
+	int rc;
+
+	if (token == RTAS_UNKNOWN_SERVICE)
+		return;
+	rc = rtas_call(token,3,1, NULL,
+	               pdn->eeh_config_addr,
+	               BUID_HI(pdn->phb->buid),
+	               BUID_LO(pdn->phb->buid));
+	if (rc) {
+		printk (KERN_WARNING "EEH: Unable to configure device bridge (%d) for %s\n",
+		        rc, pdn->node->full_name);
+	}
+}
+
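Editorial note: taken together, the helpers above form the slot-recovery building blocks added by this patch: reset the slot, re-program any bridge, then restore the config space saved at probe time. A hedged sketch of how a recovery path could chain them; the wrapper function is hypothetical and is not the recovery code this patch adds:

    /* Hypothetical recovery step, for illustration only.  All three
     * callees are defined above with these signatures. */
    static void example_recover_slot(struct pci_dn *pdn)
    {
            /* hard-reset the slot; this also clears EEH_MODE_ISOLATED and
             * waits for firmware to report the slot usable again */
            rtas_set_slot_reset(pdn);

            /* firmware re-programs bridge configuration only on request */
            rtas_configure_bridge(pdn);

            /* device BARs are not re-programmed by firmware; restore them
             * from the copies taken by eeh_save_bars() at probe time */
            eeh_restore_bars(pdn);
    }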
+/* ------------------------------------------------------------- */
+/* The code below deals with enabling EEH for devices during the
+ * early boot sequence.  EEH must be enabled before any PCI probing
+ * can be done.
+ */
+
+#define EEH_ENABLE 1
+
 struct eeh_early_enable_info {
 	unsigned int buid_hi;
 	unsigned int buid_lo;
@@ -684,9 +924,11 @@
 	u32 *device_id = (u32 *)get_property(dn, "device-id", NULL);
 	u32 *regs;
 	int enable;
-	struct pci_dn *pdn = dn->data;
+	struct pci_dn *pdn = PCI_DN(dn);
 
 	pdn->eeh_mode = 0;
+	pdn->eeh_check_count = 0;
+	pdn->eeh_freeze_count = 0;
 
 	if (status && strcmp(status, "ok") != 0)
 		return NULL;	/* ignore devices with bad status */
@@ -723,8 +965,9 @@
 		/* First register entry is addr (00BBSS00)  */
 		/* Try to enable eeh */
 		ret = rtas_call(ibm_set_eeh_option, 4, 1, NULL,
-				regs[0], info->buid_hi, info->buid_lo,
-				EEH_ENABLE);
+		                regs[0], info->buid_hi, info->buid_lo,
+		                EEH_ENABLE);
+
 		if (ret == 0) {
 			eeh_subsystem_enabled = 1;
 			pdn->eeh_mode |= EEH_MODE_SUPPORTED;
@@ -736,7 +979,7 @@
 
 			/* This device doesn't support EEH, but it may have an
 			 * EEH parent, in which case we mark it as supported. */
-			if (dn->parent && dn->parent->data
+			if (dn->parent && PCI_DN(dn->parent)
 			    && (PCI_DN(dn->parent)->eeh_mode & EEH_MODE_SUPPORTED)) {
 				/* Parent supports EEH. */
 				pdn->eeh_mode |= EEH_MODE_SUPPORTED;
@@ -749,7 +992,7 @@
 		       dn->full_name);
 	}
 
-	return NULL; 
+	return NULL;
 }
 
 /*
@@ -770,6 +1013,9 @@
 	struct device_node *phb, *np;
 	struct eeh_early_enable_info info;
 
+	spin_lock_init(&confirm_error_lock);
+	spin_lock_init(&slot_errbuf_lock);
+
 	np = of_find_node_by_path("/rtas");
 	if (np == NULL)
 		return;
@@ -797,13 +1043,11 @@
 	for (phb = of_find_node_by_name(NULL, "pci"); phb;
 	     phb = of_find_node_by_name(phb, "pci")) {
 		unsigned long buid;
-		struct pci_dn *pci;
 
 		buid = get_phb_buid(phb);
-		if (buid == 0 || phb->data == NULL)
+		if (buid == 0 || PCI_DN(phb) == NULL)
 			continue;
 
-		pci = phb->data;
 		info.buid_lo = BUID_LO(buid);
 		info.buid_hi = BUID_HI(buid);
 		traverse_pci_devices(phb, early_enable_eeh, &info);
@@ -832,11 +1076,13 @@
 	struct pci_controller *phb;
 	struct eeh_early_enable_info info;
 
-	if (!dn || !dn->data)
+	if (!dn || !PCI_DN(dn))
 		return;
 	phb = PCI_DN(dn)->phb;
 	if (NULL == phb || 0 == phb->buid) {
-		printk(KERN_WARNING "EEH: Expected buid but found none\n");
+		printk(KERN_WARNING "EEH: Expected buid but found none for %s\n",
+		       dn->full_name);
+		dump_stack();
 		return;
 	}
 
@@ -844,7 +1090,7 @@
 	info.buid_lo = BUID_LO(phb->buid);
 	early_enable_eeh(dn, &info);
 }
-EXPORT_SYMBOL(eeh_add_device_early);
+EXPORT_SYMBOL_GPL(eeh_add_device_early);
 
 /**
  * eeh_add_device_late - perform EEH initialization for the indicated pci device
@@ -855,6 +1101,9 @@
  */
 void eeh_add_device_late(struct pci_dev *dev)
 {
+	struct device_node *dn;
+	struct pci_dn *pdn;
+
 	if (!dev || !eeh_subsystem_enabled)
 		return;
 
@@ -862,9 +1111,15 @@
 	printk(KERN_DEBUG "EEH: adding device %s\n", pci_name(dev));
 #endif
 
+	pci_dev_get (dev);
+	dn = pci_device_to_OF_node(dev);
+	pdn = PCI_DN(dn);
+	pdn->pcidev = dev;
+
 	pci_addr_cache_insert_device (dev);
+	eeh_save_bars(dev, pdn);
 }
-EXPORT_SYMBOL(eeh_add_device_late);
+EXPORT_SYMBOL_GPL(eeh_add_device_late);
 
 /**
  * eeh_remove_device - undo EEH setup for the indicated pci device
@@ -875,6 +1130,7 @@
  */
 void eeh_remove_device(struct pci_dev *dev)
 {
+	struct device_node *dn;
 	if (!dev || !eeh_subsystem_enabled)
 		return;
 
@@ -883,20 +1139,29 @@
 	printk(KERN_DEBUG "EEH: remove device %s\n", pci_name(dev));
 #endif
 	pci_addr_cache_remove_device(dev);
+
+	dn = pci_device_to_OF_node(dev);
+	PCI_DN(dn)->pcidev = NULL;
+	pci_dev_put (dev);
 }
-EXPORT_SYMBOL(eeh_remove_device);
+EXPORT_SYMBOL_GPL(eeh_remove_device);
 
 static int proc_eeh_show(struct seq_file *m, void *v)
 {
 	unsigned int cpu;
 	unsigned long ffs = 0, positives = 0, failures = 0;
 	unsigned long resets = 0;
+	unsigned long no_dev = 0, no_dn = 0, no_cfg = 0, no_check = 0;
 
 	for_each_cpu(cpu) {
 		ffs += per_cpu(total_mmio_ffs, cpu);
 		positives += per_cpu(false_positives, cpu);
 		failures += per_cpu(ignored_failures, cpu);
 		resets += per_cpu(slot_resets, cpu);
+		no_dev += per_cpu(no_device, cpu);
+		no_dn += per_cpu(no_dn, cpu);
+		no_cfg += per_cpu(no_cfg_addr, cpu);
+		no_check += per_cpu(ignored_check, cpu);
 	}
 
 	if (0 == eeh_subsystem_enabled) {
@@ -904,13 +1169,17 @@
 		seq_printf(m, "eeh_total_mmio_ffs=%ld\n", ffs);
 	} else {
 		seq_printf(m, "EEH Subsystem is enabled\n");
-		seq_printf(m, "eeh_total_mmio_ffs=%ld\n"
-			   "eeh_false_positives=%ld\n"
-			   "eeh_ignored_failures=%ld\n"
-			   "eeh_slot_resets=%ld\n"
-				"eeh_fail_count=%d\n",
-			   ffs, positives, failures, resets,
-				eeh_fail_count.counter);
+		seq_printf(m,
+				"no device=%ld\n"
+				"no device node=%ld\n"
+				"no config address=%ld\n"
+				"check not wanted=%ld\n"
+				"eeh_total_mmio_ffs=%ld\n"
+				"eeh_false_positives=%ld\n"
+				"eeh_ignored_failures=%ld\n"
+				"eeh_slot_resets=%ld\n",
+				no_dev, no_dn, no_cfg, no_check,
+				ffs, positives, failures, resets);
 	}
 
 	return 0;
@@ -932,7 +1201,7 @@
 {
 	struct proc_dir_entry *e;
 
-	if (systemcfg->platform & PLATFORM_PSERIES) {
+	if (platform_is_pseries()) {
 		e = create_proc_entry("ppc64/eeh", 0, NULL);
 		if (e)
 			e->proc_fops = &proc_eeh_operations;
diff --git a/arch/powerpc/platforms/pseries/eeh_event.c b/arch/powerpc/platforms/pseries/eeh_event.c
new file mode 100644
index 0000000..9249733
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/eeh_event.c
@@ -0,0 +1,155 @@
+/*
+ * eeh_event.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ *
+ * Copyright (c) 2005 Linas Vepstas <linas@linas.org>
+ */
+
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <asm/eeh_event.h>
+
+/** Overview:
+ *  EEH error states may be detected within exception handlers;
+ *  however, the recovery processing needs to occur asynchronously
+ *  in a normal kernel context and not an interrupt context.
+ *  This pair of routines creates an event and queues it onto a
+ *  work-queue, where a worker thread can drive recovery.
+ */
+
+/* EEH event workqueue setup. */
+static spinlock_t eeh_eventlist_lock = SPIN_LOCK_UNLOCKED;
+LIST_HEAD(eeh_eventlist);
+static void eeh_thread_launcher(void *);
+DECLARE_WORK(eeh_event_wq, eeh_thread_launcher, NULL);
+
+/**
+ * eeh_panic - call panic() for an eeh event that cannot be handled.
+ * The philosophy of this routine is that it is better to panic and
+ * halt the OS than it is to risk possible data corruption by
+ * oblivious device drivers that don't know better.
+ *
+ * @dev pci device that had an eeh event
+ * @reset_state current reset state of the device slot
+ */
+static void eeh_panic(struct pci_dev *dev, int reset_state)
+{
+	/*
+	 * Since the panic_on_oops sysctl is used to halt the system
+	 * in light of potential corruption, we can use it here.
+	 */
+	if (panic_on_oops) {
+		panic("EEH: MMIO failure (%d) on device:%s\n", reset_state,
+		      pci_name(dev));
+	}
+	else {
+		printk(KERN_INFO "EEH: Ignored MMIO failure (%d) on device:%s\n",
+		       reset_state, pci_name(dev));
+	}
+}
+
+/**
+ * eeh_event_handler - dispatch EEH events.  The detection of a frozen
+ * slot can occur inside an interrupt, where it can be hard to do
+ * anything about it.  The goal of this routine is to pull these
+ * detection events out of the context of the interrupt handler, and
+ * re-dispatch them for processing at a later time in a normal context.
+ *
+ * @dummy - unused
+ */
+static int eeh_event_handler(void * dummy)
+{
+	unsigned long flags;
+	struct eeh_event	*event;
+
+	daemonize ("eehd");
+
+	while (1) {
+		set_current_state(TASK_INTERRUPTIBLE);
+
+		spin_lock_irqsave(&eeh_eventlist_lock, flags);
+		event = NULL;
+		if (!list_empty(&eeh_eventlist)) {
+			event = list_entry(eeh_eventlist.next, struct eeh_event, list);
+			list_del(&event->list);
+		}
+		spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
+		if (event == NULL)
+			break;
+
+		printk(KERN_INFO "EEH: Detected PCI bus error on device %s\n",
+		       pci_name(event->dev));
+
+		eeh_panic (event->dev, event->state);
+
+		kfree(event);
+	}
+
+	return 0;
+}
+
+/**
+ * eeh_thread_launcher
+ *
+ * @dummy - unused
+ */
+static void eeh_thread_launcher(void *dummy)
+{
+	if (kernel_thread(eeh_event_handler, NULL, CLONE_KERNEL) < 0)
+		printk(KERN_ERR "Failed to start EEH daemon\n");
+}
+
+/**
+ * eeh_send_failure_event - generate a PCI error event
+ * @dn device node for the device that saw the error
+ * @dev pci device that saw the error (may be NULL)
+ * @state slot reset state as reported by firmware
+ * @time_unavail milliseconds the slot is expected to remain unavailable
+ *
+ * This routine can be called within an interrupt context;
+ * the actual event will be delivered in a normal context
+ * (from a workqueue).
+ */
+int eeh_send_failure_event (struct device_node *dn,
+                            struct pci_dev *dev,
+                            int state,
+                            int time_unavail)
+{
+	unsigned long flags;
+	struct eeh_event *event;
+
+	event = kmalloc(sizeof(*event), GFP_ATOMIC);
+	if (event == NULL) {
+		printk (KERN_ERR "EEH: out of memory, event not handled\n");
+		return 1;
+ 	}
+
+	if (dev)
+		pci_dev_get(dev);
+
+	event->dn = dn;
+	event->dev = dev;
+	event->state = state;
+	event->time_unavail = time_unavail;
+
+	/* We may or may not be called in an interrupt context */
+	spin_lock_irqsave(&eeh_eventlist_lock, flags);
+	list_add(&event->list, &eeh_eventlist);
+	spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
+
+	schedule_work(&eeh_event_wq);
+
+	return 0;
+}
+
+/********************** END OF FILE ******************************/
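Editorial note: the split described in the overview comment (detect in interrupt context, recover in process context) means a detection site only builds the event and returns. A minimal producer-side sketch, mirroring the call made from eeh_dn_check_failure() earlier in this patch; the surrounding function is hypothetical and assumes the declarations from asm/eeh_event.h:

    /* Hypothetical detection site, for illustration only; safe in
     * interrupt context because eeh_send_failure_event() allocates with
     * GFP_ATOMIC and only takes the event-list spinlock. */
    static void example_report_frozen_slot(struct device_node *dn,
                                           struct pci_dev *dev,
                                           int reset_state, int wait_ms)
    {
            /* queue the event; eehd (the kernel thread launched via the
             * workqueue above) picks it up and drives the panic/recovery path */
            if (eeh_send_failure_event(dn, dev, reset_state, wait_ms))
                    printk(KERN_ERR "EEH: could not queue failure event\n");
    }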
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index fcc50bf..97ba521 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -42,7 +42,6 @@
 #include <asm/machdep.h>
 #include <asm/abs_addr.h>
 #include <asm/pSeries_reconfig.h>
-#include <asm/systemcfg.h>
 #include <asm/firmware.h>
 #include <asm/tce.h>
 #include <asm/ppc-pci.h>
@@ -582,7 +581,7 @@
 		return;
 	}
 
-	if (systemcfg->platform & PLATFORM_LPAR) {
+	if (platform_is_lpar()) {
 		if (firmware_has_feature(FW_FEATURE_MULTITCE)) {
 			ppc_md.tce_build = tce_buildmulti_pSeriesLP;
 			ppc_md.tce_free	 = tce_freemulti_pSeriesLP;
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c
index c198656..999a962 100644
--- a/arch/powerpc/platforms/pseries/pci.c
+++ b/arch/powerpc/platforms/pseries/pci.c
@@ -107,7 +107,6 @@
 
 void __init pSeries_final_fixup(void)
 {
-	phbs_remap_io();
 	pSeries_request_regions();
 
 	pci_addr_cache_build();
@@ -123,7 +122,7 @@
 	int i;
 	unsigned int reg;
 
-	if (!(systemcfg->platform & PLATFORM_PSERIES))
+	if (!platform_is_pseries())
 		return;
 
 	printk("Using INTC for W82c105 IDE controller.\n");
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index d7d4003..d886416 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -408,7 +408,7 @@
 {
 	struct proc_dir_entry *ent;
 
-	if (!(systemcfg->platform & PLATFORM_PSERIES))
+	if (!platform_is_pseries())
 		return 0;
 
 	ent = create_proc_entry("ppc64/ofdt", S_IWUSR, NULL);
diff --git a/arch/powerpc/platforms/pseries/rtasd.c b/arch/powerpc/platforms/pseries/rtasd.c
index e26b042..00cf331 100644
--- a/arch/powerpc/platforms/pseries/rtasd.c
+++ b/arch/powerpc/platforms/pseries/rtasd.c
@@ -482,10 +482,12 @@
 {
 	struct proc_dir_entry *entry;
 
-	/* No RTAS, only warn if we are on a pSeries box  */
+	if (!platform_is_pseries())
+		return 0;
+
+	/* No RTAS */
 	if (rtas_token("event-scan") == RTAS_UNKNOWN_SERVICE) {
-		if (systemcfg->platform & PLATFORM_PSERIES)
-			printk(KERN_INFO "rtasd: no event-scan on system\n");
+		printk(KERN_INFO "rtasd: no event-scan on system\n");
 		return 1;
 	}
 
diff --git a/arch/ppc64/kernel/scanlog.c b/arch/powerpc/platforms/pseries/scanlog.c
similarity index 100%
rename from arch/ppc64/kernel/scanlog.c
rename to arch/powerpc/platforms/pseries/scanlog.c
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index a093a0d..e94247c 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -249,7 +249,7 @@
 		ppc_md.idle_loop = default_idle;
 	}
 
-	if (systemcfg->platform & PLATFORM_LPAR)
+	if (platform_is_lpar())
 		ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
 	else
 		ppc_md.enable_pmcs = power4_enable_pmcs;
@@ -378,7 +378,7 @@
 
 	fw_feature_init();
 	
-	if (systemcfg->platform & PLATFORM_LPAR)
+	if (platform_is_lpar())
 		hpte_init_lpar();
 	else {
 		hpte_init_native();
@@ -388,7 +388,7 @@
 
 	generic_find_legacy_serial_ports(&physport, &default_speed);
 
-	if (systemcfg->platform & PLATFORM_LPAR)
+	if (platform_is_lpar())
 		find_udbg_vterm();
 	else if (physport) {
 		/* Map the uart for udbg. */
@@ -592,7 +592,7 @@
 
 static int pSeries_pci_probe_mode(struct pci_bus *bus)
 {
-	if (systemcfg->platform & PLATFORM_LPAR)
+	if (platform_is_lpar())
 		return PCI_PROBE_DEVTREE;
 	return PCI_PROBE_NORMAL;
 }
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index 7a243e8..3ba794c 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -46,6 +46,7 @@
 #include <asm/rtas.h>
 #include <asm/pSeries_reconfig.h>
 #include <asm/mpic.h>
+#include <asm/systemcfg.h>
 
 #include "plpar_wrappers.h"
 
@@ -96,7 +97,7 @@
 	int cpu = smp_processor_id();
 
 	cpu_clear(cpu, cpu_online_map);
-	systemcfg->processorCount--;
+	_systemcfg->processorCount--;
 
 	/*fix boot_cpuid here*/
 	if (cpu == boot_cpuid)
@@ -441,7 +442,7 @@
 	smp_ops->cpu_die = pSeries_cpu_die;
 
 	/* Processors can be added/removed only on LPAR */
-	if (systemcfg->platform == PLATFORM_PSERIES_LPAR)
+	if (platform_is_lpar())
 		pSeries_reconfig_notifier_register(&pSeries_smp_nb);
 #endif
 
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index c72c86f..72ac180 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -545,7 +545,9 @@
 		of_node_put(np);
 	}
 
-	if (systemcfg->platform == PLATFORM_PSERIES) {
+	if (platform_is_lpar())
+		ops = &pSeriesLP_ops;
+	else {
 #ifdef CONFIG_SMP
 		for_each_cpu(i) {
 			int hard_id;
@@ -561,12 +563,11 @@
 #else
 		xics_per_cpu[0] = ioremap(intr_base, intr_size);
 #endif /* CONFIG_SMP */
-	} else if (systemcfg->platform == PLATFORM_PSERIES_LPAR) {
-		ops = &pSeriesLP_ops;
 	}
 
 	xics_8259_pic.enable = i8259_pic.enable;
 	xics_8259_pic.disable = i8259_pic.disable;
+	xics_8259_pic.end = i8259_pic.end;
 	for (i = 0; i < 16; ++i)
 		get_irq_desc(i)->handler = &xics_8259_pic;
 	for (; i < NR_IRQS; ++i)
diff --git a/arch/powerpc/sysdev/u3_iommu.c b/arch/powerpc/sysdev/u3_iommu.c
index 543d659..f32baf7 100644
--- a/arch/powerpc/sysdev/u3_iommu.c
+++ b/arch/powerpc/sysdev/u3_iommu.c
@@ -226,7 +226,7 @@
 	iommu_table_u3.it_busno = 0;
 	iommu_table_u3.it_offset = 0;
 	/* it_size is in number of entries */
-	iommu_table_u3.it_size = dart_tablesize / sizeof(u32);
+	iommu_table_u3.it_size = (dart_tablesize / sizeof(u32)) >> DART_PAGE_FACTOR;
 
 	/* Initialize the common IOMMU code */
 	iommu_table_u3.it_base = (unsigned long)dart_vbase;
diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile
index 79a784f..b20312e 100644
--- a/arch/powerpc/xmon/Makefile
+++ b/arch/powerpc/xmon/Makefile
@@ -8,4 +8,4 @@
 obj-$(CONFIG_6xx)	+= start_32.o
 obj-$(CONFIG_4xx)	+= start_32.o
 obj-$(CONFIG_PPC64)	+= start_64.o
-obj-y			+= xmon.o ppc-dis.o ppc-opc.o subr_prf.o setjmp.o
+obj-y			+= xmon.o ppc-dis.o ppc-opc.o setjmp.o nonstdio.o
diff --git a/arch/powerpc/xmon/nonstdio.c b/arch/powerpc/xmon/nonstdio.c
new file mode 100644
index 0000000..7876583
--- /dev/null
+++ b/arch/powerpc/xmon/nonstdio.c
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 1996-2005 Paul Mackerras.
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+#include <linux/string.h>
+#include <asm/time.h>
+#include "nonstdio.h"
+
+int xmon_putchar(int c)
+{
+	char ch = c;
+
+	if (c == '\n')
+		xmon_putchar('\r');
+	return xmon_write(&ch, 1) == 1? c: -1;
+}
+
+static char line[256];
+static char *lineptr;
+static int lineleft;
+
+int xmon_expect(const char *str, unsigned long timeout)
+{
+	int c;
+	unsigned long t0;
+
+	/* assume 25MHz default timebase if tb_ticks_per_sec not set yet */
+	timeout *= tb_ticks_per_sec? tb_ticks_per_sec: 25000000;
+	t0 = get_tbl();
+	do {
+		lineptr = line;
+		for (;;) {
+			c = xmon_read_poll();
+			if (c == -1) {
+				if (get_tbl() - t0 > timeout)
+					return 0;
+				continue;
+			}
+			if (c == '\n')
+				break;
+			if (c != '\r' && lineptr < &line[sizeof(line) - 1])
+				*lineptr++ = c;
+		}
+		*lineptr = 0;
+	} while (strstr(line, str) == NULL);
+	return 1;
+}
+
+int xmon_getchar(void)
+{
+	int c;
+
+	if (lineleft == 0) {
+		lineptr = line;
+		for (;;) {
+			c = xmon_readchar();
+			if (c == -1 || c == 4)
+				break;
+			if (c == '\r' || c == '\n') {
+				*lineptr++ = '\n';
+				xmon_putchar('\n');
+				break;
+			}
+			switch (c) {
+			case 0177:
+			case '\b':
+				if (lineptr > line) {
+					xmon_putchar('\b');
+					xmon_putchar(' ');
+					xmon_putchar('\b');
+					--lineptr;
+				}
+				break;
+			case 'U' & 0x1F:
+				while (lineptr > line) {
+					xmon_putchar('\b');
+					xmon_putchar(' ');
+					xmon_putchar('\b');
+					--lineptr;
+				}
+				break;
+			default:
+				if (lineptr >= &line[sizeof(line) - 1])
+					xmon_putchar('\a');
+				else {
+					xmon_putchar(c);
+					*lineptr++ = c;
+				}
+			}
+		}
+		lineleft = lineptr - line;
+		lineptr = line;
+	}
+	if (lineleft == 0)
+		return -1;
+	--lineleft;
+	return *lineptr++;
+}
+
+char *xmon_gets(char *str, int nb)
+{
+	char *p;
+	int c;
+
+	for (p = str; p < str + nb - 1; ) {
+		c = xmon_getchar();
+		if (c == -1) {
+			if (p == str)
+				return NULL;
+			break;
+		}
+		*p++ = c;
+		if (c == '\n')
+			break;
+	}
+	*p = 0;
+	return str;
+}
+
+void xmon_printf(const char *format, ...)
+{
+	va_list args;
+	int n;
+	static char xmon_outbuf[1024];
+
+	va_start(args, format);
+	n = vsnprintf(xmon_outbuf, sizeof(xmon_outbuf), format, args);
+	va_end(args);
+	xmon_write(xmon_outbuf, n);
+}
diff --git a/arch/powerpc/xmon/nonstdio.h b/arch/powerpc/xmon/nonstdio.h
index 84211a2..47cebbd 100644
--- a/arch/powerpc/xmon/nonstdio.h
+++ b/arch/powerpc/xmon/nonstdio.h
@@ -1,22 +1,14 @@
-typedef int	FILE;
-extern FILE *xmon_stdin, *xmon_stdout;
 #define EOF	(-1)
-#define stdin	xmon_stdin
-#define stdout	xmon_stdout
-#define printf	xmon_printf
-#define fprintf	xmon_fprintf
-#define fputs	xmon_fputs
-#define fgets	xmon_fgets
-#define putchar	xmon_putchar
-#define getchar	xmon_getchar
-#define putc	xmon_putc
-#define getc	xmon_getc
-#define fopen(n, m)	NULL
-#define fflush(f)	do {} while (0)
-#define fclose(f)	do {} while (0)
-extern char *fgets(char *, int, void *);
-extern void xmon_printf(const char *, ...);
-extern void xmon_fprintf(void *, const char *, ...);
-extern void xmon_sprintf(char *, const char *, ...);
 
-#define perror(s)	printf("%s: no files!\n", (s))
+#define printf	xmon_printf
+#define putchar	xmon_putchar
+
+extern int xmon_putchar(int c);
+extern int xmon_getchar(void);
+extern char *xmon_gets(char *, int);
+extern void xmon_printf(const char *, ...);
+extern void xmon_map_scc(void);
+extern int xmon_expect(const char *str, unsigned long timeout);
+extern int xmon_write(void *ptr, int nb);
+extern int xmon_readchar(void);
+extern int xmon_read_poll(void);
diff --git a/arch/powerpc/xmon/setjmp.S b/arch/powerpc/xmon/setjmp.S
index f8e40df..96a91f1 100644
--- a/arch/powerpc/xmon/setjmp.S
+++ b/arch/powerpc/xmon/setjmp.S
@@ -14,61 +14,61 @@
 
 _GLOBAL(xmon_setjmp)
 	mflr	r0
-	STL	r0,0(r3)
-	STL	r1,SZL(r3)
-	STL	r2,2*SZL(r3)
+	PPC_STL	r0,0(r3)
+	PPC_STL	r1,SZL(r3)
+	PPC_STL	r2,2*SZL(r3)
 	mfcr	r0
-	STL	r0,3*SZL(r3)
-	STL	r13,4*SZL(r3)
-	STL	r14,5*SZL(r3)
-	STL	r15,6*SZL(r3)
-	STL	r16,7*SZL(r3)
-	STL	r17,8*SZL(r3)
-	STL	r18,9*SZL(r3)
-	STL	r19,10*SZL(r3)
-	STL	r20,11*SZL(r3)
-	STL	r21,12*SZL(r3)
-	STL	r22,13*SZL(r3)
-	STL	r23,14*SZL(r3)
-	STL	r24,15*SZL(r3)
-	STL	r25,16*SZL(r3)
-	STL	r26,17*SZL(r3)
-	STL	r27,18*SZL(r3)
-	STL	r28,19*SZL(r3)
-	STL	r29,20*SZL(r3)
-	STL	r30,21*SZL(r3)
-	STL	r31,22*SZL(r3)
+	PPC_STL	r0,3*SZL(r3)
+	PPC_STL	r13,4*SZL(r3)
+	PPC_STL	r14,5*SZL(r3)
+	PPC_STL	r15,6*SZL(r3)
+	PPC_STL	r16,7*SZL(r3)
+	PPC_STL	r17,8*SZL(r3)
+	PPC_STL	r18,9*SZL(r3)
+	PPC_STL	r19,10*SZL(r3)
+	PPC_STL	r20,11*SZL(r3)
+	PPC_STL	r21,12*SZL(r3)
+	PPC_STL	r22,13*SZL(r3)
+	PPC_STL	r23,14*SZL(r3)
+	PPC_STL	r24,15*SZL(r3)
+	PPC_STL	r25,16*SZL(r3)
+	PPC_STL	r26,17*SZL(r3)
+	PPC_STL	r27,18*SZL(r3)
+	PPC_STL	r28,19*SZL(r3)
+	PPC_STL	r29,20*SZL(r3)
+	PPC_STL	r30,21*SZL(r3)
+	PPC_STL	r31,22*SZL(r3)
 	li	r3,0
 	blr
 
 _GLOBAL(xmon_longjmp)
-	CMPI	r4,0
+	PPC_LCMPI r4,0
 	bne	1f
 	li	r4,1
-1:	LDL	r13,4*SZL(r3)
-	LDL	r14,5*SZL(r3)
-	LDL	r15,6*SZL(r3)
-	LDL	r16,7*SZL(r3)
-	LDL	r17,8*SZL(r3)
-	LDL	r18,9*SZL(r3)
-	LDL	r19,10*SZL(r3)
-	LDL	r20,11*SZL(r3)
-	LDL	r21,12*SZL(r3)
-	LDL	r22,13*SZL(r3)
-	LDL	r23,14*SZL(r3)
-	LDL	r24,15*SZL(r3)
-	LDL	r25,16*SZL(r3)
-	LDL	r26,17*SZL(r3)
-	LDL	r27,18*SZL(r3)
-	LDL	r28,19*SZL(r3)
-	LDL	r29,20*SZL(r3)
-	LDL	r30,21*SZL(r3)
-	LDL	r31,22*SZL(r3)
-	LDL	r0,3*SZL(r3)
+1:	PPC_LL	r13,4*SZL(r3)
+	PPC_LL	r14,5*SZL(r3)
+	PPC_LL	r15,6*SZL(r3)
+	PPC_LL	r16,7*SZL(r3)
+	PPC_LL	r17,8*SZL(r3)
+	PPC_LL	r18,9*SZL(r3)
+	PPC_LL	r19,10*SZL(r3)
+	PPC_LL	r20,11*SZL(r3)
+	PPC_LL	r21,12*SZL(r3)
+	PPC_LL	r22,13*SZL(r3)
+	PPC_LL	r23,14*SZL(r3)
+	PPC_LL	r24,15*SZL(r3)
+	PPC_LL	r25,16*SZL(r3)
+	PPC_LL	r26,17*SZL(r3)
+	PPC_LL	r27,18*SZL(r3)
+	PPC_LL	r28,19*SZL(r3)
+	PPC_LL	r29,20*SZL(r3)
+	PPC_LL	r30,21*SZL(r3)
+	PPC_LL	r31,22*SZL(r3)
+	PPC_LL	r0,3*SZL(r3)
 	mtcrf	0x38,r0
-	LDL	r0,0(r3)
-	LDL	r1,SZL(r3)
-	LDL	r2,2*SZL(r3)
+	PPC_LL	r0,0(r3)
+	PPC_LL	r1,SZL(r3)
+	PPC_LL	r2,2*SZL(r3)
 	mtlr	r0
 	mr	r3,r4
 	blr
@@ -84,52 +84,52 @@
  * different ABIs, though).
  */
 _GLOBAL(xmon_save_regs)
-	STL	r0,0*SZL(r3)
-	STL	r2,2*SZL(r3)
-	STL	r3,3*SZL(r3)
-	STL	r4,4*SZL(r3)
-	STL	r5,5*SZL(r3)
-	STL	r6,6*SZL(r3)
-	STL	r7,7*SZL(r3)
-	STL	r8,8*SZL(r3)
-	STL	r9,9*SZL(r3)
-	STL	r10,10*SZL(r3)
-	STL	r11,11*SZL(r3)
-	STL	r12,12*SZL(r3)
-	STL	r13,13*SZL(r3)
-	STL	r14,14*SZL(r3)
-	STL	r15,15*SZL(r3)
-	STL	r16,16*SZL(r3)
-	STL	r17,17*SZL(r3)
-	STL	r18,18*SZL(r3)
-	STL	r19,19*SZL(r3)
-	STL	r20,20*SZL(r3)
-	STL	r21,21*SZL(r3)
-	STL	r22,22*SZL(r3)
-	STL	r23,23*SZL(r3)
-	STL	r24,24*SZL(r3)
-	STL	r25,25*SZL(r3)
-	STL	r26,26*SZL(r3)
-	STL	r27,27*SZL(r3)
-	STL	r28,28*SZL(r3)
-	STL	r29,29*SZL(r3)
-	STL	r30,30*SZL(r3)
-	STL	r31,31*SZL(r3)
+	PPC_STL	r0,0*SZL(r3)
+	PPC_STL	r2,2*SZL(r3)
+	PPC_STL	r3,3*SZL(r3)
+	PPC_STL	r4,4*SZL(r3)
+	PPC_STL	r5,5*SZL(r3)
+	PPC_STL	r6,6*SZL(r3)
+	PPC_STL	r7,7*SZL(r3)
+	PPC_STL	r8,8*SZL(r3)
+	PPC_STL	r9,9*SZL(r3)
+	PPC_STL	r10,10*SZL(r3)
+	PPC_STL	r11,11*SZL(r3)
+	PPC_STL	r12,12*SZL(r3)
+	PPC_STL	r13,13*SZL(r3)
+	PPC_STL	r14,14*SZL(r3)
+	PPC_STL	r15,15*SZL(r3)
+	PPC_STL	r16,16*SZL(r3)
+	PPC_STL	r17,17*SZL(r3)
+	PPC_STL	r18,18*SZL(r3)
+	PPC_STL	r19,19*SZL(r3)
+	PPC_STL	r20,20*SZL(r3)
+	PPC_STL	r21,21*SZL(r3)
+	PPC_STL	r22,22*SZL(r3)
+	PPC_STL	r23,23*SZL(r3)
+	PPC_STL	r24,24*SZL(r3)
+	PPC_STL	r25,25*SZL(r3)
+	PPC_STL	r26,26*SZL(r3)
+	PPC_STL	r27,27*SZL(r3)
+	PPC_STL	r28,28*SZL(r3)
+	PPC_STL	r29,29*SZL(r3)
+	PPC_STL	r30,30*SZL(r3)
+	PPC_STL	r31,31*SZL(r3)
 	/* go up one stack frame for SP */
-	LDL	r4,0(r1)
-	STL	r4,1*SZL(r3)
+	PPC_LL	r4,0(r1)
+	PPC_STL	r4,1*SZL(r3)
 	/* get caller's LR */
-	LDL	r0,LRSAVE(r4)
-	STL	r0,_NIP-STACK_FRAME_OVERHEAD(r3)
-	STL	r0,_LINK-STACK_FRAME_OVERHEAD(r3)
+	PPC_LL	r0,LRSAVE(r4)
+	PPC_STL	r0,_NIP-STACK_FRAME_OVERHEAD(r3)
+	PPC_STL	r0,_LINK-STACK_FRAME_OVERHEAD(r3)
 	mfmsr	r0
-	STL	r0,_MSR-STACK_FRAME_OVERHEAD(r3)
+	PPC_STL	r0,_MSR-STACK_FRAME_OVERHEAD(r3)
 	mfctr	r0
-	STL	r0,_CTR-STACK_FRAME_OVERHEAD(r3)
+	PPC_STL	r0,_CTR-STACK_FRAME_OVERHEAD(r3)
 	mfxer	r0
-	STL	r0,_XER-STACK_FRAME_OVERHEAD(r3)
+	PPC_STL	r0,_XER-STACK_FRAME_OVERHEAD(r3)
 	mfcr	r0
-	STL	r0,_CCR-STACK_FRAME_OVERHEAD(r3)
+	PPC_STL	r0,_CCR-STACK_FRAME_OVERHEAD(r3)
 	li	r0,0
-	STL	r0,_TRAP-STACK_FRAME_OVERHEAD(r3)
+	PPC_STL	r0,_TRAP-STACK_FRAME_OVERHEAD(r3)
 	blr
diff --git a/arch/powerpc/xmon/start_32.c b/arch/powerpc/xmon/start_32.c
index 69b658c..c2464df 100644
--- a/arch/powerpc/xmon/start_32.c
+++ b/arch/powerpc/xmon/start_32.c
@@ -11,7 +11,6 @@
 #include <linux/cuda.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
-#include <linux/sysrq.h>
 #include <linux/bitops.h>
 #include <asm/xmon.h>
 #include <asm/prom.h>
@@ -22,10 +21,11 @@
 #include <asm/processor.h>
 #include <asm/delay.h>
 #include <asm/btext.h>
+#include <asm/time.h>
+#include "nonstdio.h"
 
 static volatile unsigned char __iomem *sccc, *sccd;
 unsigned int TXRDY, RXRDY, DLAB;
-static int xmon_expect(const char *str, unsigned int timeout);
 
 static int use_serial;
 static int use_screen;
@@ -33,16 +33,6 @@
 static int xmon_use_sccb;
 static struct device_node *channel_node;
 
-#define TB_SPEED	25000000
-
-static inline unsigned int readtb(void)
-{
-	unsigned int ret;
-
-	asm volatile("mftb %0" : "=r" (ret) :);
-	return ret;
-}
-
 void buf_access(void)
 {
 	if (DLAB)
@@ -91,23 +81,7 @@
 }
 #endif /* CONFIG_PPC_CHRP */
 
-#ifdef CONFIG_MAGIC_SYSRQ
-static void sysrq_handle_xmon(int key, struct pt_regs *regs,
-			      struct tty_struct *tty)
-{
-	xmon(regs);
-}
-
-static struct sysrq_key_op sysrq_xmon_op =
-{
-	.handler =	sysrq_handle_xmon,
-	.help_msg =	"Xmon",
-	.action_msg =	"Entering xmon",
-};
-#endif
-
-void
-xmon_map_scc(void)
+void xmon_map_scc(void)
 {
 #ifdef CONFIG_PPC_MULTIPLATFORM
 	volatile unsigned char __iomem *base;
@@ -217,8 +191,6 @@
 	RXRDY = 1;
 	DLAB = 0x80;
 #endif /* platform */
-
-	register_sysrq_key('x', &sysrq_xmon_op);
 }
 
 static int scc_initialized = 0;
@@ -238,8 +210,7 @@
 #endif /* CONFIG_ADB_CUDA */
 }
 
-int
-xmon_write(void *handle, void *ptr, int nb)
+int xmon_write(void *ptr, int nb)
 {
 	char *p = ptr;
 	int i, c, ct;
@@ -311,8 +282,7 @@
 	"\0.\0*\0+\0\0\0\0\0/\r\0-\0"			/* 0x40 - 0x4f */
 	"\0\0000123456789\0\0\0";			/* 0x50 - 0x5f */
 
-static int
-xmon_get_adb_key(void)
+static int xmon_get_adb_key(void)
 {
 	int k, t, on;
 
@@ -350,32 +320,21 @@
 }
 #endif /* CONFIG_BOOTX_TEXT */
 
-int
-xmon_read(void *handle, void *ptr, int nb)
+int xmon_readchar(void)
 {
-    char *p = ptr;
-    int i;
-
 #ifdef CONFIG_BOOTX_TEXT
-    if (use_screen) {
-	for (i = 0; i < nb; ++i)
-	    *p++ = xmon_get_adb_key();
-	return i;
-    }
+	if (use_screen)
+		return xmon_get_adb_key();
 #endif
-    if (!scc_initialized)
-	xmon_init_scc();
-    for (i = 0; i < nb; ++i) {
+	if (!scc_initialized)
+		xmon_init_scc();
 	while ((*sccc & RXRDY) == 0)
-	    do_poll_adb();
+		do_poll_adb();
 	buf_access();
-	*p++ = *sccd;
-    }
-    return i;
+	return *sccd;
 }
 
-int
-xmon_read_poll(void)
+int xmon_read_poll(void)
 {
 	if ((*sccc & RXRDY) == 0) {
 		do_poll_adb();
@@ -395,8 +354,7 @@
     3,  0xc1,		/* rx enable, 8 bits */
 };
 
-void
-xmon_init_scc(void)
+void xmon_init_scc(void)
 {
 	if ( _machine == _MACH_chrp )
 	{
@@ -410,6 +368,7 @@
 	else if ( _machine == _MACH_Pmac )
 	{
 		int i, x;
+		unsigned long timeout;
 
 		if (channel_node != 0)
 			pmac_call_feature(
@@ -424,8 +383,12 @@
 				PMAC_FTR_MODEM_ENABLE,
 				channel_node, 0, 1);
 			printk(KERN_INFO "Modem powered up by debugger !\n");
-			t0 = readtb();
-			while (readtb() - t0 < 3*TB_SPEED)
+			t0 = get_tbl();
+			timeout = 3 * tb_ticks_per_sec;
+			if (timeout == 0)
+				/* assume 25MHz if tb_ticks_per_sec not set */
+				timeout = 75000000;
+			while (get_tbl() - t0 < timeout)
 				eieio();
 		}
 		/* use the B channel if requested */
@@ -447,164 +410,19 @@
 	scc_initialized = 1;
 	if (via_modem) {
 		for (;;) {
-			xmon_write(NULL, "ATE1V1\r", 7);
+			xmon_write("ATE1V1\r", 7);
 			if (xmon_expect("OK", 5)) {
-				xmon_write(NULL, "ATA\r", 4);
+				xmon_write("ATA\r", 4);
 				if (xmon_expect("CONNECT", 40))
 					break;
 			}
-			xmon_write(NULL, "+++", 3);
+			xmon_write("+++", 3);
 			xmon_expect("OK", 3);
 		}
 	}
 }
 
-void *xmon_stdin;
-void *xmon_stdout;
-void *xmon_stderr;
-
-int xmon_putc(int c, void *f)
-{
-    char ch = c;
-
-    if (c == '\n')
-	xmon_putc('\r', f);
-    return xmon_write(f, &ch, 1) == 1? c: -1;
-}
-
-int xmon_putchar(int c)
-{
-	return xmon_putc(c, xmon_stdout);
-}
-
-int xmon_fputs(char *str, void *f)
-{
-	int n = strlen(str);
-
-	return xmon_write(f, str, n) == n? 0: -1;
-}
-
-int
-xmon_readchar(void)
-{
-	char ch;
-
-	for (;;) {
-		switch (xmon_read(xmon_stdin, &ch, 1)) {
-		case 1:
-			return ch;
-		case -1:
-			xmon_printf("read(stdin) returned -1\r\n", 0, 0);
-			return -1;
-		}
-	}
-}
-
-static char line[256];
-static char *lineptr;
-static int lineleft;
-
-int xmon_expect(const char *str, unsigned int timeout)
-{
-	int c;
-	unsigned int t0;
-
-	timeout *= TB_SPEED;
-	t0 = readtb();
-	do {
-		lineptr = line;
-		for (;;) {
-			c = xmon_read_poll();
-			if (c == -1) {
-				if (readtb() - t0 > timeout)
-					return 0;
-				continue;
-			}
-			if (c == '\n')
-				break;
-			if (c != '\r' && lineptr < &line[sizeof(line) - 1])
-				*lineptr++ = c;
-		}
-		*lineptr = 0;
-	} while (strstr(line, str) == NULL);
-	return 1;
-}
-
-int
-xmon_getchar(void)
-{
-    int c;
-
-    if (lineleft == 0) {
-	lineptr = line;
-	for (;;) {
-	    c = xmon_readchar();
-	    if (c == -1 || c == 4)
-		break;
-	    if (c == '\r' || c == '\n') {
-		*lineptr++ = '\n';
-		xmon_putchar('\n');
-		break;
-	    }
-	    switch (c) {
-	    case 0177:
-	    case '\b':
-		if (lineptr > line) {
-		    xmon_putchar('\b');
-		    xmon_putchar(' ');
-		    xmon_putchar('\b');
-		    --lineptr;
-		}
-		break;
-	    case 'U' & 0x1F:
-		while (lineptr > line) {
-		    xmon_putchar('\b');
-		    xmon_putchar(' ');
-		    xmon_putchar('\b');
-		    --lineptr;
-		}
-		break;
-	    default:
-		if (lineptr >= &line[sizeof(line) - 1])
-		    xmon_putchar('\a');
-		else {
-		    xmon_putchar(c);
-		    *lineptr++ = c;
-		}
-	    }
-	}
-	lineleft = lineptr - line;
-	lineptr = line;
-    }
-    if (lineleft == 0)
-	return -1;
-    --lineleft;
-    return *lineptr++;
-}
-
-char *
-xmon_fgets(char *str, int nb, void *f)
-{
-    char *p;
-    int c;
-
-    for (p = str; p < str + nb - 1; ) {
-	c = xmon_getchar();
-	if (c == -1) {
-	    if (p == str)
-		return NULL;
-	    break;
-	}
-	*p++ = c;
-	if (c == '\n')
-	    break;
-    }
-    *p = 0;
-    return str;
-}
-
-void
-xmon_enter(void)
+void xmon_enter(void)
 {
 #ifdef CONFIG_ADB_PMU
 	if (_machine == _MACH_Pmac) {
@@ -613,8 +431,7 @@
 #endif
 }
 
-void
-xmon_leave(void)
+void xmon_leave(void)
 {
 #ifdef CONFIG_ADB_PMU
 	if (_machine == _MACH_Pmac) {
diff --git a/arch/powerpc/xmon/start_64.c b/arch/powerpc/xmon/start_64.c
index e50c158..712552c 100644
--- a/arch/powerpc/xmon/start_64.c
+++ b/arch/powerpc/xmon/start_64.c
@@ -6,182 +6,29 @@
  *      as published by the Free Software Foundation; either version
  *      2 of the License, or (at your option) any later version.
  */
-#include <linux/config.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/sysrq.h>
-#include <linux/init.h>
 #include <asm/machdep.h>
-#include <asm/io.h>
-#include <asm/page.h>
-#include <asm/prom.h>
-#include <asm/processor.h>
 #include <asm/udbg.h>
-#include <asm/system.h>
 #include "nonstdio.h"
 
-#ifdef CONFIG_MAGIC_SYSRQ
-
-static void sysrq_handle_xmon(int key, struct pt_regs *pt_regs,
-			      struct tty_struct *tty) 
+void xmon_map_scc(void)
 {
-	/* ensure xmon is enabled */
-	xmon_init(1);
-	debugger(pt_regs);
 }
 
-static struct sysrq_key_op sysrq_xmon_op = 
-{
-	.handler =	sysrq_handle_xmon,
-	.help_msg =	"Xmon",
-	.action_msg =	"Entering xmon",
-};
-
-static int __init setup_xmon_sysrq(void)
-{
-	register_sysrq_key('x', &sysrq_xmon_op);
-	return 0;
-}
-__initcall(setup_xmon_sysrq);
-#endif /* CONFIG_MAGIC_SYSRQ */
-
-int
-xmon_write(void *handle, void *ptr, int nb)
+int xmon_write(void *ptr, int nb)
 {
 	return udbg_write(ptr, nb);
 }
 
-int
-xmon_read(void *handle, void *ptr, int nb)
+int xmon_readchar(void)
 {
-	return udbg_read(ptr, nb);
+	if (udbg_getc)
+		return udbg_getc();
+	return -1;
 }
 
-int
-xmon_read_poll(void)
+int xmon_read_poll(void)
 {
 	if (udbg_getc_poll)
 		return udbg_getc_poll();
 	return -1;
 }
- 
-FILE *xmon_stdin;
-FILE *xmon_stdout;
-
-int
-xmon_putc(int c, void *f)
-{
-	char ch = c;
-
-	if (c == '\n')
-		xmon_putc('\r', f);
-	return xmon_write(f, &ch, 1) == 1? c: -1;
-}
-
-int
-xmon_putchar(int c)
-{
-	return xmon_putc(c, xmon_stdout);
-}
-
-int
-xmon_fputs(char *str, void *f)
-{
-	int n = strlen(str);
-
-	return xmon_write(f, str, n) == n? 0: -1;
-}
-
-int
-xmon_readchar(void)
-{
-	char ch;
-
-	for (;;) {
-		switch (xmon_read(xmon_stdin, &ch, 1)) {
-		case 1:
-			return ch;
-		case -1:
-			xmon_printf("read(stdin) returned -1\r\n", 0, 0);
-			return -1;
-		}
-	}
-}
-
-static char line[256];
-static char *lineptr;
-static int lineleft;
-
-int
-xmon_getchar(void)
-{
-	int c;
-
-	if (lineleft == 0) {
-		lineptr = line;
-		for (;;) {
-			c = xmon_readchar();
-			if (c == -1 || c == 4)
-				break;
-			if (c == '\r' || c == '\n') {
-				*lineptr++ = '\n';
-				xmon_putchar('\n');
-				break;
-			}
-			switch (c) {
-			case 0177:
-			case '\b':
-				if (lineptr > line) {
-					xmon_putchar('\b');
-					xmon_putchar(' ');
-					xmon_putchar('\b');
-					--lineptr;
-				}
-				break;
-			case 'U' & 0x1F:
-				while (lineptr > line) {
-					xmon_putchar('\b');
-					xmon_putchar(' ');
-					xmon_putchar('\b');
-					--lineptr;
-				}
-				break;
-			default:
-				if (lineptr >= &line[sizeof(line) - 1])
-					xmon_putchar('\a');
-				else {
-					xmon_putchar(c);
-					*lineptr++ = c;
-				}
-			}
-		}
-		lineleft = lineptr - line;
-		lineptr = line;
-	}
-	if (lineleft == 0)
-		return -1;
-	--lineleft;
-	return *lineptr++;
-}
-
-char *
-xmon_fgets(char *str, int nb, void *f)
-{
-	char *p;
-	int c;
-
-	for (p = str; p < str + nb - 1; ) {
-		c = xmon_getchar();
-		if (c == -1) {
-			if (p == str)
-				return NULL;
-			break;
-		}
-		*p++ = c;
-		if (c == '\n')
-			break;
-	}
-	*p = 0;
-	return str;
-}
diff --git a/arch/powerpc/xmon/start_8xx.c b/arch/powerpc/xmon/start_8xx.c
index a48bd59..4c17b04 100644
--- a/arch/powerpc/xmon/start_8xx.c
+++ b/arch/powerpc/xmon/start_8xx.c
@@ -15,273 +15,30 @@
 #include <asm/8xx_immap.h>
 #include <asm/mpc8xx.h>
 #include <asm/commproc.h>
+#include "nonstdio.h"
 
-extern void xmon_printf(const char *fmt, ...);
 extern int xmon_8xx_write(char *str, int nb);
 extern int xmon_8xx_read_poll(void);
 extern int xmon_8xx_read_char(void);
-void prom_drawhex(uint);
-void prom_drawstring(const char *str);
 
-static int use_screen = 1; /* default */
-
-#define TB_SPEED	25000000
-
-static inline unsigned int readtb(void)
+void xmon_map_scc(void)
 {
-	unsigned int ret;
-
-	asm volatile("mftb %0" : "=r" (ret) :);
-	return ret;
-}
-
-void buf_access(void)
-{
-}
-
-void
-xmon_map_scc(void)
-{
-
 	cpmp = (cpm8xx_t *)&(((immap_t *)IMAP_ADDR)->im_cpm);
-	use_screen = 0;
-	
-	prom_drawstring("xmon uses serial port\n");
 }
 
-static int scc_initialized = 0;
-
 void xmon_init_scc(void);
 
-int
-xmon_write(void *handle, void *ptr, int nb)
+int xmon_write(void *ptr, int nb)
 {
-	char *p = ptr;
-	int i, c, ct;
-
-	if (!scc_initialized)
-		xmon_init_scc();
-
 	return(xmon_8xx_write(ptr, nb));
 }
 
-int xmon_wants_key;
-
-int
-xmon_read(void *handle, void *ptr, int nb)
+int xmon_readchar(void)
 {
-	char *p = ptr;
-	int i;
-
-	if (!scc_initialized)
-		xmon_init_scc();
-
-	for (i = 0; i < nb; ++i) {
-		*p++ = xmon_8xx_read_char();
-	}
-	return i;
+	return xmon_8xx_read_char();
 }
 
-int
-xmon_read_poll(void)
+int xmon_read_poll(void)
 {
 	return(xmon_8xx_read_poll());
 }
-
-void
-xmon_init_scc()
-{
-	scc_initialized = 1;
-}
-
-#if 0
-extern int (*prom_entry)(void *);
-
-int
-xmon_exit(void)
-{
-    struct prom_args {
-	char *service;
-    } args;
-
-    for (;;) {
-	args.service = "exit";
-	(*prom_entry)(&args);
-    }
-}
-#endif
-
-void *xmon_stdin;
-void *xmon_stdout;
-void *xmon_stderr;
-
-void
-xmon_init(void)
-{
-}
-
-int
-xmon_putc(int c, void *f)
-{
-    char ch = c;
-
-    if (c == '\n')
-	xmon_putc('\r', f);
-    return xmon_write(f, &ch, 1) == 1? c: -1;
-}
-
-int
-xmon_putchar(int c)
-{
-    return xmon_putc(c, xmon_stdout);
-}
-
-int
-xmon_fputs(char *str, void *f)
-{
-    int n = strlen(str);
-
-    return xmon_write(f, str, n) == n? 0: -1;
-}
-
-int
-xmon_readchar(void)
-{
-    char ch;
-
-    for (;;) {
-	switch (xmon_read(xmon_stdin, &ch, 1)) {
-	case 1:
-	    return ch;
-	case -1:
-	    xmon_printf("read(stdin) returned -1\r\n", 0, 0);
-	    return -1;
-	}
-    }
-}
-
-static char line[256];
-static char *lineptr;
-static int lineleft;
-
-#if 0
-int xmon_expect(const char *str, unsigned int timeout)
-{
-	int c;
-	unsigned int t0;
-
-	timeout *= TB_SPEED;
-	t0 = readtb();
-	do {
-		lineptr = line;
-		for (;;) {
-			c = xmon_read_poll();
-			if (c == -1) {
-				if (readtb() - t0 > timeout)
-					return 0;
-				continue;
-			}
-			if (c == '\n')
-				break;
-			if (c != '\r' && lineptr < &line[sizeof(line) - 1])
-				*lineptr++ = c;
-		}
-		*lineptr = 0;
-	} while (strstr(line, str) == NULL);
-	return 1;
-}
-#endif
-
-int
-xmon_getchar(void)
-{
-    int c;
-
-    if (lineleft == 0) {
-	lineptr = line;
-	for (;;) {
-	    c = xmon_readchar();
-	    if (c == -1 || c == 4)
-		break;
-	    if (c == '\r' || c == '\n') {
-		*lineptr++ = '\n';
-		xmon_putchar('\n');
-		break;
-	    }
-	    switch (c) {
-	    case 0177:
-	    case '\b':
-		if (lineptr > line) {
-		    xmon_putchar('\b');
-		    xmon_putchar(' ');
-		    xmon_putchar('\b');
-		    --lineptr;
-		}
-		break;
-	    case 'U' & 0x1F:
-		while (lineptr > line) {
-		    xmon_putchar('\b');
-		    xmon_putchar(' ');
-		    xmon_putchar('\b');
-		    --lineptr;
-		}
-		break;
-	    default:
-		if (lineptr >= &line[sizeof(line) - 1])
-		    xmon_putchar('\a');
-		else {
-		    xmon_putchar(c);
-		    *lineptr++ = c;
-		}
-	    }
-	}
-	lineleft = lineptr - line;
-	lineptr = line;
-    }
-    if (lineleft == 0)
-	return -1;
-    --lineleft;
-    return *lineptr++;
-}
-
-char *
-xmon_fgets(char *str, int nb, void *f)
-{
-    char *p;
-    int c;
-
-    for (p = str; p < str + nb - 1; ) {
-	c = xmon_getchar();
-	if (c == -1) {
-	    if (p == str)
-		return 0;
-	    break;
-	}
-	*p++ = c;
-	if (c == '\n')
-	    break;
-    }
-    *p = 0;
-    return str;
-}
-
-void
-prom_drawhex(uint val)
-{
-	unsigned char buf[10];
-
-	int i;
-	for (i = 7;  i >= 0;  i--)
-	{
-		buf[i] = "0123456789abcdef"[val & 0x0f];
-		val >>= 4;
-	}
-	buf[8] = '\0';
-	xmon_fputs(buf, xmon_stdout);
-}
-
-void
-prom_drawstring(const char *str)
-{
-	xmon_fputs(str, xmon_stdout);
-}
diff --git a/arch/powerpc/xmon/subr_prf.c b/arch/powerpc/xmon/subr_prf.c
deleted file mode 100644
index b48738c..0000000
--- a/arch/powerpc/xmon/subr_prf.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Written by Cort Dougan to replace the version originally used
- * by Paul Mackerras, which came from NetBSD and thus had copyright
- * conflicts with Linux.
- *
- * This file makes liberal use of the standard linux utility
- * routines to reduce the size of the binary.  We assume we can
- * trust some parts of Linux inside the debugger.
- *   -- Cort (cort@cs.nmt.edu)
- *
- * Copyright (C) 1999 Cort Dougan.
- *
- *      This program is free software; you can redistribute it and/or
- *      modify it under the terms of the GNU General Public License
- *      as published by the Free Software Foundation; either version
- *      2 of the License, or (at your option) any later version.
- */
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/module.h>
-#include <stdarg.h>
-#include "nonstdio.h"
-
-extern int xmon_write(void *, void *, int);
-
-void xmon_vfprintf(void *f, const char *fmt, va_list ap)
-{
-	static char xmon_buf[2048];
-	int n;
-
-	n = vsprintf(xmon_buf, fmt, ap);
-	xmon_write(f, xmon_buf, n);
-}
-
-void xmon_printf(const char *fmt, ...)
-{
-	va_list ap;
-
-	va_start(ap, fmt);
-	xmon_vfprintf(stdout, fmt, ap);
-	va_end(ap);
-}
-EXPORT_SYMBOL(xmon_printf);
-
-void xmon_fprintf(void *f, const char *fmt, ...)
-{
-	va_list ap;
-
-	va_start(ap, fmt);
-	xmon_vfprintf(f, fmt, ap);
-	va_end(ap);
-}
-
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 1124f11..cfcb2a5 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -1,7 +1,7 @@
 /*
  * Routines providing a simple monitor for use on the PowerMac.
  *
- * Copyright (C) 1996 Paul Mackerras.
+ * Copyright (C) 1996-2005 Paul Mackerras.
  *
  *      This program is free software; you can redistribute it and/or
  *      modify it under the terms of the GNU General Public License
@@ -18,6 +18,7 @@
 #include <linux/kallsyms.h>
 #include <linux/cpumask.h>
 #include <linux/module.h>
+#include <linux/sysrq.h>
 
 #include <asm/ptrace.h>
 #include <asm/string.h>
@@ -144,15 +145,10 @@
 static const char *getvecname(unsigned long vec);
 
 extern int print_insn_powerpc(unsigned long, unsigned long, int);
-extern void printf(const char *fmt, ...);
-extern void xmon_vfprintf(void *f, const char *fmt, va_list ap);
-extern int xmon_putc(int c, void *f);
-extern int putchar(int ch);
 
 extern void xmon_enter(void);
 extern void xmon_leave(void);
 
-extern int xmon_read_poll(void);
 extern long setjmp(long *);
 extern void longjmp(long *, long);
 extern void xmon_save_regs(struct pt_regs *);
@@ -748,7 +744,6 @@
 		printf("%x:", smp_processor_id());
 #endif /* CONFIG_SMP */
 		printf("mon> ");
-		fflush(stdout);
 		flush_input();
 		termch = 0;
 		cmd = skipbl();
@@ -1797,7 +1792,7 @@
 	for(;;){
 		if (!mnoread)
 			n = mread(adrs, val, size);
-		printf("%.16x%c", adrs, brev? 'r': ' ');
+		printf(REG"%c", adrs, brev? 'r': ' ');
 		if (!mnoread) {
 			if (brev)
 				byterev(val, size);
@@ -1976,17 +1971,18 @@
 		nr = mread(adrs, temp, r);
 		adrs += nr;
 		for (m = 0; m < r; ++m) {
-		        if ((m & 7) == 0 && m > 0)
-			    putchar(' ');
+		        if ((m & (sizeof(long) - 1)) == 0 && m > 0)
+				putchar(' ');
 			if (m < nr)
 				printf("%.2x", temp[m]);
 			else
 				printf("%s", fault_chars[fault_type]);
 		}
-		if (m <= 8)
-			printf(" ");
-		for (; m < 16; ++m)
+		for (; m < 16; ++m) {
+		        if ((m & (sizeof(long) - 1)) == 0)
+				putchar(' ');
 			printf("  ");
+		}
 		printf("  |");
 		for (m = 0; m < r; ++m) {
 			if (m < nr) {
@@ -2151,7 +2147,6 @@
 		ok = mread(a, &v, 1);
 		if (ok && !ook) {
 			printf("%.8x .. ", a);
-			fflush(stdout);
 		} else if (!ok && ook)
 			printf("%.8x\n", a - mskip);
 		ook = ok;
@@ -2372,7 +2367,7 @@
 inchar(void)
 {
 	if (lineptr == NULL || *lineptr == 0) {
-		if (fgets(line, sizeof(line), stdin) == NULL) {
+		if (xmon_gets(line, sizeof(line)) == NULL) {
 			lineptr = NULL;
 			return EOF;
 		}
@@ -2526,4 +2521,29 @@
 		__debugger_dabr_match = NULL;
 		__debugger_fault_handler = NULL;
 	}
+	xmon_map_scc();
 }
+
+#ifdef CONFIG_MAGIC_SYSRQ
+static void sysrq_handle_xmon(int key, struct pt_regs *pt_regs,
+			      struct tty_struct *tty) 
+{
+	/* ensure xmon is enabled */
+	xmon_init(1);
+	debugger(pt_regs);
+}
+
+static struct sysrq_key_op sysrq_xmon_op = 
+{
+	.handler =	sysrq_handle_xmon,
+	.help_msg =	"Xmon",
+	.action_msg =	"Entering xmon",
+};
+
+static int __init setup_xmon_sysrq(void)
+{
+	register_sysrq_key('x', &sysrq_xmon_op);
+	return 0;
+}
+__initcall(setup_xmon_sysrq);
+#endif /* CONFIG_MAGIC_SYSRQ */
diff --git a/arch/ppc/boot/include/of1275.h b/arch/ppc/boot/include/of1275.h
index 69173df..4ed88ac 100644
--- a/arch/ppc/boot/include/of1275.h
+++ b/arch/ppc/boot/include/of1275.h
@@ -19,6 +19,9 @@
 
 /* function declarations */
 
+int	call_prom(const char *service, int nargs, int nret, ...);
+int	call_prom_ret(const char *service, int nargs, int nret,
+		      unsigned int *rets, ...);
 void *	claim(unsigned int virt, unsigned int size, unsigned int align);
 int	map(unsigned int phys, unsigned int virt, unsigned int size);
 void	enter(void);
diff --git a/arch/ppc/boot/of1275/Makefile b/arch/ppc/boot/of1275/Makefile
index 02e6f23..0b979c0 100644
--- a/arch/ppc/boot/of1275/Makefile
+++ b/arch/ppc/boot/of1275/Makefile
@@ -3,4 +3,4 @@
 #
 
 lib-y := claim.o enter.o exit.o finddevice.o getprop.o ofinit.o	\
-	 ofstdio.o read.o release.o write.o map.o
+	 ofstdio.o read.o release.o write.o map.o call_prom.o
diff --git a/arch/ppc/boot/of1275/call_prom.c b/arch/ppc/boot/of1275/call_prom.c
new file mode 100644
index 0000000..9479a3a
--- /dev/null
+++ b/arch/ppc/boot/of1275/call_prom.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 1996-2005 Paul Mackerras.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include "of1275.h"
+#include <stdarg.h>
+
+int call_prom(const char *service, int nargs, int nret, ...)
+{
+	int i;
+	struct prom_args {
+		const char *service;
+		int nargs;
+		int nret;
+		unsigned int args[12];
+	} args;
+	va_list list;
+
+	args.service = service;
+	args.nargs = nargs;
+	args.nret = nret;
+
+	va_start(list, nret);
+	for (i = 0; i < nargs; i++)
+		args.args[i] = va_arg(list, unsigned int);
+	va_end(list);
+
+	for (i = 0; i < nret; i++)
+		args.args[nargs+i] = 0;
+
+	if (of_prom_entry(&args) < 0)
+		return -1;
+
+	return (nret > 0)? args.args[nargs]: 0;
+}
+
+int call_prom_ret(const char *service, int nargs, int nret,
+		  unsigned int *rets, ...)
+{
+	int i;
+	struct prom_args {
+		const char *service;
+		int nargs;
+		int nret;
+		unsigned int args[12];
+	} args;
+	va_list list;
+
+	args.service = service;
+	args.nargs = nargs;
+	args.nret = nret;
+
+	va_start(list, rets);
+	for (i = 0; i < nargs; i++)
+		args.args[i] = va_arg(list, unsigned int);
+	va_end(list);
+
+	for (i = 0; i < nret; i++)
+		args.args[nargs+i] = 0;
+
+	if (of_prom_entry(&args) < 0)
+		return -1;
+
+	if (rets != (void *) 0)
+		for (i = 1; i < nret; ++i)
+			rets[i-1] = args.args[nargs+i];
+
+	return (nret > 0)? args.args[nargs]: 0;
+}
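Illustration (not part of this patch): with call_prom() and call_prom_ret() in place, the remaining of1275 client-interface wrappers collapse to one-liners, exactly as the finddevice() rewrite below shows. Assuming the getprop() prototype declared in of1275.h, such a wrapper could look like:

	/* hypothetical sketch -- prototype and types assumed from of1275.h */
	#include "of1275.h"

	int getprop(phandle node, const char *name, void *buf, int buflen)
	{
		return call_prom("getprop", 4, 1, node, name, buf, buflen);
	}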
diff --git a/arch/ppc/boot/of1275/claim.c b/arch/ppc/boot/of1275/claim.c
index 13169a5..1ed3aee 100644
--- a/arch/ppc/boot/of1275/claim.c
+++ b/arch/ppc/boot/of1275/claim.c
@@ -9,27 +9,84 @@
  */
 
 #include "of1275.h"
+#include "nonstdio.h"
 
-void *
-claim(unsigned int virt, unsigned int size, unsigned int align)
+/*
+ * Older OF's require that when claiming a specific range of addresses,
+ * we claim the physical space in the /memory node and the virtual
+ * space in the chosen mmu node, and then do a map operation to
+ * map virtual to physical.
+ */
+static int need_map = -1;
+static ihandle chosen_mmu;
+static phandle memory;
+
+/* returns true if s2 is a prefix of s1 */
+static int string_match(const char *s1, const char *s2)
 {
-    struct prom_args {
-	char *service;
-	int nargs;
-	int nret;
-	unsigned int virt;
-	unsigned int size;
-	unsigned int align;
-	void *ret;
-    } args;
+	for (; *s2; ++s2)
+		if (*s1++ != *s2)
+			return 0;
+	return 1;
+}
 
-    args.service = "claim";
-    args.nargs = 3;
-    args.nret = 1;
-    args.virt = virt;
-    args.size = size;
-    args.align = align;
-    args.ret = (void *) 0;
-    (*of_prom_entry)(&args);
-    return args.ret;
+static int check_of_version(void)
+{
+	phandle oprom, chosen;
+	char version[64];
+
+	oprom = finddevice("/openprom");
+	if (oprom == OF_INVALID_HANDLE)
+		return 0;
+	if (getprop(oprom, "model", version, sizeof(version)) <= 0)
+		return 0;
+	version[sizeof(version)-1] = 0;
+	printf("OF version = '%s'\n", version);
+	if (!string_match(version, "Open Firmware, 1.")
+	    && !string_match(version, "FirmWorks,3."))
+		return 0;
+	chosen = finddevice("/chosen");
+	if (chosen == OF_INVALID_HANDLE) {
+		chosen = finddevice("/chosen@0");
+		if (chosen == OF_INVALID_HANDLE) {
+			printf("no chosen\n");
+			return 0;
+		}
+	}
+	if (getprop(chosen, "mmu", &chosen_mmu, sizeof(chosen_mmu)) <= 0) {
+		printf("no mmu\n");
+		return 0;
+	}
+	memory = (ihandle) call_prom("open", 1, 1, "/memory");
+	if (memory == OF_INVALID_HANDLE) {
+		memory = (ihandle) call_prom("open", 1, 1, "/memory@0");
+		if (memory == OF_INVALID_HANDLE) {
+			printf("no memory node\n");
+			return 0;
+		}
+	}
+	printf("old OF detected\n");
+	return 1;
+}
+
+void *claim(unsigned int virt, unsigned int size, unsigned int align)
+{
+	int ret;
+	unsigned int result;
+
+	if (need_map < 0)
+		need_map = check_of_version();
+	if (align || !need_map)
+		return (void *) call_prom("claim", 3, 1, virt, size, align);
+	
+	ret = call_prom_ret("call-method", 5, 2, &result, "claim", memory,
+			    align, size, virt);
+	if (ret != 0 || result == -1)
+		return (void *) -1;
+	ret = call_prom_ret("call-method", 5, 2, &result, "claim", chosen_mmu,
+			    align, size, virt);
+	/* 0x12 == coherent + read/write */
+	ret = call_prom("call-method", 6, 1, "map", chosen_mmu,
+			0x12, size, virt, virt);
+	return (void *) virt;
 }
diff --git a/arch/ppc/boot/of1275/finddevice.c b/arch/ppc/boot/of1275/finddevice.c
index 2c0f7cb..0dcb120 100644
--- a/arch/ppc/boot/of1275/finddevice.c
+++ b/arch/ppc/boot/of1275/finddevice.c
@@ -10,22 +10,7 @@
 
 #include "of1275.h"
 
-phandle
-finddevice(const char *name)
+phandle finddevice(const char *name)
 {
-    struct prom_args {
-	char *service;
-	int nargs;
-	int nret;
-	const char *devspec;
-	phandle device;
-    } args;
-
-    args.service = "finddevice";
-    args.nargs = 1;
-    args.nret = 1;
-    args.devspec = name;
-    args.device = OF_INVALID_HANDLE;
-    (*of_prom_entry)(&args);
-    return args.device;
+	return (phandle) call_prom("finddevice", 1, 1, name);
 }
diff --git a/arch/ppc/boot/openfirmware/Makefile b/arch/ppc/boot/openfirmware/Makefile
index 0341523..83a6433 100644
--- a/arch/ppc/boot/openfirmware/Makefile
+++ b/arch/ppc/boot/openfirmware/Makefile
@@ -80,8 +80,7 @@
 	$(call if_changed,mknote)
 
 
-$(obj)/coffcrt0.o: EXTRA_AFLAGS := -traditional -DXCOFF
-$(obj)/crt0.o:     EXTRA_AFLAGS := -traditional
+$(obj)/coffcrt0.o: EXTRA_AFLAGS := -DXCOFF
 targets += coffcrt0.o crt0.o
 $(obj)/coffcrt0.o $(obj)/crt0.o: $(common)/crt0.S FORCE
 	$(call if_changed_dep,as_o_S)
diff --git a/arch/ppc/kernel/Makefile b/arch/ppc/kernel/Makefile
index 76a55a4..17a4da6 100644
--- a/arch/ppc/kernel/Makefile
+++ b/arch/ppc/kernel/Makefile
@@ -12,7 +12,7 @@
 extra-$(CONFIG_POWER4)		+= idle_power4.o
 extra-y				+= vmlinux.lds
 
-obj-y				:= entry.o traps.o irq.o idle.o time.o misc.o \
+obj-y				:= entry.o traps.o idle.o time.o misc.o \
 					process.o align.o \
 					setup.o \
 					ppc_htab.o
@@ -38,8 +38,7 @@
 # These are here while we do the architecture merge
 
 else
-obj-y				:= irq.o idle.o \
-					align.o
+obj-y				:= idle.o align.o
 obj-$(CONFIG_6xx)		+= l2cr.o cpu_setup_6xx.o
 obj-$(CONFIG_SOFTWARE_SUSPEND)	+= swsusp.o
 obj-$(CONFIG_MODULES)		+= module.o
diff --git a/arch/ppc/kernel/head_booke.h b/arch/ppc/kernel/head_booke.h
index aeb349b..f3d274c 100644
--- a/arch/ppc/kernel/head_booke.h
+++ b/arch/ppc/kernel/head_booke.h
@@ -358,6 +358,6 @@
 	NORMAL_EXCEPTION_PROLOG;					      \
 	bne	load_up_fpu;		/* if from user, just load it up */   \
 	addi	r3,r1,STACK_FRAME_OVERHEAD;				      \
-	EXC_XFER_EE_LITE(0x800, KernelFP)
+	EXC_XFER_EE_LITE(0x800, kernel_fp_unavailable_exception)
 
 #endif /* __HEAD_BOOKE_H__ */
diff --git a/arch/ppc/kernel/idle.c b/arch/ppc/kernel/idle.c
index 3c4e4cb..821a75e 100644
--- a/arch/ppc/kernel/idle.c
+++ b/arch/ppc/kernel/idle.c
@@ -63,7 +63,7 @@
 	int cpu = smp_processor_id();
 
 	for (;;) {
-		while (need_resched()) {
+		while (!need_resched()) {
 			if (ppc_md.idle != NULL)
 				ppc_md.idle();
 			else
diff --git a/arch/ppc/kernel/irq.c b/arch/ppc/kernel/irq.c
deleted file mode 100644
index fbb2b9f..0000000
--- a/arch/ppc/kernel/irq.c
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- *  arch/ppc/kernel/irq.c
- *
- *  Derived from arch/i386/kernel/irq.c
- *    Copyright (C) 1992 Linus Torvalds
- *  Adapted from arch/i386 by Gary Thomas
- *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
- *  Updated and modified by Cort Dougan <cort@fsmlabs.com>
- *    Copyright (C) 1996-2001 Cort Dougan
- *  Adapted for Power Macintosh by Paul Mackerras
- *    Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
- *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
- *
- * This file contains the code used by various IRQ handling routines:
- * asking for different IRQ's should be done through these routines
- * instead of just grabbing them. Thus setups with different IRQ numbers
- * shouldn't result in any weird surprises, and installing new handlers
- * should be easier.
- *
- * The MPC8xx has an interrupt mask in the SIU.  If a bit is set, the
- * interrupt is _enabled_.  As expected, IRQ0 is bit 0 in the 32-bit
- * mask register (of which only 16 are defined), hence the weird shifting
- * and complement of the cached_irq_mask.  I want to be able to stuff
- * this right into the SIU SMASK register.
- * Many of the prep/chrp functions are conditional compiled on CONFIG_8xx
- * to reduce code space and undefined function references.
- */
-
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/threads.h>
-#include <linux/kernel_stat.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/timex.h>
-#include <linux/config.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/irq.h>
-#include <linux/proc_fs.h>
-#include <linux/random.h>
-#include <linux/seq_file.h>
-#include <linux/cpumask.h>
-#include <linux/profile.h>
-#include <linux/bitops.h>
-
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/pgtable.h>
-#include <asm/irq.h>
-#include <asm/cache.h>
-#include <asm/prom.h>
-#include <asm/ptrace.h>
-#include <asm/machdep.h>
-
-#define NR_MASK_WORDS	((NR_IRQS + 31) / 32)
-
-extern atomic_t ipi_recv;
-extern atomic_t ipi_sent;
-
-#define MAXCOUNT 10000000
-
-int ppc_spurious_interrupts = 0;
-struct irqaction *ppc_irq_action[NR_IRQS];
-unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
-unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
-atomic_t ppc_n_lost_interrupts;
-
-#ifdef CONFIG_TAU_INT
-extern int tau_initialized;
-extern int tau_interrupts(int);
-#endif
-
-int show_interrupts(struct seq_file *p, void *v)
-{
-	int i = *(loff_t *) v, j;
-	struct irqaction * action;
-	unsigned long flags;
-
-	if (i == 0) {
-		seq_puts(p, "           ");
-		for (j=0; j<NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "CPU%d       ", j);
-		seq_putc(p, '\n');
-	}
-
-	if (i < NR_IRQS) {
-		spin_lock_irqsave(&irq_desc[i].lock, flags);
-		action = irq_desc[i].action;
-		if ( !action || !action->handler )
-			goto skip;
-		seq_printf(p, "%3d: ", i);
-#ifdef CONFIG_SMP
-		for (j = 0; j < NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "%10u ",
-					   kstat_cpu(j).irqs[i]);
-#else
-		seq_printf(p, "%10u ", kstat_irqs(i));
-#endif /* CONFIG_SMP */
-		if (irq_desc[i].handler)
-			seq_printf(p, " %s ", irq_desc[i].handler->typename);
-		else
-			seq_puts(p, "  None      ");
-		seq_printf(p, "%s", (irq_desc[i].status & IRQ_LEVEL) ? "Level " : "Edge  ");
-		seq_printf(p, "    %s", action->name);
-		for (action = action->next; action; action = action->next)
-			seq_printf(p, ", %s", action->name);
-		seq_putc(p, '\n');
-skip:
-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
-	} else if (i == NR_IRQS) {
-#ifdef CONFIG_TAU_INT
-		if (tau_initialized){
-			seq_puts(p, "TAU: ");
-			for (j = 0; j < NR_CPUS; j++)
-				if (cpu_online(j))
-					seq_printf(p, "%10u ", tau_interrupts(j));
-			seq_puts(p, "  PowerPC             Thermal Assist (cpu temp)\n");
-		}
-#endif
-#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
-		/* should this be per processor send/receive? */
-		seq_printf(p, "IPI (recv/sent): %10u/%u\n",
-				atomic_read(&ipi_recv), atomic_read(&ipi_sent));
-#endif
-		seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
-	}
-	return 0;
-}
-
-void do_IRQ(struct pt_regs *regs)
-{
-	int irq, first = 1;
-        irq_enter();
-
-	/*
-	 * Every platform is required to implement ppc_md.get_irq.
-	 * This function will either return an irq number or -1 to
-	 * indicate there are no more pending.  But the first time
-	 * through the loop this means there wasn't and IRQ pending.
-	 * The value -2 is for buggy hardware and means that this IRQ
-	 * has already been handled. -- Tom
-	 */
-	while ((irq = ppc_md.get_irq(regs)) >= 0) {
-		__do_IRQ(irq, regs);
-		first = 0;
-	}
-	if (irq != -2 && first)
-		/* That's not SMP safe ... but who cares ? */
-		ppc_spurious_interrupts++;
-        irq_exit();
-}
-
-void __init init_IRQ(void)
-{
-	ppc_md.init_IRQ();
-}
diff --git a/arch/ppc/kernel/misc.S b/arch/ppc/kernel/misc.S
index ae6af29..5e61124 100644
--- a/arch/ppc/kernel/misc.S
+++ b/arch/ppc/kernel/misc.S
@@ -497,9 +497,9 @@
  * and invalidate the corresponding instruction cache blocks.
  * This is a no-op on the 601.
  *
- * flush_icache_range(unsigned long start, unsigned long stop)
+ * __flush_icache_range(unsigned long start, unsigned long stop)
  */
-_GLOBAL(flush_icache_range)
+_GLOBAL(__flush_icache_range)
 BEGIN_FTR_SECTION
 	blr				/* for 601, do nothing */
 END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
diff --git a/arch/ppc/kernel/ppc_ksyms.c b/arch/ppc/kernel/ppc_ksyms.c
index e0ca61b..66073f7 100644
--- a/arch/ppc/kernel/ppc_ksyms.c
+++ b/arch/ppc/kernel/ppc_ksyms.c
@@ -46,6 +46,7 @@
 #include <asm/btext.h>
 #include <asm/div64.h>
 #include <asm/xmon.h>
+#include <asm/signal.h>
 
 #ifdef  CONFIG_8xx
 #include <asm/commproc.h>
@@ -57,7 +58,6 @@
 extern void alignment_exception(struct pt_regs *regs);
 extern void program_check_exception(struct pt_regs *regs);
 extern void single_step_exception(struct pt_regs *regs);
-extern int do_signal(sigset_t *, struct pt_regs *);
 extern int pmac_newworld;
 extern int sys_sigreturn(struct pt_regs *regs);
 
@@ -78,7 +78,6 @@
 EXPORT_SYMBOL(single_step_exception);
 EXPORT_SYMBOL(sys_sigreturn);
 EXPORT_SYMBOL(ppc_n_lost_interrupts);
-EXPORT_SYMBOL(ppc_lost_interrupts);
 
 EXPORT_SYMBOL(ISA_DMA_THRESHOLD);
 EXPORT_SYMBOL(DMA_MODE_READ);
@@ -176,6 +175,7 @@
 #endif /* CONFIG_PCI */
 
 #ifdef CONFIG_NOT_COHERENT_CACHE
+extern void flush_dcache_all(void);
 EXPORT_SYMBOL(flush_dcache_all);
 #endif
 
@@ -217,9 +217,6 @@
 EXPORT_SYMBOL(cuda_request);
 EXPORT_SYMBOL(cuda_poll);
 #endif /* CONFIG_ADB_CUDA */
-#ifdef CONFIG_PPC_MULTIPLATFORM
-EXPORT_SYMBOL(_machine);
-#endif
 #ifdef CONFIG_PPC_PMAC
 EXPORT_SYMBOL(sys_ctrler);
 EXPORT_SYMBOL(pmac_newworld);
diff --git a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c
index 6bcb85d..dc55e1a 100644
--- a/arch/ppc/kernel/setup.c
+++ b/arch/ppc/kernel/setup.c
@@ -76,6 +76,7 @@
 
 #ifdef CONFIG_PPC_MULTIPLATFORM
 int _machine = 0;
+EXPORT_SYMBOL(_machine);
 
 extern void prep_init(unsigned long r3, unsigned long r4,
 		unsigned long r5, unsigned long r6, unsigned long r7);
diff --git a/arch/ppc/platforms/pmac_pic.c b/arch/ppc/platforms/pmac_pic.c
index 9f2d95e..4742bf6 100644
--- a/arch/ppc/platforms/pmac_pic.c
+++ b/arch/ppc/platforms/pmac_pic.c
@@ -75,6 +75,9 @@
 #define GATWICK_IRQ_POOL_SIZE        10
 static struct interrupt_info gatwick_int_pool[GATWICK_IRQ_POOL_SIZE];
 
+#define NR_MASK_WORDS	((NR_IRQS + 31) / 32)
+static unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
+
 /*
  * Mark an irq as "lost".  This is only used on the pmac
  * since it can lose interrupts (see pmac_set_irq_mask).
diff --git a/arch/ppc/platforms/prep_setup.c b/arch/ppc/platforms/prep_setup.c
index 067d7d5..4415748 100644
--- a/arch/ppc/platforms/prep_setup.c
+++ b/arch/ppc/platforms/prep_setup.c
@@ -61,6 +61,15 @@
 #include <asm/pci-bridge.h>
 #include <asm/todc.h>
 
+/* prep registers for L2 */
+#define CACHECRBA       0x80000823      /* Cache configuration register address */
+#define L2CACHE_MASK	0x03	/* Mask for 2 L2 Cache bits */
+#define L2CACHE_512KB	0x00	/* 512KB */
+#define L2CACHE_256KB	0x01	/* 256KB */
+#define L2CACHE_1MB	0x02	/* 1MB */
+#define L2CACHE_NONE	0x03	/* NONE */
+#define L2CACHE_PARITY  0x08    /* Mask for L2 Cache Parity Protected bit */
+
 TODC_ALLOC();
 
 unsigned char ucSystemType;
diff --git a/arch/ppc64/Kconfig b/arch/ppc64/Kconfig
index 2955234..c9d32db 100644
--- a/arch/ppc64/Kconfig
+++ b/arch/ppc64/Kconfig
@@ -297,6 +297,10 @@
 	def_bool y
 	depends on NEED_MULTIPLE_NODES
 
+config ARCH_MEMORY_PROBE
+	def_bool y
+	depends on MEMORY_HOTPLUG
+
 # Some NUMA nodes have memory ranges that span
 # other nodes.  Even though a pfn is valid and
 # between a node's start and end pfns, it may not
diff --git a/arch/ppc64/boot/addRamDisk.c b/arch/ppc64/boot/addRamDisk.c
index 7f2c094..c02a999 100644
--- a/arch/ppc64/boot/addRamDisk.c
+++ b/arch/ppc64/boot/addRamDisk.c
@@ -5,11 +5,59 @@
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <string.h>
+#include <elf.h>
 
 #define ElfHeaderSize  (64 * 1024)
 #define ElfPages  (ElfHeaderSize / 4096)
 #define KERNELBASE (0xc000000000000000)
+#define _ALIGN_UP(addr,size)	(((addr)+((size)-1))&(~((size)-1)))
 
+struct addr_range {
+	unsigned long long addr;
+	unsigned long memsize;
+	unsigned long offset;
+};
+
+static int check_elf64(void *p, int size, struct addr_range *r)
+{
+	Elf64_Ehdr *elf64 = p;
+	Elf64_Phdr *elf64ph;
+
+	if (elf64->e_ident[EI_MAG0] != ELFMAG0 ||
+	    elf64->e_ident[EI_MAG1] != ELFMAG1 ||
+	    elf64->e_ident[EI_MAG2] != ELFMAG2 ||
+	    elf64->e_ident[EI_MAG3] != ELFMAG3 ||
+	    elf64->e_ident[EI_CLASS] != ELFCLASS64 ||
+	    elf64->e_ident[EI_DATA] != ELFDATA2MSB ||
+	    elf64->e_type != ET_EXEC || elf64->e_machine != EM_PPC64)
+		return 0;
+
+	if ((elf64->e_phoff + sizeof(Elf64_Phdr)) > size)
+		return 0;
+
+	elf64ph = (Elf64_Phdr *) ((unsigned long)elf64 +
+				  (unsigned long)elf64->e_phoff);
+
+	r->memsize = (unsigned long)elf64ph->p_memsz;
+	r->offset = (unsigned long)elf64ph->p_offset;
+	r->addr = (unsigned long long)elf64ph->p_vaddr;
+
+#ifdef DEBUG
+	printf("PPC64 ELF file, ph:\n");
+	printf("p_type   0x%08x\n", elf64ph->p_type);
+	printf("p_flags  0x%08x\n", elf64ph->p_flags);
+	printf("p_offset 0x%016llx\n", elf64ph->p_offset);
+	printf("p_vaddr  0x%016llx\n", elf64ph->p_vaddr);
+	printf("p_paddr  0x%016llx\n", elf64ph->p_paddr);
+	printf("p_filesz 0x%016llx\n", elf64ph->p_filesz);
+	printf("p_memsz  0x%016llx\n", elf64ph->p_memsz);
+	printf("p_align  0x%016llx\n", elf64ph->p_align);
+	printf("... skipping 0x%08lx bytes of ELF header\n",
+	       (unsigned long)elf64ph->p_offset);
+#endif
+
+	return 64;
+}
 void get4k(FILE *file, char *buf )
 {
 	unsigned j;
@@ -34,97 +82,92 @@
 int main(int argc, char **argv)
 {
 	char inbuf[4096];
-	FILE *ramDisk = NULL;
-	FILE *sysmap = NULL;
-	FILE *inputVmlinux = NULL;
-	FILE *outputVmlinux = NULL;
-  
-	unsigned i = 0;
-	unsigned long ramFileLen = 0;
-	unsigned long ramLen = 0;
-	unsigned long roundR = 0;
-  
-	unsigned long sysmapFileLen = 0;
-	unsigned long sysmapLen = 0;
-	unsigned long sysmapPages = 0;
-	char* ptr_end = NULL; 
-	unsigned long offset_end = 0;
+	struct addr_range vmlinux;
+	FILE *ramDisk;
+	FILE *inputVmlinux;
+	FILE *outputVmlinux;
 
-	unsigned long kernelLen = 0;
-	unsigned long actualKernelLen = 0;
-	unsigned long round = 0;
-	unsigned long roundedKernelLen = 0;
-	unsigned long ramStartOffs = 0;
-	unsigned long ramPages = 0;
-	unsigned long roundedKernelPages = 0;
-	unsigned long hvReleaseData = 0;
+	char *rd_name, *lx_name, *out_name;
+
+	size_t i;
+	unsigned long ramFileLen;
+	unsigned long ramLen;
+	unsigned long roundR;
+	unsigned long offset_end;
+
+	unsigned long kernelLen;
+	unsigned long actualKernelLen;
+	unsigned long round;
+	unsigned long roundedKernelLen;
+	unsigned long ramStartOffs;
+	unsigned long ramPages;
+	unsigned long roundedKernelPages;
+	unsigned long hvReleaseData;
 	u_int32_t eyeCatcher = 0xc8a5d9c4;
-	unsigned long naca = 0;
-	unsigned long xRamDisk = 0;
-	unsigned long xRamDiskSize = 0;
-	long padPages = 0;
+	unsigned long naca;
+	unsigned long xRamDisk;
+	unsigned long xRamDiskSize;
+	long padPages;
   
   
 	if (argc < 2) {
 		fprintf(stderr, "Name of RAM disk file missing.\n");
 		exit(1);
 	}
+	rd_name = argv[1];
 
 	if (argc < 3) {
-		fprintf(stderr, "Name of System Map input file is missing.\n");
-		exit(1);
-	}
-  
-	if (argc < 4) {
 		fprintf(stderr, "Name of vmlinux file missing.\n");
 		exit(1);
 	}
+	lx_name = argv[2];
 
-	if (argc < 5) {
+	if (argc < 4) {
 		fprintf(stderr, "Name of vmlinux output file missing.\n");
 		exit(1);
 	}
+	out_name = argv[3];
 
 
-	ramDisk = fopen(argv[1], "r");
+	ramDisk = fopen(rd_name, "r");
 	if ( ! ramDisk ) {
-		fprintf(stderr, "RAM disk file \"%s\" failed to open.\n", argv[1]);
+		fprintf(stderr, "RAM disk file \"%s\" failed to open.\n", rd_name);
 		exit(1);
 	}
 
-	sysmap = fopen(argv[2], "r");
-	if ( ! sysmap ) {
-		fprintf(stderr, "System Map file \"%s\" failed to open.\n", argv[2]);
-		exit(1);
-	}
-  
-	inputVmlinux = fopen(argv[3], "r");
+	inputVmlinux = fopen(lx_name, "r");
 	if ( ! inputVmlinux ) {
-		fprintf(stderr, "vmlinux file \"%s\" failed to open.\n", argv[3]);
+		fprintf(stderr, "vmlinux file \"%s\" failed to open.\n", lx_name);
 		exit(1);
 	}
   
-	outputVmlinux = fopen(argv[4], "w+");
+	outputVmlinux = fopen(out_name, "w+");
 	if ( ! outputVmlinux ) {
-		fprintf(stderr, "output vmlinux file \"%s\" failed to open.\n", argv[4]);
+		fprintf(stderr, "output vmlinux file \"%s\" failed to open.\n", out_name);
 		exit(1);
 	}
-  
-  
-  
+
+	i = fread(inbuf, 1, sizeof(inbuf), inputVmlinux);
+	if (i != sizeof(inbuf)) {
+		fprintf(stderr, "can not read vmlinux file %s: %u\n", lx_name, i);
+		exit(1);
+	}
+
+	i = check_elf64(inbuf, sizeof(inbuf), &vmlinux);
+	if (i == 0) {
+		fprintf(stderr, "You must have a linux kernel specified as argv[2]\n");
+		exit(1);
+	}
+
 	/* Input Vmlinux file */
 	fseek(inputVmlinux, 0, SEEK_END);
 	kernelLen = ftell(inputVmlinux);
 	fseek(inputVmlinux, 0, SEEK_SET);
-	printf("kernel file size = %d\n", kernelLen);
-	if ( kernelLen == 0 ) {
-		fprintf(stderr, "You must have a linux kernel specified as argv[3]\n");
-		exit(1);
-	}
+	printf("kernel file size = %lu\n", kernelLen);
 
 	actualKernelLen = kernelLen - ElfHeaderSize;
 
-	printf("actual kernel length (minus ELF header) = %d\n", actualKernelLen);
+	printf("actual kernel length (minus ELF header) = %lu\n", actualKernelLen);
 
 	round = actualKernelLen % 4096;
 	roundedKernelLen = actualKernelLen;
@@ -134,39 +177,7 @@
 	roundedKernelPages = roundedKernelLen / 4096;
 	printf("Vmlinux pages to copy = %ld/0x%lx \n", roundedKernelPages, roundedKernelPages);
 
-
-
-	/* Input System Map file */
-	/* (needs to be processed simply to determine if we need to add pad pages due to the static variables not being included in the vmlinux) */
-	fseek(sysmap, 0, SEEK_END);
-	sysmapFileLen = ftell(sysmap);
-	fseek(sysmap, 0, SEEK_SET);
-	printf("%s file size = %ld/0x%lx \n", argv[2], sysmapFileLen, sysmapFileLen);
-
-	sysmapLen = sysmapFileLen;
-
-	roundR = 4096 - (sysmapLen % 4096);
-	if (roundR) {
-		printf("Rounding System Map file up to a multiple of 4096, adding %ld/0x%lx \n", roundR, roundR);
-		sysmapLen += roundR;
-	}
-	printf("Rounded System Map size is %ld/0x%lx \n", sysmapLen, sysmapLen);
-  
-	/* Process the Sysmap file to determine where _end is */
-	sysmapPages = sysmapLen / 4096;
-	/* read the whole file line by line, expect that it doesn't fail */
-	while ( fgets(inbuf, 4096, sysmap) )  ;
-	/* search for _end in the last page of the system map */
-	ptr_end = strstr(inbuf, " _end");
-	if (!ptr_end) {
-		fprintf(stderr, "Unable to find _end in the sysmap file \n");
-		fprintf(stderr, "inbuf: \n");
-		fprintf(stderr, "%s \n", inbuf);
-		exit(1);
-	}
-	printf("Found _end in the last page of the sysmap - backing up 10 characters it looks like %s", ptr_end-10);
-	/* convert address of _end in system map to hex offset. */
-	offset_end = (unsigned int)strtol(ptr_end-10, NULL, 16);
+	offset_end = _ALIGN_UP(vmlinux.memsize, 4096);
 	/* calc how many pages we need to insert between the vmlinux and the start of the ram disk */
 	padPages = offset_end/4096 - roundedKernelPages;
 
@@ -194,7 +205,7 @@
 	fseek(ramDisk, 0, SEEK_END);
 	ramFileLen = ftell(ramDisk);
 	fseek(ramDisk, 0, SEEK_SET);
-	printf("%s file size = %ld/0x%lx \n", argv[1], ramFileLen, ramFileLen);
+	printf("%s file size = %ld/0x%lx \n", rd_name, ramFileLen, ramFileLen);
 
 	ramLen = ramFileLen;
 
@@ -248,19 +259,19 @@
 	/* fseek to the hvReleaseData pointer */
 	fseek(outputVmlinux, ElfHeaderSize + 0x24, SEEK_SET);
 	if (fread(&hvReleaseData, 4, 1, outputVmlinux) != 1) {
-		death("Could not read hvReleaseData pointer\n", outputVmlinux, argv[4]);
+		death("Could not read hvReleaseData pointer\n", outputVmlinux, out_name);
 	}
 	hvReleaseData = ntohl(hvReleaseData); /* Convert to native int */
-	printf("hvReleaseData is at %08x\n", hvReleaseData);
+	printf("hvReleaseData is at %08lx\n", hvReleaseData);
 
 	/* fseek to the hvReleaseData */
 	fseek(outputVmlinux, ElfHeaderSize + hvReleaseData, SEEK_SET);
 	if (fread(inbuf, 0x40, 1, outputVmlinux) != 1) {
-		death("Could not read hvReleaseData\n", outputVmlinux, argv[4]);
+		death("Could not read hvReleaseData\n", outputVmlinux, out_name);
 	}
 	/* Check hvReleaseData sanity */
 	if (memcmp(inbuf, &eyeCatcher, 4) != 0) {
-		death("hvReleaseData is invalid\n", outputVmlinux, argv[4]);
+		death("hvReleaseData is invalid\n", outputVmlinux, out_name);
 	}
 	/* Get the naca pointer */
 	naca = ntohl(*((u_int32_t*) &inbuf[0x0C])) - KERNELBASE;
@@ -269,13 +280,13 @@
 	/* fseek to the naca */
 	fseek(outputVmlinux, ElfHeaderSize + naca, SEEK_SET);
 	if (fread(inbuf, 0x18, 1, outputVmlinux) != 1) {
-		death("Could not read naca\n", outputVmlinux, argv[4]);
+		death("Could not read naca\n", outputVmlinux, out_name);
 	}
 	xRamDisk = ntohl(*((u_int32_t *) &inbuf[0x0c]));
 	xRamDiskSize = ntohl(*((u_int32_t *) &inbuf[0x14]));
 	/* Make sure a RAM disk isn't already present */
 	if ((xRamDisk != 0) || (xRamDiskSize != 0)) {
-		death("RAM disk is already attached to this kernel\n", outputVmlinux, argv[4]);
+		death("RAM disk is already attached to this kernel\n", outputVmlinux, out_name);
 	}
 	/* Fill in the values */
 	*((u_int32_t *) &inbuf[0x0c]) = htonl(ramStartOffs);
@@ -285,15 +296,15 @@
 	fflush(outputVmlinux);
 	fseek(outputVmlinux, ElfHeaderSize + naca, SEEK_SET);
 	if (fwrite(inbuf, 0x18, 1, outputVmlinux) != 1) {
-		death("Could not write naca\n", outputVmlinux, argv[4]);
+		death("Could not write naca\n", outputVmlinux, out_name);
 	}
-	printf("Ram Disk of 0x%lx pages is attached to the kernel at offset 0x%08x\n",
+	printf("Ram Disk of 0x%lx pages is attached to the kernel at offset 0x%08lx\n",
 	       ramPages, ramStartOffs);
 
 	/* Done */
 	fclose(outputVmlinux);
 	/* Set permission to executable */
-	chmod(argv[4], S_IRUSR|S_IWUSR|S_IXUSR|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH);
+	chmod(out_name, S_IRUSR|S_IWUSR|S_IXUSR|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH);
 
 	return 0;
 }
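A worked example of the new pad-page computation (hypothetical numbers, shown only to make the arithmetic concrete): _ALIGN_UP() rounds the kernel's in-memory size (vmlinux.memsize from the ELF program header) up to the next 4 KB page, and the difference from the rounded on-disk kernel size gives the number of pages to pad before the ram disk.

	#include <stdio.h>

	#define _ALIGN_UP(addr,size)	(((addr)+((size)-1))&(~((size)-1)))

	int main(void)
	{
		/* hypothetical values, only to illustrate the calculation */
		unsigned long memsize = 0x6b2468;		/* vmlinux.memsize */
		unsigned long roundedKernelPages = 0x5d0;	/* on-disk kernel, in 4 KB pages */
		unsigned long offset_end = _ALIGN_UP(memsize, 4096);
		long padPages = offset_end / 4096 - roundedKernelPages;

		printf("offset_end = 0x%lx, padPages = %ld\n", offset_end, padPages);
		return 0;
	}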
diff --git a/arch/ppc64/kernel/Makefile b/arch/ppc64/kernel/Makefile
index c441aeb..58b19f1 100644
--- a/arch/ppc64/kernel/Makefile
+++ b/arch/ppc64/kernel/Makefile
@@ -11,12 +11,11 @@
 
 endif
 
-obj-y               +=	irq.o idle.o dma.o \
-			align.o pacaData.o \
-			udbg.o ioctl32.o \
+obj-y               +=	idle.o dma.o \
+			align.o \
+			udbg.o \
 			rtc.o \
-			cpu_setup_power4.o \
-			iommu.o sysfs.o vdso.o firmware.o
+			iommu.o vdso.o
 obj-y += vdso32/ vdso64/
 
 pci-obj-$(CONFIG_PPC_MULTIPLATFORM)	+= pci_dn.o pci_direct_iommu.o
@@ -31,15 +30,10 @@
 obj-$(CONFIG_PPC_PSERIES) += udbg_16550.o
 
 obj-$(CONFIG_KEXEC)		+= machine_kexec.o
-obj-$(CONFIG_EEH)		+= eeh.o
-obj-$(CONFIG_PROC_FS)		+= proc_ppc64.o
 obj-$(CONFIG_MODULES)		+= module.o
 ifneq ($(CONFIG_PPC_MERGE),y)
 obj-$(CONFIG_MODULES)		+= ppc_ksyms.o
 endif
-obj-$(CONFIG_PPC_RTAS)		+= rtas_pci.o
-obj-$(CONFIG_SCANLOG)		+= scanlog.o
-obj-$(CONFIG_LPARCFG)		+= lparcfg.o
 obj-$(CONFIG_HVC_CONSOLE)	+= hvconsole.o
 ifneq ($(CONFIG_PPC_MERGE),y)
 obj-$(CONFIG_BOOTX_TEXT)	+= btext.o
@@ -52,8 +46,6 @@
 
 obj-$(CONFIG_KPROBES)		+= kprobes.o
 
-CFLAGS_ioctl32.o += -Ifs/
-
 ifneq ($(CONFIG_PPC_MERGE),y)
 ifeq ($(CONFIG_PPC_ISERIES),y)
 arch/ppc64/kernel/head.o: arch/powerpc/kernel/lparmap.s
diff --git a/arch/ppc64/kernel/asm-offsets.c b/arch/ppc64/kernel/asm-offsets.c
index bce9065..84ab5c1 100644
--- a/arch/ppc64/kernel/asm-offsets.c
+++ b/arch/ppc64/kernel/asm-offsets.c
@@ -74,7 +74,6 @@
 	DEFINE(ICACHEL1LINESIZE, offsetof(struct ppc64_caches, iline_size));
 	DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size));
 	DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page));
-	DEFINE(PLATFORM, offsetof(struct systemcfg, platform));
 	DEFINE(PLATFORM_LPAR, PLATFORM_LPAR);
 
 	/* paca */
diff --git a/arch/ppc64/kernel/head.S b/arch/ppc64/kernel/head.S
index 9e8050e..1c869ea 100644
--- a/arch/ppc64/kernel/head.S
+++ b/arch/ppc64/kernel/head.S
@@ -28,7 +28,6 @@
 #include <asm/processor.h>
 #include <asm/page.h>
 #include <asm/mmu.h>
-#include <asm/systemcfg.h>
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
 #include <asm/bug.h>
@@ -1701,21 +1700,9 @@
 	HMT_MEDIUM			/* Set thread priority to MEDIUM */
 
 	ld	r2,PACATOC(r13)
-	li	r6,0
-	stb	r6,PACAPROCENABLED(r13)
 
-#ifndef CONFIG_PPC_ISERIES
-	/* Initialize the page table pointer register. */
-	LOADADDR(r6,_SDR1)
-	ld	r6,0(r6)		/* get the value of _SDR1	 */
-	mtspr	SPRN_SDR1,r6			/* set the htab location	 */
-#endif
-	/* Initialize the first segment table (or SLB) entry		 */
-	ld	r3,PACASTABVIRT(r13)	/* get addr of segment table	 */
-BEGIN_FTR_SECTION
-	bl	.stab_initialize
-END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
-	bl	.slb_initialize
+	/* Do early setup for that CPU */
+	bl	.early_setup_secondary
 
 	/* Initialize the kernel stack.  Just a repeat for iSeries.	 */
 	LOADADDR(r3,current_set)
@@ -1724,37 +1711,6 @@
 	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
 	std	r1,PACAKSAVE(r13)
 
-	ld	r3,PACASTABREAL(r13)	/* get raddr of segment table	 */
-	ori	r4,r3,1			/* turn on valid bit		 */
-
-#ifdef CONFIG_PPC_ISERIES
-	li	r0,-1			/* hypervisor call */
-	li	r3,1
-	sldi	r3,r3,63		/* 0x8000000000000000 */
-	ori	r3,r3,4			/* 0x8000000000000004 */
-	sc				/* HvCall_setASR */
-#else
-	/* set the ASR */
-	ld	r3,systemcfg@got(r2)	/* r3 = ptr to systemcfg	 */
-	ld	r3,0(r3)
-	lwz	r3,PLATFORM(r3)		/* r3 = platform flags		 */
-	andi.	r3,r3,PLATFORM_LPAR	/* Test if bit 0 is set (LPAR bit) */
-	beq	98f			/* branch if result is 0  */
-	mfspr	r3,SPRN_PVR
-	srwi	r3,r3,16
-	cmpwi	r3,0x37			/* SStar  */
-	beq	97f
-	cmpwi	r3,0x36			/* IStar  */
-	beq	97f
-	cmpwi	r3,0x34			/* Pulsar */
-	bne	98f
-97:	li	r3,H_SET_ASR		/* hcall = H_SET_ASR */
-	HVSC				/* Invoking hcall */
-	b	99f
-98:					/* !(rpa hypervisor) || !(star)  */
-	mtasr	r4			/* set the stab location	 */
-99:
-#endif
 	li	r7,0
 	mtlr	r7
 
@@ -1896,40 +1852,6 @@
 	mr	r3,r31
  	bl	.early_setup
 
-	/* set the ASR */
-	ld	r3,PACASTABREAL(r13)
-	ori	r4,r3,1			/* turn on valid bit		 */
-	ld	r3,systemcfg@got(r2)	/* r3 = ptr to systemcfg */
-	ld	r3,0(r3)
-	lwz	r3,PLATFORM(r3)		/* r3 = platform flags */
-	andi.	r3,r3,PLATFORM_LPAR	/* Test if bit 0 is set (LPAR bit) */
-	beq	98f			/* branch if result is 0  */
-	mfspr	r3,SPRN_PVR
-	srwi	r3,r3,16
-	cmpwi	r3,0x37			/* SStar */
-	beq	97f
-	cmpwi	r3,0x36			/* IStar  */
-	beq	97f
-	cmpwi	r3,0x34			/* Pulsar */
-	bne	98f
-97:	li	r3,H_SET_ASR		/* hcall = H_SET_ASR */
-	HVSC				/* Invoking hcall */
-	b	99f
-98:					/* !(rpa hypervisor) || !(star) */
-	mtasr	r4			/* set the stab location	*/
-99:
-	/* Set SDR1 (hash table pointer) */
-	ld	r3,systemcfg@got(r2)	/* r3 = ptr to systemcfg */
-	ld	r3,0(r3)
-	lwz	r3,PLATFORM(r3)		/* r3 = platform flags */
-	/* Test if bit 0 is set (LPAR bit) */
-	andi.	r3,r3,PLATFORM_LPAR
-	bne	98f			/* branch if result is !0  */
-	LOADADDR(r6,_SDR1)		/* Only if NOT LPAR */
-	sub	r6,r6,r26
-	ld	r6,0(r6)		/* get the value of _SDR1 */
-	mtspr	SPRN_SDR1,r6			/* set the htab location  */
-98: 
 	LOADADDR(r3,.start_here_common)
 	SET_REG_TO_CONST(r4, MSR_KERNEL)
 	mtspr	SPRN_SRR0,r3
diff --git a/arch/ppc64/kernel/idle.c b/arch/ppc64/kernel/idle.c
index 715bc0e..b879d30 100644
--- a/arch/ppc64/kernel/idle.c
+++ b/arch/ppc64/kernel/idle.c
@@ -26,7 +26,6 @@
 #include <asm/processor.h>
 #include <asm/cputable.h>
 #include <asm/time.h>
-#include <asm/systemcfg.h>
 #include <asm/machdep.h>
 #include <asm/smp.h>
 
diff --git a/arch/ppc64/kernel/misc.S b/arch/ppc64/kernel/misc.S
index 914632e..492bca6 100644
--- a/arch/ppc64/kernel/misc.S
+++ b/arch/ppc64/kernel/misc.S
@@ -78,12 +78,12 @@
 	mtlr	r0
 	blr
 
-_GLOBAL(call_handle_IRQ_event)
+_GLOBAL(call___do_IRQ)
 	mflr	r0
 	std	r0,16(r1)
-	stdu	r1,THREAD_SIZE-112(r6)
-	mr	r1,r6
-	bl	.handle_IRQ_event
+	stdu	r1,THREAD_SIZE-112(r5)
+	mr	r1,r5
+	bl	.__do_IRQ
 	ld	r1,0(r1)
 	ld	r0,16(r1)
 	mtlr	r0
diff --git a/arch/ppc64/kernel/nvram.c b/arch/ppc64/kernel/nvram.c
index 4fb1a9f..c0fcd29 100644
--- a/arch/ppc64/kernel/nvram.c
+++ b/arch/ppc64/kernel/nvram.c
@@ -31,7 +31,6 @@
 #include <asm/rtas.h>
 #include <asm/prom.h>
 #include <asm/machdep.h>
-#include <asm/systemcfg.h>
 
 #undef DEBUG_NVRAM
 
@@ -167,7 +166,7 @@
 	case IOC_NVRAM_GET_OFFSET: {
 		int part, offset;
 
-		if (systemcfg->platform != PLATFORM_POWERMAC)
+		if (_machine != PLATFORM_POWERMAC)
 			return -EINVAL;
 		if (copy_from_user(&part, (void __user*)arg, sizeof(part)) != 0)
 			return -EFAULT;
@@ -450,7 +449,7 @@
 	 * in our nvram, as Apple defined partitions use pretty much
 	 * all of the space
 	 */
-	if (systemcfg->platform == PLATFORM_POWERMAC)
+	if (_machine == PLATFORM_POWERMAC)
 		return -ENOSPC;
 
 	/* see if we have an OS partition that meets our needs.
diff --git a/arch/ppc64/kernel/pci.c b/arch/ppc64/kernel/pci.c
index 30247ff..3cef1b8 100644
--- a/arch/ppc64/kernel/pci.c
+++ b/arch/ppc64/kernel/pci.c
@@ -548,6 +548,11 @@
 	if (ppc64_isabridge_dev != NULL)
 		printk("ISA bridge at %s\n", pci_name(ppc64_isabridge_dev));
 
+#ifdef CONFIG_PPC_MULTIPLATFORM
+	/* map in PCI I/O space */
+	phbs_remap_io();
+#endif
+
 	printk("PCI: Probing PCI hardware done\n");
 
 	return 0;
@@ -1277,12 +1282,9 @@
 	 * G5 machines... So when something asks for bus 0 io base
 	 * (bus 0 is HT root), we return the AGP one instead.
 	 */
-#ifdef CONFIG_PPC_PMAC
-	if (systemcfg->platform == PLATFORM_POWERMAC &&
-	    machine_is_compatible("MacRISC4"))
+	if (machine_is_compatible("MacRISC4"))
 		if (in_bus == 0)
 			in_bus = 0xf0;
-#endif /* CONFIG_PPC_PMAC */
 
 	/* That syscall isn't quite compatible with PCI domains, but it's
 	 * used on pre-domains setup. We return the first match
diff --git a/arch/ppc64/kernel/pci_dn.c b/arch/ppc64/kernel/pci_dn.c
index 1a443a7..12c4c9e 100644
--- a/arch/ppc64/kernel/pci_dn.c
+++ b/arch/ppc64/kernel/pci_dn.c
@@ -43,7 +43,7 @@
 	u32 *regs;
 	struct pci_dn *pdn;
 
-	if (phb->is_dynamic)
+	if (mem_init_done)
 		pdn = kmalloc(sizeof(*pdn), GFP_KERNEL);
 	else
 		pdn = alloc_bootmem(sizeof(*pdn));
@@ -120,6 +120,14 @@
 	return NULL;
 }
 
+/** 
+ * pci_devs_phb_init_dynamic - setup pci devices under this PHB
+ * phb: pci-to-host bridge (top-level bridge connecting to cpu)
+ *
+ * This routine is called both during boot, (before the memory
+ * subsystem is set up, before kmalloc is valid) and during the 
+ * dynamic lpar operation of adding a PHB to a running system.
+ */
 void __devinit pci_devs_phb_init_dynamic(struct pci_controller *phb)
 {
 	struct device_node * dn = (struct device_node *) phb->arch_data;
@@ -201,9 +209,14 @@
 	.notifier_call = pci_dn_reconfig_notifier,
 };
 
-/*
- * Actually initialize the phbs.
- * The buswalk on this phb has not happened yet.
+/** 
+ * pci_devs_phb_init - Initialize phbs and pci devs under them.
+ * 
+ * This routine walks over all phb's (pci-host bridges) on the
+ * system, and sets up assorted pci-related structures 
+ * (including pci info in the device node structs) for each
+ * pci device found underneath.  This routine runs once,
+ * early in the boot sequence.
  */
 void __init pci_devs_phb_init(void)
 {
diff --git a/arch/ppc64/kernel/prom.c b/arch/ppc64/kernel/prom.c
index 3402fbe..fbad2c3 100644
--- a/arch/ppc64/kernel/prom.c
+++ b/arch/ppc64/kernel/prom.c
@@ -318,7 +318,7 @@
 		}
 
 		/* We offset irq numbers for the u3 MPIC by 128 in PowerMac */
-		if (systemcfg->platform == PLATFORM_POWERMAC && ic && ic->parent) {
+		if (_machine == PLATFORM_POWERMAC && ic && ic->parent) {
 			char *name = get_property(ic->parent, "name", NULL);
 			if (name && !strcmp(name, "u3"))
 				np->intrs[intrcount].line += 128;
@@ -1065,7 +1065,7 @@
 	prop = (u32 *)of_get_flat_dt_prop(node, "linux,platform", NULL);
 	if (prop == NULL)
 		return 0;
-	systemcfg->platform = *prop;
+	_machine = *prop;
 
 	/* check if iommu is forced on or off */
 	if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
@@ -1230,11 +1230,8 @@
 	of_scan_flat_dt(early_init_dt_scan_memory, NULL);
 	lmb_enforce_memory_limit(memory_limit);
 	lmb_analyze();
-	systemcfg->physicalMemorySize = lmb_phys_mem_size();
 	lmb_reserve(0, __pa(klimit));
 
-	DBG("Phys. mem: %lx\n", systemcfg->physicalMemorySize);
-
 	/* Reserve LMB regions used by kernel, initrd, dt, etc... */
 	early_reserve_mem();
 
@@ -1753,7 +1750,7 @@
 	/* We don't support that function on PowerMac, at least
 	 * not yet
 	 */
-	if (systemcfg->platform == PLATFORM_POWERMAC)
+	if (_machine == PLATFORM_POWERMAC)
 		return -ENODEV;
 
 	/* fix up new node's linux_phandle field */
diff --git a/arch/ppc64/kernel/prom_init.c b/arch/ppc64/kernel/prom_init.c
index e4c880da..6375f40 100644
--- a/arch/ppc64/kernel/prom_init.c
+++ b/arch/ppc64/kernel/prom_init.c
@@ -1934,7 +1934,8 @@
 	/*
 	 * On pSeries, inform the firmware about our capabilities
 	 */
-	if (RELOC(of_platform) & PLATFORM_PSERIES)
+	if (RELOC(of_platform) == PLATFORM_PSERIES ||
+	    RELOC(of_platform) == PLATFORM_PSERIES_LPAR)
 		prom_send_capabilities();
 
 	/*
diff --git a/arch/ppc64/kernel/vdso.c b/arch/ppc64/kernel/vdso.c
index 4aacf52..1bbacac 100644
--- a/arch/ppc64/kernel/vdso.c
+++ b/arch/ppc64/kernel/vdso.c
@@ -34,6 +34,7 @@
 #include <asm/machdep.h>
 #include <asm/cputable.h>
 #include <asm/sections.h>
+#include <asm/systemcfg.h>
 #include <asm/vdso.h>
 
 #undef DEBUG
@@ -179,7 +180,7 @@
 	 * Last page is systemcfg.
 	 */
 	if ((vma->vm_end - address) <= PAGE_SIZE)
-		pg = virt_to_page(systemcfg);
+		pg = virt_to_page(_systemcfg);
 	else
 		pg = virt_to_page(vbase + offset);
 
@@ -604,7 +605,7 @@
 		get_page(pg);
 	}
 
-	get_page(virt_to_page(systemcfg));
+	get_page(virt_to_page(_systemcfg));
 }
 
 int in_gate_area_no_task(unsigned long addr)
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
index 0cded04..821c81e 100644
--- a/drivers/atm/horizon.c
+++ b/drivers/atm/horizon.c
@@ -1511,8 +1511,8 @@
     // a.k.a. prepare the channel and remember that we have done so.
     
     tx_ch_desc * tx_desc = &memmap->tx_descs[tx_channel];
-    u16 rd_ptr;
-    u16 wr_ptr;
+    u32 rd_ptr;
+    u32 wr_ptr;
     u16 channel = vcc->channel;
     
     unsigned long flags;
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index 4d5ed18..27bca34 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -147,7 +147,7 @@
 			printk(KERN_ERR PFX "unable to get memory for scratch page.\n");
 			return -ENOMEM;
 		}
-		global_flush_tlb();
+		flush_agp_mappings();
 
 		bridge->scratch_page_real = virt_to_gart(addr);
 		bridge->scratch_page =
@@ -191,7 +191,7 @@
 	if (bridge->driver->needs_scratch_page) {
 		bridge->driver->agp_destroy_page(
 				gart_to_virt(bridge->scratch_page_real));
-		global_flush_tlb();
+		flush_agp_mappings();
 	}
 	if (got_gatt)
 		bridge->driver->free_gatt_table(bridge);
@@ -217,7 +217,7 @@
 	    bridge->driver->needs_scratch_page) {
 		bridge->driver->agp_destroy_page(
 				gart_to_virt(bridge->scratch_page_real));
-		global_flush_tlb();
+		flush_agp_mappings();
 	}
 }
 
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 19f242b..5567ce8 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -155,7 +155,7 @@
 		for (i = 0; i < curr->page_count; i++) {
 			curr->bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[i]));
 		}
-		global_flush_tlb();
+		flush_agp_mappings();
 	}
 	agp_free_key(curr->key);
 	vfree(curr->memory);
@@ -213,8 +213,6 @@
 		new->memory[i] = virt_to_gart(addr);
 		new->page_count++;
 	}
-	global_flush_tlb();
-
 	new->bridge = bridge;
 
 	flush_agp_mappings();
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index 70ef926..4e9a04e 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -180,11 +180,10 @@
 #define W83781D_REG_BANK 0x4E
 
 #define W83781D_REG_CONFIG 0x40
-#define W83781D_REG_ALARM1 0x41
-#define W83781D_REG_ALARM2 0x42
-#define W83781D_REG_ALARM3 0x450
+#define W83781D_REG_ALARM1 0x459
+#define W83781D_REG_ALARM2 0x45A
+#define W83781D_REG_ALARM3 0x45B
 
-#define W83781D_REG_IRQ 0x4C
 #define W83781D_REG_BEEP_CONFIG 0x4D
 #define W83781D_REG_BEEP_INTS1 0x56
 #define W83781D_REG_BEEP_INTS2 0x57
@@ -1370,13 +1369,6 @@
 					W83781D_REG_TEMP3_CONFIG, tmp & 0xfe);
 			}
 		}
-
-		/* enable comparator mode for temp2 and temp3 so
-	           alarm indication will work correctly */
-		i = w83627hf_read_value(client, W83781D_REG_IRQ);
-		if (!(i & 0x40))
-			w83627hf_write_value(client, W83781D_REG_IRQ,
-					    i | 0x40);
 	}
 
 	/* Start monitoring */
@@ -1400,7 +1392,7 @@
 			/* skip missing sensors */
 			if (((data->type == w83697hf) && (i == 1)) ||
 			    ((data->type == w83627thf || data->type == w83637hf)
-			    && (i == 4 || i == 5)))
+			    && (i == 5 || i == 6)))
 				continue;
 			data->in[i] =
 			    w83627hf_read_value(client, W83781D_REG_IN(i));
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
index c9366b5..a2237d4 100644
--- a/drivers/i2c/busses/i2c-viapro.c
+++ b/drivers/i2c/busses/i2c-viapro.c
@@ -142,19 +142,18 @@
 	/* Make sure the SMBus host is ready to start transmitting */
 	if ((temp = inb_p(SMBHSTSTS)) & 0x1F) {
 		dev_dbg(&vt596_adapter.dev, "SMBus busy (0x%02x). "
-			"Resetting... ", temp);
+			"Resetting...\n", temp);
 
 		outb_p(temp, SMBHSTSTS);
 		if ((temp = inb_p(SMBHSTSTS)) & 0x1F) {
-			printk("Failed! (0x%02x)\n", temp);
+			dev_err(&vt596_adapter.dev, "SMBus reset failed! "
+				"(0x%02x)\n", temp);
 			return -1;
-		} else {
-			printk("Successful!\n");
 		}
 	}
 
 	/* Start the transaction by setting bit 6 */
-	outb_p(0x40 | (size & 0x3C), SMBHSTCNT);
+	outb_p(0x40 | size, SMBHSTCNT);
 
 	/* We will always wait for a fraction of a second */
 	do {
@@ -171,7 +170,7 @@
 	if (temp & 0x10) {
 		result = -1;
 		dev_err(&vt596_adapter.dev, "Transaction failed (0x%02x)\n",
-			inb_p(SMBHSTCNT) & 0x3C);
+			size);
 	}
 
 	if (temp & 0x08) {
@@ -180,11 +179,13 @@
 	}
 
 	if (temp & 0x04) {
+		int read = inb_p(SMBHSTADD) & 0x01;
 		result = -1;
-		/* Quick commands are used to probe for chips, so
-		   errors are expected, and we don't want to frighten the
-		   user. */
-		if ((inb_p(SMBHSTCNT) & 0x3C) != VT596_QUICK)
+		/* The quick and receive byte commands are used to probe
+		   for chips, so errors are expected, and we don't want
+		   to frighten the user. */
+		if (!((size == VT596_QUICK && !read) ||
+		      (size == VT596_BYTE && read)))
 			dev_err(&vt596_adapter.dev, "Transaction error!\n");
 	}
 
@@ -462,9 +463,9 @@
 	}
 }
 
-MODULE_AUTHOR(
-    "Frodo Looijaard <frodol@dds.nl> and "
-    "Philip Edelbrock <phil@netroedge.com>");
+MODULE_AUTHOR("Kyosti Malkki <kmalkki@cc.hut.fi>, "
+	      "Mark D. Studebaker <mdsxyz123@yahoo.com> and "
+	      "Jean Delvare <khali@linux-fr.org>");
 MODULE_DESCRIPTION("vt82c596 SMBus driver");
 MODULE_LICENSE("GPL");
 
diff --git a/drivers/i2c/chips/ds1337.c b/drivers/i2c/chips/ds1337.c
index 01b0370..02682fb 100644
--- a/drivers/i2c/chips/ds1337.c
+++ b/drivers/i2c/chips/ds1337.c
@@ -164,9 +164,9 @@
 	buf[1] = BIN2BCD(dt->tm_sec);
 	buf[2] = BIN2BCD(dt->tm_min);
 	buf[3] = BIN2BCD(dt->tm_hour);
-	buf[4] = BIN2BCD(dt->tm_wday) + 1;
+	buf[4] = BIN2BCD(dt->tm_wday + 1);
 	buf[5] = BIN2BCD(dt->tm_mday);
-	buf[6] = BIN2BCD(dt->tm_mon) + 1;
+	buf[6] = BIN2BCD(dt->tm_mon + 1);
 	val = dt->tm_year;
 	if (val >= 100) {
 		val -= 100;
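The ds1337 change above matters whenever the incremented value crosses a multiple of ten: the +1 offset must be applied before the BCD conversion, not after. A standalone check, using the BIN2BCD definition from <linux/bcd.h>:

	#include <stdio.h>

	#define BIN2BCD(val)	((((val)/10)<<4) + (val)%10)	/* as in <linux/bcd.h> */

	int main(void)
	{
		int tm_mon = 9;	/* rtc_time months run 0-11, so 9 = October */

		/* old code: convert, then add 1 -> 0x0a, not a valid BCD month */
		printf("BIN2BCD(tm_mon) + 1 = 0x%02x\n", BIN2BCD(tm_mon) + 1);
		/* fixed code: add 1, then convert -> 0x10, what the RTC expects */
		printf("BIN2BCD(tm_mon + 1) = 0x%02x\n", BIN2BCD(tm_mon + 1));
		return 0;
	}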
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index a737886..42e5b81 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -539,6 +539,15 @@
 
 	  It is safe to say Y to this question.
 
+config BLK_DEV_CS5535
+	tristate "AMD CS5535 chipset support"
+	depends on X86 && !X86_64
+	help
+	  Include support for UDMA on the NSC/AMD CS5535 companion chipset.
+	  This will automatically be detected and configured if found.
+
+	  It is safe to say Y to this question.
+
 config BLK_DEV_HPT34X
 	tristate "HPT34X chipset support"
 	help
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index e83f54d..f615ab7 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -2038,11 +2038,9 @@
 	struct ide_floppy_obj *floppy = ide_floppy_g(bdev->bd_disk);
 	ide_drive_t *drive = floppy->drive;
 	void __user *argp = (void __user *)arg;
-	int err = generic_ide_ioctl(drive, file, bdev, cmd, arg);
+	int err;
 	int prevent = (arg) ? 1 : 0;
 	idefloppy_pc_t pc;
-	if (err != -EINVAL)
-		return err;
 
 	switch (cmd) {
 	case CDROMEJECT:
@@ -2094,7 +2092,7 @@
 	case IDEFLOPPY_IOCTL_FORMAT_GET_PROGRESS:
 		return idefloppy_get_format_progress(drive, argp);
 	}
- 	return -EINVAL;
+	return generic_ide_ioctl(drive, file, bdev, cmd, arg);
 }
 
 static int idefloppy_media_changed(struct gendisk *disk)
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index 0b0aa4f..af7af95 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -104,8 +104,6 @@
 	hwif->INSL	= ide_insl;
 }
 
-EXPORT_SYMBOL(default_hwif_iops);
-
 /*
  *	MMIO operations, typically used for SATA controllers
  */
@@ -329,8 +327,6 @@
 	hwif->atapi_output_bytes	= atapi_output_bytes;
 }
 
-EXPORT_SYMBOL(default_hwif_transport);
-
 /*
  * Beginning of Taskfile OPCODE Library and feature sets.
  */
@@ -529,8 +525,6 @@
 	return 0;
 }
 
-EXPORT_SYMBOL(wait_for_ready);
-
 /*
  * This routine busy-waits for the drive status to be not "busy".
  * It then checks the status for all of the "good" bits and none
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index 7ec18fa..54f9639 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -161,8 +161,6 @@
 	return ide_stopped;
 }
 
-EXPORT_SYMBOL(do_rw_taskfile);
-
 /*
  * set_multmode_intr() is invoked on completion of a WIN_SETMULT cmd.
  */
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index 9fe1980..8af179b 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -803,6 +803,7 @@
 	hwif->irq = hw->irq;
 	hwif->noprobe = 0;
 	hwif->chipset = hw->chipset;
+	hwif->gendev.parent = hw->dev;
 
 	if (!initializing) {
 		probe_hwif_init_with_fixup(hwif, fixup);
diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/legacy/ide-cs.c
index 1dafffa..ef79805 100644
--- a/drivers/ide/legacy/ide-cs.c
+++ b/drivers/ide/legacy/ide-cs.c
@@ -182,13 +182,14 @@
     
 } /* ide_detach */
 
-static int idecs_register(unsigned long io, unsigned long ctl, unsigned long irq)
+static int idecs_register(unsigned long io, unsigned long ctl, unsigned long irq, struct pcmcia_device *handle)
 {
     hw_regs_t hw;
     memset(&hw, 0, sizeof(hw));
     ide_init_hwif_ports(&hw, io, ctl, NULL);
     hw.irq = irq;
     hw.chipset = ide_pci;
+    hw.dev = &handle->dev;
     return ide_register_hw_with_fixup(&hw, NULL, ide_undecoded_slave);
 }
 
@@ -327,12 +328,12 @@
 
     /* retry registration in case device is still spinning up */
     for (hd = -1, i = 0; i < 10; i++) {
-	hd = idecs_register(io_base, ctl_base, link->irq.AssignedIRQ);
+	hd = idecs_register(io_base, ctl_base, link->irq.AssignedIRQ, handle);
 	if (hd >= 0) break;
 	if (link->io.NumPorts1 == 0x20) {
 	    outb(0x02, ctl_base + 0x10);
 	    hd = idecs_register(io_base + 0x10, ctl_base + 0x10,
-				link->irq.AssignedIRQ);
+				link->irq.AssignedIRQ, handle);
 	    if (hd >= 0) {
 		io_base += 0x10;
 		ctl_base += 0x10;
diff --git a/drivers/ide/pci/Makefile b/drivers/ide/pci/Makefile
index af46226..f35d684 100644
--- a/drivers/ide/pci/Makefile
+++ b/drivers/ide/pci/Makefile
@@ -6,6 +6,7 @@
 obj-$(CONFIG_BLK_DEV_CMD64X)		+= cmd64x.o
 obj-$(CONFIG_BLK_DEV_CS5520)		+= cs5520.o
 obj-$(CONFIG_BLK_DEV_CS5530)		+= cs5530.o
+obj-$(CONFIG_BLK_DEV_CS5535)		+= cs5535.o
 obj-$(CONFIG_BLK_DEV_SC1200)		+= sc1200.o
 obj-$(CONFIG_BLK_DEV_CY82C693)		+= cy82c693.o
 obj-$(CONFIG_BLK_DEV_HPT34X)		+= hpt34x.o
diff --git a/drivers/ide/pci/amd74xx.c b/drivers/ide/pci/amd74xx.c
index 844a6c9..21965e5 100644
--- a/drivers/ide/pci/amd74xx.c
+++ b/drivers/ide/pci/amd74xx.c
@@ -74,6 +74,7 @@
 	{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE,	0x50, AMD_UDMA_133 },
 	{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE,	0x50, AMD_UDMA_133 },
 	{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE,	0x50, AMD_UDMA_133 },
+	{ PCI_DEVICE_ID_AMD_CS5536_IDE,			0x40, AMD_UDMA_100 },
 	{ 0 }
 };
 
@@ -491,6 +492,7 @@
 	/* 14 */ DECLARE_NV_DEV("NFORCE-MCP04"),
 	/* 15 */ DECLARE_NV_DEV("NFORCE-MCP51"),
 	/* 16 */ DECLARE_NV_DEV("NFORCE-MCP55"),
+	/* 17 */ DECLARE_AMD_DEV("AMD5536"),
 };
 
 static int __devinit amd74xx_probe(struct pci_dev *dev, const struct pci_device_id *id)
@@ -527,6 +529,7 @@
 	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE,	PCI_ANY_ID, PCI_ANY_ID, 0, 0, 14 },
 	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE,	PCI_ANY_ID, PCI_ANY_ID, 0, 0, 15 },
 	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE,	PCI_ANY_ID, PCI_ANY_ID, 0, 0, 16 },
+	{ PCI_VENDOR_ID_AMD,	PCI_DEVICE_ID_AMD_CS5536_IDE,		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 17 },
 	{ 0, },
 };
 MODULE_DEVICE_TABLE(pci, amd74xx_pci_tbl);
diff --git a/drivers/ide/pci/cs5535.c b/drivers/ide/pci/cs5535.c
new file mode 100644
index 0000000..6eb3051
--- /dev/null
+++ b/drivers/ide/pci/cs5535.c
@@ -0,0 +1,305 @@
+/*
+ * linux/drivers/ide/pci/cs5535.c
+ *
+ * Copyright (C) 2004-2005 Advanced Micro Devices, Inc.
+ *
+ * History:
+ * 09/20/2005 - Jaya Kumar <jayakumar.ide@gmail.com>
+ * - Reworked tuneproc, set_drive, misc mods to prep for mainline
+ * - Work was sponsored by CIS (M) Sdn Bhd.
+ * Ported to Kernel 2.6.11 on June 26, 2005 by
+ *   Wolfgang Zuleger <wolfgang.zuleger@gmx.de>
+ *   Alexander Kiausch <alex.kiausch@t-online.de>
+ * Originally developed by AMD for 2.4/2.6
+ *
+ * Development of this chipset driver was funded
+ * by the nice folks at National Semiconductor/AMD.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * Documentation:
+ *  CS5535 documentation available from AMD
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/ide.h>
+
+#include "ide-timing.h"
+
+#define MSR_ATAC_BASE		0x51300000
+#define ATAC_GLD_MSR_CAP	(MSR_ATAC_BASE+0)
+#define ATAC_GLD_MSR_CONFIG	(MSR_ATAC_BASE+0x01)
+#define ATAC_GLD_MSR_SMI	(MSR_ATAC_BASE+0x02)
+#define ATAC_GLD_MSR_ERROR	(MSR_ATAC_BASE+0x03)
+#define ATAC_GLD_MSR_PM		(MSR_ATAC_BASE+0x04)
+#define ATAC_GLD_MSR_DIAG	(MSR_ATAC_BASE+0x05)
+#define ATAC_IO_BAR		(MSR_ATAC_BASE+0x08)
+#define ATAC_RESET		(MSR_ATAC_BASE+0x10)
+#define ATAC_CH0D0_PIO		(MSR_ATAC_BASE+0x20)
+#define ATAC_CH0D0_DMA		(MSR_ATAC_BASE+0x21)
+#define ATAC_CH0D1_PIO		(MSR_ATAC_BASE+0x22)
+#define ATAC_CH0D1_DMA		(MSR_ATAC_BASE+0x23)
+#define ATAC_PCI_ABRTERR	(MSR_ATAC_BASE+0x24)
+#define ATAC_BM0_CMD_PRIM	0x00
+#define ATAC_BM0_STS_PRIM	0x02
+#define ATAC_BM0_PRD		0x04
+#define CS5535_CABLE_DETECT	0x48
+
+/* Format I PIO settings. We separate out cmd and data for safer timings */
+
+static unsigned int cs5535_pio_cmd_timings[5] =
+{ 0xF7F4, 0x53F3, 0x13F1, 0x5131, 0x1131 };
+static unsigned int cs5535_pio_dta_timings[5] =
+{ 0xF7F4, 0xF173, 0x8141, 0x5131, 0x1131 };
+
+static unsigned int cs5535_mwdma_timings[3] =
+{ 0x7F0FFFF3, 0x7F035352, 0x7f024241 };
+
+static unsigned int cs5535_udma_timings[5] =
+{ 0x7F7436A1, 0x7F733481, 0x7F723261, 0x7F713161, 0x7F703061 };
+
+/* Macros to check if the register is the reset value -  reset value is an
+   invalid timing and indicates the register has not been set previously */
+
+#define CS5535_BAD_PIO(timings) ( (timings&~0x80000000UL) == 0x00009172 )
+#define CS5535_BAD_DMA(timings) ( (timings & 0x000FFFFF) == 0x00077771 )
+
+/****
+ *	cs5535_set_speed         -     Configure the chipset to the new speed
+ *	@drive: Drive to set up
+ *	@speed: desired speed
+ *
+ *	cs5535_set_speed() configures the chipset to a new speed.
+ */
+static void cs5535_set_speed(ide_drive_t *drive, u8 speed)
+{
+
+	u32 reg = 0, dummy;
+	int unit = drive->select.b.unit;
+
+
+	/* Set the PIO timings */
+	if ((speed & XFER_MODE) == XFER_PIO) {
+		u8 pioa;
+		u8 piob;
+		u8 cmd;
+
+		pioa = speed - XFER_PIO_0;
+		piob = ide_get_best_pio_mode(&(drive->hwif->drives[!unit]),
+						255, 4, NULL);
+		cmd = pioa < piob ? pioa : piob;
+
+		/* Write the speed of the current drive */
+		reg = (cs5535_pio_cmd_timings[cmd] << 16) |
+			cs5535_pio_dta_timings[pioa];
+		wrmsr(unit ? ATAC_CH0D1_PIO : ATAC_CH0D0_PIO, reg, 0);
+
+		/* And if necessary - change the speed of the other drive */
+		rdmsr(unit ?  ATAC_CH0D0_PIO : ATAC_CH0D1_PIO, reg, dummy);
+
+		if (((reg >> 16) & cs5535_pio_cmd_timings[cmd]) !=
+			cs5535_pio_cmd_timings[cmd]) {
+			reg &= 0x0000FFFF;
+			reg |= cs5535_pio_cmd_timings[cmd] << 16;
+			wrmsr(unit ? ATAC_CH0D0_PIO : ATAC_CH0D1_PIO, reg, 0);
+		}
+
+		/* Set bit 31 of the DMA register for PIO format 1 timings */
+		rdmsr(unit ?  ATAC_CH0D1_DMA : ATAC_CH0D0_DMA, reg, dummy);
+		wrmsr(unit ? ATAC_CH0D1_DMA : ATAC_CH0D0_DMA,
+					reg | 0x80000000UL, 0);
+	} else {
+		rdmsr(unit ? ATAC_CH0D1_DMA : ATAC_CH0D0_DMA, reg, dummy);
+
+		reg &= 0x80000000UL;  /* Preserve the PIO format bit */
+
+		if (speed >= XFER_UDMA_0 && speed <= XFER_UDMA_7)
+			reg |= cs5535_udma_timings[speed - XFER_UDMA_0];
+		else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
+			reg |= cs5535_mwdma_timings[speed - XFER_MW_DMA_0];
+		else
+			return;
+
+		wrmsr(unit ? ATAC_CH0D1_DMA : ATAC_CH0D0_DMA, reg, 0);
+	}
+}
+
+static u8 cs5535_ratemask(ide_drive_t *drive)
+{
+	/* eighty93 will return 1 if it's 80core and capable of
+	exceeding udma2, 0 otherwise. we need ratemask to set
+	the max speed and if we can > udma2 then we return 2
+	which selects speed_max as udma4 which is the 5535's max
+	speed, and 1 selects udma2 which is the max for 40c */
+	if (!eighty_ninty_three(drive))
+		return 1;
+
+	return 2;
+}
+
+
+/****
+ *	cs5535_set_drive         -     Configure the drive to the new speed
+ *	@drive: Drive to set up
+ *	@speed: desired speed
+ *
+ *	cs5535_set_drive() configures the drive and the chipset to a
+ *	new speed. It also can be called by upper layers.
+ */
+static int cs5535_set_drive(ide_drive_t *drive, u8 speed)
+{
+	speed = ide_rate_filter(cs5535_ratemask(drive), speed);
+	ide_config_drive_speed(drive, speed);
+	cs5535_set_speed(drive, speed);
+
+	return 0;
+}
+
+/****
+ *	cs5535_tuneproc    -       PIO setup
+ *	@drive: drive to set up
+ *	@pio: mode to use (255 for 'best possible')
+ *
+ *	A callback from the upper layers for PIO-only tuning.
+ */
+static void cs5535_tuneproc(ide_drive_t *drive, u8 xferspeed)
+{
+	u8 modes[] = {	XFER_PIO_0, XFER_PIO_1, XFER_PIO_2, XFER_PIO_3,
+			XFER_PIO_4 };
+
+	/* cs5535 max pio is pio 4, best_pio will check the blacklist.
+	I think we don't need to rate_filter the incoming xferspeed
+	since we know we're only going to choose pio */
+	xferspeed = ide_get_best_pio_mode(drive, xferspeed, 4, NULL);
+	ide_config_drive_speed(drive, modes[xferspeed]);
+	cs5535_set_speed(drive, xferspeed);
+}
+
+static int cs5535_config_drive_for_dma(ide_drive_t *drive)
+{
+	u8 speed;
+
+	speed = ide_dma_speed(drive, cs5535_ratemask(drive));
+
+	/* If no DMA speed was available then let dma_check fall back to PIO */
+	if (!speed)
+		return 0;
+
+	cs5535_set_drive(drive, speed);
+	return ide_dma_enable(drive);
+}
+
+static int cs5535_dma_check(ide_drive_t *drive)
+{
+	ide_hwif_t *hwif	= drive->hwif;
+	struct hd_driveid *id	= drive->id;
+	u8 speed;
+
+	drive->init_speed = 0;
+
+	if ((id->capability & 1) && drive->autodma) {
+		if (ide_use_dma(drive)) {
+			if (cs5535_config_drive_for_dma(drive))
+				return hwif->ide_dma_on(drive);
+		}
+
+		goto fast_ata_pio;
+
+	} else if ((id->capability & 8) || (id->field_valid & 2)) {
+fast_ata_pio:
+		speed = ide_get_best_pio_mode(drive, 255, 4, NULL);
+		cs5535_set_drive(drive, speed);
+		return hwif->ide_dma_off_quietly(drive);
+	}
+	/* IORDY not supported */
+	return 0;
+}
+
+static u8 __devinit cs5535_cable_detect(struct pci_dev *dev)
+{
+	u8 bit;
+
+	/* Bit 0 is set if an 80-wire cable was detected */
+	pci_read_config_byte(dev, CS5535_CABLE_DETECT, &bit);
+	return (bit & 1);
+}
+
+/****
+ *	init_hwif_cs5535        -       Initialize one IDE channel
+ *	@hwif: Channel descriptor
+ *
+ *	This gets invoked by the IDE driver once for each channel. It
+ *	performs channel-specific pre-initialization before drive probing.
+ *
+ */
+static void __devinit init_hwif_cs5535(ide_hwif_t *hwif)
+{
+	int i;
+
+	hwif->autodma = 0;
+
+	hwif->tuneproc = &cs5535_tuneproc;
+	hwif->speedproc = &cs5535_set_drive;
+	hwif->ide_dma_check = &cs5535_dma_check;
+
+	hwif->atapi_dma = 1;
+	hwif->ultra_mask = 0x1F;
+	hwif->mwdma_mask = 0x07;
+
+	hwif->udma_four = cs5535_cable_detect(hwif->pci_dev);
+
+	if (!noautodma)
+		hwif->autodma = 1;
+
+	/* Just set autotune and ignore the BIOS timings */
+	for (i = 0; i < 2; i++) {
+		hwif->drives[i].autotune = 1;
+		hwif->drives[i].autodma = hwif->autodma;
+	}
+}
+
+static ide_pci_device_t cs5535_chipset __devinitdata = {
+	.name		= "CS5535",
+	.init_hwif	= init_hwif_cs5535,
+	.channels	= 1,
+	.autodma	= AUTODMA,
+	.bootable	= ON_BOARD,
+};
+
+static int __devinit cs5535_init_one(struct pci_dev *dev,
+					const struct pci_device_id *id)
+{
+	return ide_setup_pci_device(dev, &cs5535_chipset);
+}
+
+static struct pci_device_id cs5535_pci_tbl[] =
+{
+	{ PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_IDE, PCI_ANY_ID,
+		PCI_ANY_ID, 0, 0, 0},
+	{ 0, },
+};
+
+MODULE_DEVICE_TABLE(pci, cs5535_pci_tbl);
+
+static struct pci_driver driver = {
+	.name       = "CS5535_IDE",
+	.id_table   = cs5535_pci_tbl,
+	.probe      = cs5535_init_one,
+};
+
+static int __init cs5535_ide_init(void)
+{
+	return ide_pci_register_driver(&driver);
+}
+
+module_init(cs5535_ide_init);
+
+MODULE_AUTHOR("AMD");
+MODULE_DESCRIPTION("PCI driver module for AMD/NS CS5535 IDE");
+MODULE_LICENSE("GPL");
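
In the PIO branch above, each drive gets its own data timings, but the command timings in the upper half of the MSR are shared by both drives on the channel, so the slower drive's PIO mode wins. A minimal user-space sketch of that selection and packing follows; the timing words are made-up placeholders, not the driver's real cs5535_pio_cmd_timings/cs5535_pio_dta_timings values.

#include <stdio.h>
#include <stdint.h>

/* Placeholder timing words only; the driver's real timing tables
 * are not reproduced here. */
static const uint16_t pio_cmd_timings[5] = { 0x50, 0x40, 0x30, 0x20, 0x10 };
static const uint16_t pio_dta_timings[5] = { 0xf5, 0xd4, 0xc3, 0xb2, 0xa1 };

/* Pack one drive's PIO MSR value: the command timings are shared by both
 * drives on the channel, so they follow the slower of the two PIO modes,
 * while the per-drive data timings go in the low 16 bits. */
static uint32_t pack_pio_reg(int this_pio, int other_pio)
{
	int cmd = this_pio < other_pio ? this_pio : other_pio;

	return ((uint32_t) pio_cmd_timings[cmd] << 16) | pio_dta_timings[this_pio];
}

int main(void)
{
	/* Drive 0 runs PIO 4, drive 1 only PIO 2: both use PIO 2 command timings. */
	printf("drive0 reg = 0x%08x\n", pack_pio_reg(4, 2));
	printf("drive1 reg = 0x%08x\n", pack_pio_reg(2, 4));
	return 0;
}
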
diff --git a/drivers/ide/pci/cy82c693.c b/drivers/ide/pci/cy82c693.c
index 5a33513..9f41ecd 100644
--- a/drivers/ide/pci/cy82c693.c
+++ b/drivers/ide/pci/cy82c693.c
@@ -469,7 +469,7 @@
 
 static __devinitdata ide_hwif_t *primary;
 
-void __devinit init_iops_cy82c693(ide_hwif_t *hwif)
+static void __devinit init_iops_cy82c693(ide_hwif_t *hwif)
 {
 	if (PCI_FUNC(hwif->pci_dev->devfn) == 1)
 		primary = hwif;
diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/pci/siimage.c
index 2b9961b..022d244 100644
--- a/drivers/ide/pci/siimage.c
+++ b/drivers/ide/pci/siimage.c
@@ -701,6 +701,7 @@
 	unsigned long barsize	= pci_resource_len(dev, 5);
 	u8 tmpbyte	= 0;
 	void __iomem *ioaddr;
+	u32 tmp, irq_mask;
 
 	/*
 	 *	Drop back to PIO if we can't map the mmio. Some
@@ -726,6 +727,14 @@
 	pci_set_drvdata(dev, (void *) ioaddr);
 
 	if (pdev_is_sata(dev)) {
+		/* make sure IDE0/1 interrupts are not masked */
+		irq_mask = (1 << 22) | (1 << 23);
+		tmp = readl(ioaddr + 0x48);
+		if (tmp & irq_mask) {
+			tmp &= ~irq_mask;
+			writel(tmp, ioaddr + 0x48);
+			readl(ioaddr + 0x48); /* flush */
+		}
 		writel(0, ioaddr + 0x148);
 		writel(0, ioaddr + 0x1C8);
 	}
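
The new SATA branch only touches the interrupt-mask register when bit 22 or 23 is actually set, and reads it back so the write is not left posted. The same read-modify-write, sketched on an ordinary variable standing in for the MMIO register at offset 0x48:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t reg = 0x00c00010;                      /* pretend MMIO contents */
	const uint32_t irq_mask = (1u << 22) | (1u << 23);

	if (reg & irq_mask) {
		reg &= ~irq_mask;       /* unmask the IDE0/IDE1 interrupts */
		/* a real driver would writel() here and readl() to flush */
	}

	printf("reg = 0x%08x\n", reg);  /* prints 0x00000010 */
	return 0;
}
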
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index aed5ca2..5ea741f 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -31,7 +31,7 @@
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  *
- * $Id: user_mad.c 2814 2005-07-06 19:14:09Z halr $
+ * $Id: user_mad.c 4010 2005-11-09 23:11:56Z roland $
  */
 
 #include <linux/module.h>
@@ -110,13 +110,13 @@
 };
 
 struct ib_umad_file {
-	struct ib_umad_port *port;
-	struct list_head     recv_list;
-	struct list_head     port_list;
-	spinlock_t           recv_lock;
-	wait_queue_head_t    recv_wait;
-	struct ib_mad_agent *agent[IB_UMAD_MAX_AGENTS];
-	struct ib_mr        *mr[IB_UMAD_MAX_AGENTS];
+	struct ib_umad_port    *port;
+	struct list_head	recv_list;
+	struct list_head	port_list;
+	spinlock_t		recv_lock;
+	wait_queue_head_t	recv_wait;
+	struct ib_mad_agent    *agent[IB_UMAD_MAX_AGENTS];
+	int			agents_dead;
 };
 
 struct ib_umad_packet {
@@ -145,6 +145,12 @@
 	kfree(dev);
 }
 
+/* caller must hold port->mutex at least for reading */
+static struct ib_mad_agent *__get_agent(struct ib_umad_file *file, int id)
+{
+	return file->agents_dead ? NULL : file->agent[id];
+}
+
 static int queue_packet(struct ib_umad_file *file,
 			struct ib_mad_agent *agent,
 			struct ib_umad_packet *packet)
@@ -152,10 +158,11 @@
 	int ret = 1;
 
 	down_read(&file->port->mutex);
+
 	for (packet->mad.hdr.id = 0;
 	     packet->mad.hdr.id < IB_UMAD_MAX_AGENTS;
 	     packet->mad.hdr.id++)
-		if (agent == file->agent[packet->mad.hdr.id]) {
+		if (agent == __get_agent(file, packet->mad.hdr.id)) {
 			spin_lock_irq(&file->recv_lock);
 			list_add_tail(&packet->list, &file->recv_list);
 			spin_unlock_irq(&file->recv_lock);
@@ -327,7 +334,7 @@
 
 	down_read(&file->port->mutex);
 
-	agent = file->agent[packet->mad.hdr.id];
+	agent = __get_agent(file, packet->mad.hdr.id);
 	if (!agent) {
 		ret = -EINVAL;
 		goto err_up;
@@ -481,7 +488,7 @@
 	}
 
 	for (agent_id = 0; agent_id < IB_UMAD_MAX_AGENTS; ++agent_id)
-		if (!file->agent[agent_id])
+		if (!__get_agent(file, agent_id))
 			goto found;
 
 	ret = -ENOMEM;
@@ -505,29 +512,15 @@
 		goto out;
 	}
 
-	file->agent[agent_id] = agent;
-
-	file->mr[agent_id] = ib_get_dma_mr(agent->qp->pd, IB_ACCESS_LOCAL_WRITE);
-	if (IS_ERR(file->mr[agent_id])) {
-		ret = -ENOMEM;
-		goto err;
-	}
-
 	if (put_user(agent_id,
 		     (u32 __user *) (arg + offsetof(struct ib_user_mad_reg_req, id)))) {
 		ret = -EFAULT;
-		goto err_mr;
+		ib_unregister_mad_agent(agent);
+		goto out;
 	}
 
+	file->agent[agent_id] = agent;
 	ret = 0;
-	goto out;
-
-err_mr:
-	ib_dereg_mr(file->mr[agent_id]);
-
-err:
-	file->agent[agent_id] = NULL;
-	ib_unregister_mad_agent(agent);
 
 out:
 	up_write(&file->port->mutex);
@@ -536,27 +529,29 @@
 
 static int ib_umad_unreg_agent(struct ib_umad_file *file, unsigned long arg)
 {
+	struct ib_mad_agent *agent = NULL;
 	u32 id;
 	int ret = 0;
 
+	if (get_user(id, (u32 __user *) arg))
+		return -EFAULT;
+
 	down_write(&file->port->mutex);
 
-	if (get_user(id, (u32 __user *) arg)) {
-		ret = -EFAULT;
-		goto out;
-	}
-
-	if (id < 0 || id >= IB_UMAD_MAX_AGENTS || !file->agent[id]) {
+	if (id < 0 || id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) {
 		ret = -EINVAL;
 		goto out;
 	}
 
-	ib_dereg_mr(file->mr[id]);
-	ib_unregister_mad_agent(file->agent[id]);
+	agent = file->agent[id];
 	file->agent[id] = NULL;
 
 out:
 	up_write(&file->port->mutex);
+
+	if (agent)
+		ib_unregister_mad_agent(agent);
+
 	return ret;
 }
 
@@ -621,23 +616,29 @@
 	struct ib_umad_file *file = filp->private_data;
 	struct ib_umad_device *dev = file->port->umad_dev;
 	struct ib_umad_packet *packet, *tmp;
+	int already_dead;
 	int i;
 
 	down_write(&file->port->mutex);
-	for (i = 0; i < IB_UMAD_MAX_AGENTS; ++i)
-		if (file->agent[i]) {
-			ib_dereg_mr(file->mr[i]);
-			ib_unregister_mad_agent(file->agent[i]);
-		}
+
+	already_dead = file->agents_dead;
+	file->agents_dead = 1;
 
 	list_for_each_entry_safe(packet, tmp, &file->recv_list, list)
 		kfree(packet);
 
 	list_del(&file->port_list);
-	up_write(&file->port->mutex);
+
+	downgrade_write(&file->port->mutex);
+
+	if (!already_dead)
+		for (i = 0; i < IB_UMAD_MAX_AGENTS; ++i)
+			if (file->agent[i])
+				ib_unregister_mad_agent(file->agent[i]);
+
+	up_read(&file->port->mutex);
 
 	kfree(file);
-
 	kref_put(&dev->ref, ib_umad_release_dev);
 
 	return 0;
@@ -801,7 +802,7 @@
 		goto err_class;
 	port->sm_dev->owner = THIS_MODULE;
 	port->sm_dev->ops   = &umad_sm_fops;
-	kobject_set_name(&port->dev->kobj, "issm%d", port->dev_num);
+	kobject_set_name(&port->sm_dev->kobj, "issm%d", port->dev_num);
 	if (cdev_add(port->sm_dev, base_dev + port->dev_num + IB_UMAD_MAX_PORTS, 1))
 		goto err_sm_cdev;
 
@@ -863,14 +864,36 @@
 
 	port->ib_dev = NULL;
 
-	list_for_each_entry(file, &port->file_list, port_list)
-		for (id = 0; id < IB_UMAD_MAX_AGENTS; ++id) {
-			if (!file->agent[id])
-				continue;
-			ib_dereg_mr(file->mr[id]);
-			ib_unregister_mad_agent(file->agent[id]);
-			file->agent[id] = NULL;
-		}
+	/*
+	 * Now go through the list of files attached to this port and
+	 * unregister all of their MAD agents.  We need to hold
+	 * port->mutex while doing this to avoid racing with
+	 * ib_umad_close(), but we can't hold the mutex for writing
+	 * while calling ib_unregister_mad_agent(), since that might
+	 * deadlock by calling back into queue_packet().  So we
+	 * downgrade our lock to a read lock, and then drop and
+	 * reacquire the write lock for the next iteration.
+	 *
+	 * We do list_del_init() on the file's list_head so that the
+	 * list_del in ib_umad_close() is still OK, even after the
+	 * file is removed from the list.
+	 */
+	while (!list_empty(&port->file_list)) {
+		file = list_entry(port->file_list.next, struct ib_umad_file,
+				  port_list);
+
+		file->agents_dead = 1;
+		list_del_init(&file->port_list);
+
+		downgrade_write(&port->mutex);
+
+		for (id = 0; id < IB_UMAD_MAX_AGENTS; ++id)
+			if (file->agent[id])
+				ib_unregister_mad_agent(file->agent[id]);
+
+		up_read(&port->mutex);
+		down_write(&port->mutex);
+	}
 
 	up_write(&port->mutex);
 
@@ -913,7 +936,7 @@
 
 err:
 	while (--i >= s)
-		ib_umad_kill_port(&umad_dev->port[i]);
+		ib_umad_kill_port(&umad_dev->port[i - s]);
 
 	kref_put(&umad_dev->ref, ib_umad_release_dev);
 }
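
The agents_dead flag added above lets every later lookup fail cleanly once a file is being torn down, rather than racing with the unregister loop in ib_umad_close() or ib_umad_kill_port(). A toy model of the __get_agent() behaviour, with the locking left out and an arbitrary placeholder standing in for a MAD agent:

#include <stdio.h>
#include <stddef.h>

#define MAX_AGENTS 4

struct file_state {
	int   agents_dead;
	void *agent[MAX_AGENTS];
};

/* Mirrors __get_agent(): once agents_dead is set, every lookup returns NULL. */
static void *get_agent(struct file_state *f, int id)
{
	return f->agents_dead ? NULL : f->agent[id];
}

int main(void)
{
	static int dummy;
	struct file_state f = { 0, { &dummy, NULL, NULL, NULL } };

	printf("before close: %p\n", get_agent(&f, 0));   /* valid pointer */
	f.agents_dead = 1;                                /* what close/kill_port set */
	printf("after close:  %p\n", get_agent(&f, 0));   /* NULL */
	return 0;
}
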
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 63a7415..ed45da8 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -708,7 +708,7 @@
 		resp->wc[i].opcode 	   = wc[i].opcode;
 		resp->wc[i].vendor_err 	   = wc[i].vendor_err;
 		resp->wc[i].byte_len 	   = wc[i].byte_len;
-		resp->wc[i].imm_data 	   = wc[i].imm_data;
+		resp->wc[i].imm_data 	   = (__u32 __force) wc[i].imm_data;
 		resp->wc[i].qp_num 	   = wc[i].qp_num;
 		resp->wc[i].src_qp 	   = wc[i].src_qp;
 		resp->wc[i].wc_flags 	   = wc[i].wc_flags;
@@ -908,7 +908,12 @@
 	if (ret)
 		goto err_destroy;
 
-	resp.qp_handle = uobj->uobject.id;
+	resp.qp_handle       = uobj->uobject.id;
+	resp.max_recv_sge    = attr.cap.max_recv_sge;
+	resp.max_send_sge    = attr.cap.max_send_sge;
+	resp.max_recv_wr     = attr.cap.max_recv_wr;
+	resp.max_send_wr     = attr.cap.max_send_wr;
+	resp.max_inline_data = attr.cap.max_inline_data;
 
 	if (copy_to_user((void __user *) (unsigned long) cmd.response,
 			 &resp, sizeof resp)) {
@@ -1135,7 +1140,7 @@
 		next->num_sge    = user_wr->num_sge;
 		next->opcode     = user_wr->opcode;
 		next->send_flags = user_wr->send_flags;
-		next->imm_data   = user_wr->imm_data;
+		next->imm_data   = (__be32 __force) user_wr->imm_data;
 
 		if (qp->qp_type == IB_QPT_UD) {
 			next->wr.ud.ah = idr_find(&ib_uverbs_ah_idr,
@@ -1701,7 +1706,6 @@
 	}
 
 	attr.max_wr    = cmd.max_wr;
-	attr.max_sge   = cmd.max_sge;
 	attr.srq_limit = cmd.srq_limit;
 
 	ret = ib_modify_srq(srq, &attr, cmd.attr_mask);
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 4186cc8..4c15e11 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -325,16 +325,8 @@
 int ib_resize_cq(struct ib_cq *cq,
                  int           cqe)
 {
-	int ret;
-
-	if (!cq->device->resize_cq)
-		return -ENOSYS;
-
-	ret = cq->device->resize_cq(cq, &cqe);
-	if (!ret)
-		cq->cqe = cqe;
-
-	return ret;
+	return cq->device->resize_cq ?
+		cq->device->resize_cq(cq, cqe) : -ENOSYS;
 }
 EXPORT_SYMBOL(ib_resize_cq);
 
diff --git a/drivers/infiniband/hw/mthca/mthca_catas.c b/drivers/infiniband/hw/mthca/mthca_catas.c
index 25ebab6..c3bec74 100644
--- a/drivers/infiniband/hw/mthca/mthca_catas.c
+++ b/drivers/infiniband/hw/mthca/mthca_catas.c
@@ -97,7 +97,7 @@
 		}
 
 	spin_lock_irqsave(&catas_lock, flags);
-	if (dev->catas_err.stop)
+	if (!dev->catas_err.stop)
 		mod_timer(&dev->catas_err.timer,
 			  jiffies + MTHCA_CATAS_POLL_INTERVAL);
 	spin_unlock_irqrestore(&catas_lock, flags);
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 49f211d..9ed3458 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -1060,6 +1060,8 @@
 		dev_lim->hca.arbel.resize_srq = field & 1;
 		MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SG_RQ_OFFSET);
 		dev_lim->max_sg = min_t(int, field, dev_lim->max_sg);
+		MTHCA_GET(size, outbox, QUERY_DEV_LIM_MAX_DESC_SZ_RQ_OFFSET);
+		dev_lim->max_desc_sz = min_t(int, size, dev_lim->max_desc_sz);
 		MTHCA_GET(size, outbox, QUERY_DEV_LIM_MPT_ENTRY_SZ_OFFSET);
 		dev_lim->mpt_entry_sz = size;
 		MTHCA_GET(field, outbox, QUERY_DEV_LIM_PBL_SZ_OFFSET);
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index f98e235..4a8adce 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -258,7 +258,7 @@
 {
 	struct mthca_cq *cq;
 	struct mthca_cqe *cqe;
-	int prod_index;
+	u32 prod_index;
 	int nfreed = 0;
 
 	spin_lock_irq(&dev->cq_table.lock);
@@ -293,19 +293,15 @@
 	 * Now sweep backwards through the CQ, removing CQ entries
 	 * that match our QP by copying older entries on top of them.
 	 */
-	while (prod_index > cq->cons_index) {
-		cqe = get_cqe(cq, (prod_index - 1) & cq->ibcq.cqe);
+	while ((int) --prod_index - (int) cq->cons_index >= 0) {
+		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
 		if (cqe->my_qpn == cpu_to_be32(qpn)) {
 			if (srq)
 				mthca_free_srq_wqe(srq, be32_to_cpu(cqe->wqe));
 			++nfreed;
-		}
-		else if (nfreed)
-			memcpy(get_cqe(cq, (prod_index - 1 + nfreed) &
-				       cq->ibcq.cqe),
-			       cqe,
-			       MTHCA_CQ_ENTRY_SIZE);
-		--prod_index;
+		} else if (nfreed)
+			memcpy(get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe),
+			       cqe, MTHCA_CQ_ENTRY_SIZE);
 	}
 
 	if (nfreed) {
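
The rewritten sweep compares producer and consumer indices through a signed difference, so it still walks exactly the entries between them when the 32-bit producer index has wrapped around. A stand-alone worked example of that termination test, with indices chosen to straddle the wrap:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t prod_index = 3;           /* has wrapped past zero */
	uint32_t cons_index = 0xfffffffe;  /* still just below the wrap */
	int visited = 0;

	/* Same test as the driver: stop once prod_index falls below cons_index,
	 * treating the difference as signed so the wrap does not confuse it. */
	while ((int32_t) --prod_index - (int32_t) cons_index >= 0)
		++visited;

	/* visits indexes 2, 1, 0, 0xffffffff, 0xfffffffe */
	printf("entries visited: %d\n", visited);  /* prints 5 */
	return 0;
}
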
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index e7e5d3b..497ff79 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -131,6 +131,7 @@
 	int      max_sg;
 	int      num_qps;
 	int      max_wqes;
+	int	 max_desc_sz;
 	int	 max_qp_init_rdma;
 	int      reserved_qps;
 	int      num_srqs;
@@ -154,6 +155,7 @@
 	int      reserved_mcgs;
 	int      num_pds;
 	int      reserved_pds;
+	u32      page_size_cap;
 	u32      flags;
 	u8       port_width_cap;
 };
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 45c6328..147f248 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -168,6 +168,7 @@
 	mdev->limits.max_srq_wqes       = dev_lim->max_srq_sz;
 	mdev->limits.reserved_srqs      = dev_lim->reserved_srqs;
 	mdev->limits.reserved_eecs      = dev_lim->reserved_eecs;
+	mdev->limits.max_desc_sz        = dev_lim->max_desc_sz;
 	/*
 	 * Subtract 1 from the limit because we need to allocate a
 	 * spare CQE so the HCA HW can tell the difference between an
@@ -181,6 +182,7 @@
 	mdev->limits.reserved_uars      = dev_lim->reserved_uars;
 	mdev->limits.reserved_pds       = dev_lim->reserved_pds;
 	mdev->limits.port_width_cap     = dev_lim->max_port_width;
+	mdev->limits.page_size_cap      = ~(u32) (dev_lim->min_page_sz - 1);
 	mdev->limits.flags              = dev_lim->flags;
 
 	/* IB_DEVICE_RESIZE_MAX_WR not supported by driver.
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 6b01666..4cc7e28 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -90,6 +90,7 @@
 	memcpy(&props->node_guid,      out_mad->data + 12, 8);
 
 	props->max_mr_size         = ~0ull;
+	props->page_size_cap       = mdev->limits.page_size_cap;
 	props->max_qp              = mdev->limits.num_qps - mdev->limits.reserved_qps;
 	props->max_qp_wr           = mdev->limits.max_wqes;
 	props->max_sge             = mdev->limits.max_sg;
@@ -615,11 +616,11 @@
 		return ERR_PTR(err);
 	}
 
-	init_attr->cap.max_inline_data = 0;
 	init_attr->cap.max_send_wr     = qp->sq.max;
 	init_attr->cap.max_recv_wr     = qp->rq.max;
 	init_attr->cap.max_send_sge    = qp->sq.max_gs;
 	init_attr->cap.max_recv_sge    = qp->rq.max_gs;
+	init_attr->cap.max_inline_data = qp->max_inline_data;
 
 	return &qp->ibqp;
 }
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index bcd4b01..1e73947 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -251,6 +251,7 @@
 	struct mthca_wq        sq;
 	enum ib_sig_type       sq_policy;
 	int                    send_wqe_offset;
+	int                    max_inline_data;
 
 	u64                   *wrid;
 	union mthca_buf	       queue;
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 8852ea4..760c418d 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -885,6 +885,48 @@
 	return err;
 }
 
+static void mthca_adjust_qp_caps(struct mthca_dev *dev,
+				 struct mthca_pd *pd,
+				 struct mthca_qp *qp)
+{
+	int max_data_size;
+
+	/*
+	 * Calculate the maximum size of WQE s/g segments, excluding
+	 * the next segment and other non-data segments.
+	 */
+	max_data_size = min(dev->limits.max_desc_sz, 1 << qp->sq.wqe_shift) -
+		sizeof (struct mthca_next_seg);
+
+	switch (qp->transport) {
+	case MLX:
+		max_data_size -= 2 * sizeof (struct mthca_data_seg);
+		break;
+
+	case UD:
+		if (mthca_is_memfree(dev))
+			max_data_size -= sizeof (struct mthca_arbel_ud_seg);
+		else
+			max_data_size -= sizeof (struct mthca_tavor_ud_seg);
+		break;
+
+	default:
+		max_data_size -= sizeof (struct mthca_raddr_seg);
+		break;
+	}
+
+	/* We don't support inline data for kernel QPs (yet). */
+	if (!pd->ibpd.uobject)
+		qp->max_inline_data = 0;
+	else

+		qp->max_inline_data = max_data_size - MTHCA_INLINE_HEADER_SIZE;
+
+	qp->sq.max_gs = max_data_size / sizeof (struct mthca_data_seg);
+	qp->rq.max_gs = (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) -
+			sizeof (struct mthca_next_seg)) /
+			sizeof (struct mthca_data_seg);
+}
+
 /*
  * Allocate and register buffer for WQEs.  qp->rq.max, sq.max,
  * rq.max_gs and sq.max_gs must all be assigned.
@@ -902,27 +944,53 @@
 	size = sizeof (struct mthca_next_seg) +
 		qp->rq.max_gs * sizeof (struct mthca_data_seg);
 
+	if (size > dev->limits.max_desc_sz)
+		return -EINVAL;
+
 	for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size;
 	     qp->rq.wqe_shift++)
 		; /* nothing */
 
-	size = sizeof (struct mthca_next_seg) +
-		qp->sq.max_gs * sizeof (struct mthca_data_seg);
+	size = qp->sq.max_gs * sizeof (struct mthca_data_seg);
 	switch (qp->transport) {
 	case MLX:
 		size += 2 * sizeof (struct mthca_data_seg);
 		break;
+
 	case UD:
-		if (mthca_is_memfree(dev))
-			size += sizeof (struct mthca_arbel_ud_seg);
-		else
-			size += sizeof (struct mthca_tavor_ud_seg);
+		size += mthca_is_memfree(dev) ?
+			sizeof (struct mthca_arbel_ud_seg) :
+			sizeof (struct mthca_tavor_ud_seg);
 		break;
+
+	case UC:
+		size += sizeof (struct mthca_raddr_seg);
+		break;
+
+	case RC:
+		size += sizeof (struct mthca_raddr_seg);
+		/*
+		 * An atomic op will require an atomic segment, a
+		 * remote address segment and one scatter entry.
+		 */
+		size = max_t(int, size,
+			     sizeof (struct mthca_atomic_seg) +
+			     sizeof (struct mthca_raddr_seg) +
+			     sizeof (struct mthca_data_seg));
+		break;
+
 	default:
-		/* bind seg is as big as atomic + raddr segs */
-		size += sizeof (struct mthca_bind_seg);
+		break;
 	}
 
+	/* Make sure that we have enough space for a bind request */
+	size = max_t(int, size, sizeof (struct mthca_bind_seg));
+
+	size += sizeof (struct mthca_next_seg);
+
+	if (size > dev->limits.max_desc_sz)
+		return -EINVAL;
+
 	for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size;
 	     qp->sq.wqe_shift++)
 		; /* nothing */
@@ -1066,6 +1134,8 @@
 		return ret;
 	}
 
+	mthca_adjust_qp_caps(dev, pd, qp);
+
 	/*
 	 * If this is a userspace QP, we're done now.  The doorbells
 	 * will be allocated and buffers will be initialized in
@@ -1486,8 +1556,8 @@
 				}
 
 				wqe += sizeof (struct mthca_atomic_seg);
-				size += sizeof (struct mthca_raddr_seg) / 16 +
-					sizeof (struct mthca_atomic_seg);
+				size += (sizeof (struct mthca_raddr_seg) +
+					 sizeof (struct mthca_atomic_seg)) / 16;
 				break;
 
 			case IB_WR_RDMA_WRITE:
@@ -1637,6 +1707,7 @@
 {
 	struct mthca_dev *dev = to_mdev(ibqp->device);
 	struct mthca_qp *qp = to_mqp(ibqp);
+	__be32 doorbell[2];
 	unsigned long flags;
 	int err = 0;
 	int nreq;
@@ -1654,6 +1725,22 @@
 	ind = qp->rq.next_ind;
 
 	for (nreq = 0; wr; ++nreq, wr = wr->next) {
+		if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
+			nreq = 0;
+
+			doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
+			doorbell[1] = cpu_to_be32(qp->qpn << 8);
+
+			wmb();
+
+			mthca_write64(doorbell,
+				      dev->kar + MTHCA_RECEIVE_DOORBELL,
+				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
+
+			qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
+			size0 = 0;
+		}
+
 		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
 			mthca_err(dev, "RQ %06x full (%u head, %u tail,"
 					" %d max, %d nreq)\n", qp->qpn,
@@ -1711,8 +1798,6 @@
 
 out:
 	if (likely(nreq)) {
-		__be32 doorbell[2];
-
 		doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
 		doorbell[1] = cpu_to_be32((qp->qpn << 8) | nreq);
 
@@ -1806,8 +1891,8 @@
 				}
 
 				wqe += sizeof (struct mthca_atomic_seg);
-				size += sizeof (struct mthca_raddr_seg) / 16 +
-					sizeof (struct mthca_atomic_seg);
+				size += (sizeof (struct mthca_raddr_seg) +
+					 sizeof (struct mthca_atomic_seg)) / 16;
 				break;
 
 			case IB_WR_RDMA_READ:
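
The Tavor receive path above now rings the doorbell every MTHCA_TAVOR_MAX_WQES_PER_RECV_DB (256) work requests rather than only once per post call, since the request count occupies just the low byte of the second doorbell word. A sketch of the batching pattern with the hardware access replaced by a print:

#include <stdio.h>

#define MAX_WQES_PER_DB 256

static int doorbells;

/* Stands in for mthca_write64() to the receive doorbell. */
static void ring_doorbell(int first_ind, int nreq)
{
	printf("doorbell: first_ind=%d nreq=%d\n", first_ind, nreq);
	doorbells++;
}

int main(void)
{
	int total = 600;        /* work requests in one post_recv call */
	int nreq = 0, first_ind = 0, i;

	for (i = 0; i < total; i++) {
		if (nreq == MAX_WQES_PER_DB) {
			ring_doorbell(first_ind, nreq);
			first_ind = i;
			nreq = 0;
		}
		nreq++;         /* "post" one receive WQE */
	}
	if (nreq)
		ring_doorbell(first_ind, nreq);

	printf("%d WQEs posted, %d doorbells rung\n", total, doorbells);  /* 3 */
	return 0;
}
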
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index 26d5161..f7d2342 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -417,6 +417,7 @@
 {
 	struct mthca_dev *dev = to_mdev(ibsrq->device);
 	struct mthca_srq *srq = to_msrq(ibsrq);
+	__be32 doorbell[2];
 	unsigned long flags;
 	int err = 0;
 	int first_ind;
@@ -432,6 +433,25 @@
 	first_ind = srq->first_free;
 
 	for (nreq = 0; wr; ++nreq, wr = wr->next) {
+		if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
+			nreq = 0;
+
+			doorbell[0] = cpu_to_be32(first_ind << srq->wqe_shift);
+			doorbell[1] = cpu_to_be32(srq->srqn << 8);
+
+			/*
+			 * Make sure that descriptors are written
+			 * before doorbell is rung.
+			 */
+			wmb();
+
+			mthca_write64(doorbell,
+				      dev->kar + MTHCA_RECEIVE_DOORBELL,
+				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
+
+			first_ind = srq->first_free;
+		}
+
 		ind = srq->first_free;
 
 		if (ind < 0) {
@@ -494,8 +514,6 @@
 	}
 
 	if (likely(nreq)) {
-		__be32 doorbell[2];
-
 		doorbell[0] = cpu_to_be32(first_ind << srq->wqe_shift);
 		doorbell[1] = cpu_to_be32((srq->srqn << 8) | nreq);
 
diff --git a/drivers/infiniband/hw/mthca/mthca_wqe.h b/drivers/infiniband/hw/mthca/mthca_wqe.h
index 1f4c0ff..73f1c0b 100644
--- a/drivers/infiniband/hw/mthca/mthca_wqe.h
+++ b/drivers/infiniband/hw/mthca/mthca_wqe.h
@@ -49,7 +49,8 @@
 };
 
 enum {
-	MTHCA_INVAL_LKEY = 0x100
+	MTHCA_INVAL_LKEY			= 0x100,
+	MTHCA_TAVOR_MAX_WQES_PER_RECV_DB	= 256
 };
 
 struct mthca_next_seg {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 0095acc..9923a15 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -179,6 +179,7 @@
 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
 	struct list_head fs_list;
 	struct dentry *mcg_dentry;
+	struct dentry *path_dentry;
 #endif
 };
 
@@ -270,7 +271,6 @@
 
 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
 struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev);
-void ipoib_mcast_iter_free(struct ipoib_mcast_iter *iter);
 int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter);
 void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter,
 				  union ib_gid *gid,
@@ -278,6 +278,11 @@
 				  unsigned int *queuelen,
 				  unsigned int *complete,
 				  unsigned int *send_only);
+
+struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev);
+int ipoib_path_iter_next(struct ipoib_path_iter *iter);
+void ipoib_path_iter_read(struct ipoib_path_iter *iter,
+			  struct ipoib_path *path);
 #endif
 
 int ipoib_mcast_attach(struct net_device *dev, u16 mlid,
@@ -299,13 +304,13 @@
 int ipoib_pkey_dev_delay_open(struct net_device *dev);
 
 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
-int ipoib_create_debug_file(struct net_device *dev);
-void ipoib_delete_debug_file(struct net_device *dev);
+void ipoib_create_debug_files(struct net_device *dev);
+void ipoib_delete_debug_files(struct net_device *dev);
 int ipoib_register_debugfs(void);
 void ipoib_unregister_debugfs(void);
 #else
-static inline int ipoib_create_debug_file(struct net_device *dev) { return 0; }
-static inline void ipoib_delete_debug_file(struct net_device *dev) { }
+static inline void ipoib_create_debug_files(struct net_device *dev) { }
+static inline void ipoib_delete_debug_files(struct net_device *dev) { }
 static inline int ipoib_register_debugfs(void) { return 0; }
 static inline void ipoib_unregister_debugfs(void) { }
 #endif
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_fs.c b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
index 38b150f..685258e 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
@@ -43,6 +43,18 @@
 
 static struct dentry *ipoib_root;
 
+static void format_gid(union ib_gid *gid, char *buf)
+{
+	int i, n;
+
+	for (n = 0, i = 0; i < 8; ++i) {
+		n += sprintf(buf + n, "%x",
+			     be16_to_cpu(((__be16 *) gid->raw)[i]));
+		if (i < 7)
+			buf[n++] = ':';
+	}
+}
+
 static void *ipoib_mcg_seq_start(struct seq_file *file, loff_t *pos)
 {
 	struct ipoib_mcast_iter *iter;
@@ -54,7 +66,7 @@
 
 	while (n--) {
 		if (ipoib_mcast_iter_next(iter)) {
-			ipoib_mcast_iter_free(iter);
+			kfree(iter);
 			return NULL;
 		}
 	}
@@ -70,7 +82,7 @@
 	(*pos)++;
 
 	if (ipoib_mcast_iter_next(iter)) {
-		ipoib_mcast_iter_free(iter);
+		kfree(iter);
 		return NULL;
 	}
 
@@ -87,32 +99,32 @@
 	struct ipoib_mcast_iter *iter = iter_ptr;
 	char gid_buf[sizeof "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"];
 	union ib_gid mgid;
-	int i, n;
 	unsigned long created;
 	unsigned int queuelen, complete, send_only;
 
-	if (iter) {
-		ipoib_mcast_iter_read(iter, &mgid, &created, &queuelen,
-				      &complete, &send_only);
+	if (!iter)
+		return 0;
 
-		for (n = 0, i = 0; i < sizeof mgid / 2; ++i) {
-			n += sprintf(gid_buf + n, "%x",
-				     be16_to_cpu(((__be16 *) mgid.raw)[i]));
-			if (i < sizeof mgid / 2 - 1)
-				gid_buf[n++] = ':';
-		}
-	}
+	ipoib_mcast_iter_read(iter, &mgid, &created, &queuelen,
+			      &complete, &send_only);
 
-	seq_printf(file, "GID: %*s", -(1 + (int) sizeof gid_buf), gid_buf);
+	format_gid(&mgid, gid_buf);
 
 	seq_printf(file,
-		   " created: %10ld queuelen: %4d complete: %d send_only: %d\n",
-		   created, queuelen, complete, send_only);
+		   "GID: %s\n"
+		   "  created: %10ld\n"
+		   "  queuelen: %9d\n"
+		   "  complete: %9s\n"
+		   "  send_only: %8s\n"
+		   "\n",
+		   gid_buf, created, queuelen,
+		   complete ? "yes" : "no",
+		   send_only ? "yes" : "no");
 
 	return 0;
 }
 
-static struct seq_operations ipoib_seq_ops = {
+static struct seq_operations ipoib_mcg_seq_ops = {
 	.start = ipoib_mcg_seq_start,
 	.next  = ipoib_mcg_seq_next,
 	.stop  = ipoib_mcg_seq_stop,
@@ -124,7 +136,7 @@
 	struct seq_file *seq;
 	int ret;
 
-	ret = seq_open(file, &ipoib_seq_ops);
+	ret = seq_open(file, &ipoib_mcg_seq_ops);
 	if (ret)
 		return ret;
 
@@ -134,7 +146,7 @@
 	return 0;
 }
 
-static struct file_operations ipoib_fops = {
+static struct file_operations ipoib_mcg_fops = {
 	.owner   = THIS_MODULE,
 	.open    = ipoib_mcg_open,
 	.read    = seq_read,
@@ -142,25 +154,138 @@
 	.release = seq_release
 };
 
-int ipoib_create_debug_file(struct net_device *dev)
+static void *ipoib_path_seq_start(struct seq_file *file, loff_t *pos)
 {
-	struct ipoib_dev_priv *priv = netdev_priv(dev);
-	char name[IFNAMSIZ + sizeof "_mcg"];
+	struct ipoib_path_iter *iter;
+	loff_t n = *pos;
 
-	snprintf(name, sizeof name, "%s_mcg", dev->name);
+	iter = ipoib_path_iter_init(file->private);
+	if (!iter)
+		return NULL;
 
-	priv->mcg_dentry = debugfs_create_file(name, S_IFREG | S_IRUGO,
-					       ipoib_root, dev, &ipoib_fops);
+	while (n--) {
+		if (ipoib_path_iter_next(iter)) {
+			kfree(iter);
+			return NULL;
+		}
+	}
 
-	return priv->mcg_dentry ? 0 : -ENOMEM;
+	return iter;
 }
 
-void ipoib_delete_debug_file(struct net_device *dev)
+static void *ipoib_path_seq_next(struct seq_file *file, void *iter_ptr,
+				   loff_t *pos)
+{
+	struct ipoib_path_iter *iter = iter_ptr;
+
+	(*pos)++;
+
+	if (ipoib_path_iter_next(iter)) {
+		kfree(iter);
+		return NULL;
+	}
+
+	return iter;
+}
+
+static void ipoib_path_seq_stop(struct seq_file *file, void *iter_ptr)
+{
+	/* nothing for now */
+}
+
+static int ipoib_path_seq_show(struct seq_file *file, void *iter_ptr)
+{
+	struct ipoib_path_iter *iter = iter_ptr;
+	char gid_buf[sizeof "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"];
+	struct ipoib_path path;
+	int rate;
+
+	if (!iter)
+		return 0;
+
+	ipoib_path_iter_read(iter, &path);
+
+	format_gid(&path.pathrec.dgid, gid_buf);
+
+	seq_printf(file,
+		   "GID: %s\n"
+		   "  complete: %6s\n",
+		   gid_buf, path.pathrec.dlid ? "yes" : "no");
+
+	if (path.pathrec.dlid) {
+		rate = ib_sa_rate_enum_to_int(path.pathrec.rate) * 25;
+
+		seq_printf(file,
+			   "  DLID:     0x%04x\n"
+			   "  SL: %12d\n"
+			   "  rate: %*d%s Gb/sec\n",
+			   be16_to_cpu(path.pathrec.dlid),
+			   path.pathrec.sl,
+			   10 - ((rate % 10) ? 2 : 0),
+			   rate / 10, rate % 10 ? ".5" : "");
+	}
+
+	seq_putc(file, '\n');
+
+	return 0;
+}
+
+static struct seq_operations ipoib_path_seq_ops = {
+	.start = ipoib_path_seq_start,
+	.next  = ipoib_path_seq_next,
+	.stop  = ipoib_path_seq_stop,
+	.show  = ipoib_path_seq_show,
+};
+
+static int ipoib_path_open(struct inode *inode, struct file *file)
+{
+	struct seq_file *seq;
+	int ret;
+
+	ret = seq_open(file, &ipoib_path_seq_ops);
+	if (ret)
+		return ret;
+
+	seq = file->private_data;
+	seq->private = inode->u.generic_ip;
+
+	return 0;
+}
+
+static struct file_operations ipoib_path_fops = {
+	.owner   = THIS_MODULE,
+	.open    = ipoib_path_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release
+};
+
+void ipoib_create_debug_files(struct net_device *dev)
+{
+	struct ipoib_dev_priv *priv = netdev_priv(dev);
+	char name[IFNAMSIZ + sizeof "_path"];
+
+	snprintf(name, sizeof name, "%s_mcg", dev->name);
+	priv->mcg_dentry = debugfs_create_file(name, S_IFREG | S_IRUGO,
+					       ipoib_root, dev, &ipoib_mcg_fops);
+	if (!priv->mcg_dentry)
+		ipoib_warn(priv, "failed to create mcg debug file\n");
+
+	snprintf(name, sizeof name, "%s_path", dev->name);
+	priv->path_dentry = debugfs_create_file(name, S_IFREG | S_IRUGO,
+						ipoib_root, dev, &ipoib_path_fops);
+	if (!priv->path_dentry)
+		ipoib_warn(priv, "failed to create path debug file\n");
+}
+
+void ipoib_delete_debug_files(struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 
 	if (priv->mcg_dentry)
 		debugfs_remove(priv->mcg_dentry);
+	if (priv->path_dentry)
+		debugfs_remove(priv->path_dentry);
 }
 
 int ipoib_register_debugfs(void)
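
The path file prints the link rate in Gb/sec from a multiple of 2.5 Gb/sec: multiplying by 25 gives tenths of a Gb/sec, and the %*d field width leaves room for the ".5" suffix only when one is printed. A worked example of that formatting; the multiplier values below are illustrative inputs, not values taken from the SA rate enum:

#include <stdio.h>

int main(void)
{
	/* hypothetical link-rate multiples of 2.5 Gb/sec (e.g. 1x, 4x, 12x) */
	int mult[] = { 1, 4, 12 };
	int i;

	for (i = 0; i < 3; i++) {
		int rate = mult[i] * 25;        /* tenths of a Gb/sec */

		/* same width trick as ipoib_path_seq_show() */
		printf("rate: %*d%s Gb/sec\n",
		       10 - ((rate % 10) ? 2 : 0),
		       rate / 10, rate % 10 ? ".5" : "");
	}
	return 0;
}

The three lines come out as 2.5, 10 and 30 Gb/sec, all padded to the same field width.
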
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index ce02962..2fa3075 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -58,6 +58,11 @@
 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
 #endif
 
+struct ipoib_path_iter {
+	struct net_device *dev;
+	struct ipoib_path  path;
+};
+
 static const u8 ipv4_bcast_addr[] = {
 	0x00, 0xff, 0xff, 0xff,
 	0xff, 0x12, 0x40, 0x1b,	0x00, 0x00, 0x00, 0x00,
@@ -250,6 +255,64 @@
 	kfree(path);
 }
 
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+
+struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
+{
+	struct ipoib_path_iter *iter;
+
+	iter = kmalloc(sizeof *iter, GFP_KERNEL);
+	if (!iter)
+		return NULL;
+
+	iter->dev = dev;
+	memset(iter->path.pathrec.dgid.raw, 0, 16);
+
+	if (ipoib_path_iter_next(iter)) {
+		kfree(iter);
+		return NULL;
+	}
+
+	return iter;
+}
+
+int ipoib_path_iter_next(struct ipoib_path_iter *iter)
+{
+	struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
+	struct rb_node *n;
+	struct ipoib_path *path;
+	int ret = 1;
+
+	spin_lock_irq(&priv->lock);
+
+	n = rb_first(&priv->path_tree);
+
+	while (n) {
+		path = rb_entry(n, struct ipoib_path, rb_node);
+
+		if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
+			   sizeof (union ib_gid)) < 0) {
+			iter->path = *path;
+			ret = 0;
+			break;
+		}
+
+		n = rb_next(n);
+	}
+
+	spin_unlock_irq(&priv->lock);
+
+	return ret;
+}
+
+void ipoib_path_iter_read(struct ipoib_path_iter *iter,
+			  struct ipoib_path *path)
+{
+	*path = iter->path;
+}
+
+#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */
+
 void ipoib_flush_paths(struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -763,7 +826,7 @@
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv;
 
-	ipoib_delete_debug_file(dev);
+	ipoib_delete_debug_files(dev);
 
 	/* Delete any child interfaces first */
 	list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
@@ -972,8 +1035,7 @@
 		goto register_failed;
 	}
 
-	if (ipoib_create_debug_file(priv->dev))
-		goto debug_failed;
+	ipoib_create_debug_files(priv->dev);
 
 	if (ipoib_add_pkey_attr(priv->dev))
 		goto sysfs_failed;
@@ -987,9 +1049,7 @@
 	return priv->dev;
 
 sysfs_failed:
-	ipoib_delete_debug_file(priv->dev);
-
-debug_failed:
+	ipoib_delete_debug_files(priv->dev);
 	unregister_netdev(priv->dev);
 
 register_failed:
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 3ecf78a..c33ed87 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -120,12 +120,8 @@
 	if (mcast->ah)
 		ipoib_put_ah(mcast->ah);
 
-	while (!skb_queue_empty(&mcast->pkt_queue)) {
-		struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);
-
-		skb->dev = dev;
-		dev_kfree_skb_any(skb);
-	}
+	while (!skb_queue_empty(&mcast->pkt_queue))
+		dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
 
 	kfree(mcast);
 }
@@ -317,13 +313,8 @@
 					IPOIB_GID_ARG(mcast->mcmember.mgid), status);
 
 		/* Flush out any queued packets */
-		while (!skb_queue_empty(&mcast->pkt_queue)) {
-			struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);
-
-			skb->dev = dev;
-
-			dev_kfree_skb_any(skb);
-		}
+		while (!skb_queue_empty(&mcast->pkt_queue))
+			dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
 
 		/* Clear the busy flag so we try again */
 		clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
@@ -928,21 +919,16 @@
 		return NULL;
 
 	iter->dev = dev;
-	memset(iter->mgid.raw, 0, sizeof iter->mgid);
+	memset(iter->mgid.raw, 0, 16);
 
 	if (ipoib_mcast_iter_next(iter)) {
-		ipoib_mcast_iter_free(iter);
+		kfree(iter);
 		return NULL;
 	}
 
 	return iter;
 }
 
-void ipoib_mcast_iter_free(struct ipoib_mcast_iter *iter)
-{
-	kfree(iter);
-}
-
 int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 332d730..d280b34 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -113,8 +113,7 @@
 
 	priv->parent = ppriv->dev;
 
-	if (ipoib_create_debug_file(priv->dev))
-		goto debug_failed;
+	ipoib_create_debug_files(priv->dev);
 
 	if (ipoib_add_pkey_attr(priv->dev))
 		goto sysfs_failed;
@@ -130,9 +129,7 @@
 	return 0;
 
 sysfs_failed:
-	ipoib_delete_debug_file(priv->dev);
-
-debug_failed:
+	ipoib_delete_debug_files(priv->dev);
 	unregister_netdev(priv->dev);
 
 register_failed:
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 8f46427..49fa1e4 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -2707,7 +2707,7 @@
 
 	if (j == entry_count) {
 		bp->flash_info = NULL;
-		printk(KERN_ALERT "Unknown flash/EEPROM type.\n");
+		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
 		rc = -ENODEV;
 	}
 
@@ -3903,6 +3903,8 @@
 
 	pkt_size = 1514;
 	skb = dev_alloc_skb(pkt_size);
+	if (!skb)
+		return -ENOMEM;
 	packet = skb_put(skb, pkt_size);
 	memcpy(packet, bp->mac_addr, 6);
 	memset(packet + 6, 0x0, 8);
@@ -4798,11 +4800,7 @@
   	struct bnx2 *bp = dev->priv;
 	int rc;
 
-	if (eeprom->offset > bp->flash_info->total_size)
-		return -EINVAL;
-
-	if ((eeprom->offset + eeprom->len) > bp->flash_info->total_size)
-		eeprom->len = bp->flash_info->total_size - eeprom->offset;
+	/* parameters already validated in ethtool_get_eeprom */
 
 	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
 
@@ -4816,11 +4814,7 @@
   	struct bnx2 *bp = dev->priv;
 	int rc;
 
-	if (eeprom->offset > bp->flash_info->total_size)
-		return -EINVAL;
-
-	if ((eeprom->offset + eeprom->len) > bp->flash_info->total_size)
-		eeprom->len = bp->flash_info->total_size - eeprom->offset;
+	/* parameters already validated in ethtool_set_eeprom */
 
 	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
 
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index 9342d5b..f5d49a1 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -37,6 +37,7 @@
 #include <linux/ethtool.h>
 #include <linux/bitops.h>
 #include <linux/fs.h>
+#include <linux/platform_device.h>
 
 #include <linux/vmalloc.h>
 #include <asm/pgtable.h>
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c
index a940b96..e67b1d0 100644
--- a/drivers/net/fs_enet/mac-fcc.c
+++ b/drivers/net/fs_enet/mac-fcc.c
@@ -34,6 +34,7 @@
 #include <linux/ethtool.h>
 #include <linux/bitops.h>
 #include <linux/fs.h>
+#include <linux/platform_device.h>
 
 #include <asm/immap_cpm2.h>
 #include <asm/mpc8260.h>
diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/fs_enet/mac-fec.c
index 5ef4e84..2e8f444 100644
--- a/drivers/net/fs_enet/mac-fec.c
+++ b/drivers/net/fs_enet/mac-fec.c
@@ -34,6 +34,7 @@
 #include <linux/ethtool.h>
 #include <linux/bitops.h>
 #include <linux/fs.h>
+#include <linux/platform_device.h>
 
 #include <asm/irq.h>
 #include <asm/uaccess.h>
diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c
index d8c6e9c..a3897fd 100644
--- a/drivers/net/fs_enet/mac-scc.c
+++ b/drivers/net/fs_enet/mac-scc.c
@@ -34,6 +34,7 @@
 #include <linux/ethtool.h>
 #include <linux/bitops.h>
 #include <linux/fs.h>
+#include <linux/platform_device.h>
 
 #include <asm/irq.h>
 #include <asm/uaccess.h>
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index fcb66b9..e8593a6 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -306,7 +306,7 @@
 {
 	struct pci_controller *phb;
 
-	if (PCI_DN(dn)->phb) {
+	if (PCI_DN(dn) && PCI_DN(dn)->phb) {
 		/* PHB already exists */
 		return -EINVAL;
 	}
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index ccf2003..309eb55 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -156,7 +156,7 @@
 
 config PCMCIA_M8XX
         tristate "MPC8xx PCMCIA support"
-        depends on PCMCIA && PPC
+        depends on PCMCIA && PPC && 8xx
         select PCCARD_NONSTATIC
         help
         Say Y here to include support for PowerPC 8xx series PCMCIA
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index fe37541..bcecf51 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -25,7 +25,7 @@
 obj-$(CONFIG_I82365)				+= i82365.o
 obj-$(CONFIG_I82092)				+= i82092.o
 obj-$(CONFIG_TCIC)				+= tcic.o
-obj-$(CONFIG_PCMCIA_M8XX)                              += m8xx_pcmcia.o
+obj-$(CONFIG_PCMCIA_M8XX)			+= m8xx_pcmcia.o
 obj-$(CONFIG_HD64465_PCMCIA)			+= hd64465_ss.o
 obj-$(CONFIG_PCMCIA_SA1100)			+= sa11xx_core.o sa1100_cs.o
 obj-$(CONFIG_PCMCIA_SA1111)			+= sa11xx_core.o sa1111_cs.o
@@ -47,10 +47,10 @@
 au1x00_ss-$(CONFIG_MIPS_PB1500)			+= au1000_pb1x00.o
 au1x00_ss-$(CONFIG_MIPS_DB1000)			+= au1000_db1x00.o
 au1x00_ss-$(CONFIG_MIPS_DB1100)			+= au1000_db1x00.o
-au1x00_ss-$(CONFIG_MIPS_DB1200)                 += au1000_db1x00.o
+au1x00_ss-$(CONFIG_MIPS_DB1200)			+= au1000_db1x00.o
 au1x00_ss-$(CONFIG_MIPS_DB1500)			+= au1000_db1x00.o
 au1x00_ss-$(CONFIG_MIPS_DB1550)			+= au1000_db1x00.o
-au1x00_ss-$(CONFIG_MIPS_XXS1500)               += au1000_xxs1500.o
+au1x00_ss-$(CONFIG_MIPS_XXS1500)		+= au1000_xxs1500.o
 
 sa1111_cs-y					+= sa1111_generic.o
 sa1111_cs-$(CONFIG_ASSABET_NEPONSET)		+= sa1100_neponset.o
diff --git a/drivers/pcmcia/au1000_db1x00.c b/drivers/pcmcia/au1000_db1x00.c
index 24cfee1..abc13f2 100644
--- a/drivers/pcmcia/au1000_db1x00.c
+++ b/drivers/pcmcia/au1000_db1x00.c
@@ -30,6 +30,7 @@
  *
  */
 
+#include <linux/config.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
diff --git a/drivers/pcmcia/au1000_generic.h b/drivers/pcmcia/au1000_generic.h
index b0e7908..f2c970b 100644
--- a/drivers/pcmcia/au1000_generic.h
+++ b/drivers/pcmcia/au1000_generic.h
@@ -22,6 +22,8 @@
 #define __ASM_AU1000_PCMCIA_H
 
 /* include the world */
+#include <linux/config.h>
+
 #include <pcmcia/cs_types.h>
 #include <pcmcia/cs.h>
 #include <pcmcia/ss.h>
diff --git a/drivers/pcmcia/au1000_pb1x00.c b/drivers/pcmcia/au1000_pb1x00.c
index 86c0808..fd5522e 100644
--- a/drivers/pcmcia/au1000_pb1x00.c
+++ b/drivers/pcmcia/au1000_pb1x00.c
@@ -21,6 +21,7 @@
  *  with this program; if not, write to the Free Software Foundation, Inc.,
  *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
  */
+#include <linux/config.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/delay.h>
diff --git a/drivers/pcmcia/au1000_xxs1500.c b/drivers/pcmcia/au1000_xxs1500.c
index 01a895b..01874b0 100644
--- a/drivers/pcmcia/au1000_xxs1500.c
+++ b/drivers/pcmcia/au1000_xxs1500.c
@@ -27,7 +27,6 @@
  */
 #include <linux/module.h>
 #include <linux/init.h>
-#include <linux/config.h>
 #include <linux/delay.h>
 #include <linux/ioport.h>
 #include <linux/kernel.h>
diff --git a/drivers/pcmcia/i82365.c b/drivers/pcmcia/i82365.c
index 7ce455d..4ddd762 100644
--- a/drivers/pcmcia/i82365.c
+++ b/drivers/pcmcia/i82365.c
@@ -1366,6 +1366,7 @@
     if (sockets == 0) {
 	printk("not found.\n");
 	platform_device_unregister(&i82365_device);
+	release_region(i365_base, 2);
 	driver_unregister(&i82365_driver);
 	return -ENODEV;
     }
diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c
index f8bed87..6d9f71c 100644
--- a/drivers/pcmcia/m8xx_pcmcia.c
+++ b/drivers/pcmcia/m8xx_pcmcia.c
@@ -39,7 +39,6 @@
 
 #include <asm/io.h>
 #include <asm/bitops.h>
-#include <asm/segment.h>
 #include <asm/system.h>
 
 #include <linux/kernel.h>
@@ -50,6 +49,7 @@
 #include <linux/ioport.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
+#include <linux/platform_device.h>
 
 #include <asm/mpc8xx.h>
 #include <asm/8xx_immap.h>
@@ -546,29 +546,11 @@
 	free_irq(pcmcia_schlvl, NULL);
 }
 
-/* copied from tcic.c */
-
-static int m8xx_drv_suspend(struct device *dev, pm_message_t state, u32 level)
-{
-        int ret = 0;
-        if (level == SUSPEND_SAVE_STATE)
-                ret = pcmcia_socket_dev_suspend(dev, state);
-        return ret;
-}
-
-static int m8xx_drv_resume(struct device *dev, u32 level)
-{
-        int ret = 0;
-        if (level == RESUME_RESTORE_STATE)
-                ret = pcmcia_socket_dev_resume(dev);
-        return ret;
-}
-
 static struct device_driver m8xx_driver = {
         .name = "m8xx-pcmcia",
         .bus = &platform_bus_type,
-        .suspend = m8xx_drv_suspend,
-        .resume = m8xx_drv_resume,
+        .suspend = pcmcia_socket_dev_suspend,
+        .resume = pcmcia_socket_dev_resume,
 };
 
 static struct platform_device m8xx_device = {
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index c888af4..3553da0 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -395,6 +395,7 @@
 	int log = test_bit(IDESCSI_LOG_CMD, &scsi->log);
 	struct Scsi_Host *host;
 	u8 *scsi_buf;
+	int errors = rq->errors;
 	unsigned long flags;
 
 	if (!(rq->flags & (REQ_SPECIAL|REQ_SENSE))) {
@@ -421,11 +422,11 @@
 			printk (KERN_WARNING "ide-scsi: %s: timed out for %lu\n",
 					drive->name, pc->scsi_cmd->serial_number);
 		pc->scsi_cmd->result = DID_TIME_OUT << 16;
-	} else if (rq->errors >= ERROR_MAX) {
+	} else if (errors >= ERROR_MAX) {
 		pc->scsi_cmd->result = DID_ERROR << 16;
 		if (log)
 			printk ("ide-scsi: %s: I/O error for %lu\n", drive->name, pc->scsi_cmd->serial_number);
-	} else if (rq->errors) {
+	} else if (errors) {
 		if (log)
 			printk ("ide-scsi: %s: check condition for %lu\n", drive->name, pc->scsi_cmd->serial_number);
 		if (!idescsi_check_condition(drive, rq))
diff --git a/drivers/video/backlight/corgi_bl.c b/drivers/video/backlight/corgi_bl.c
index 4867498..bd9a699 100644
--- a/drivers/video/backlight/corgi_bl.c
+++ b/drivers/video/backlight/corgi_bl.c
@@ -48,6 +48,12 @@
 	corgibl_mach_set_intensity(intensity);
 
 	spin_unlock_irqrestore(&bl_lock, flags);
+
+	corgi_kick_batt = symbol_get(sharpsl_battery_kick);
+	if (corgi_kick_batt) {
+		corgi_kick_batt();
+		symbol_put(sharpsl_battery_kick);
+	}
 }
 
 static void corgibl_blank(int blank)
diff --git a/include/asm-alpha/ide.h b/include/asm-alpha/ide.h
index 68934a2..6126afe 100644
--- a/include/asm-alpha/ide.h
+++ b/include/asm-alpha/ide.h
@@ -15,10 +15,6 @@
 
 #include <linux/config.h>
 
-#ifndef MAX_HWIFS
-#define MAX_HWIFS	CONFIG_IDE_MAX_HWIFS
-#endif
-
 #define IDE_ARCH_OBSOLETE_DEFAULTS
 
 static inline int ide_default_irq(unsigned long base)
diff --git a/include/asm-arm/arch-ixp4xx/hardware.h b/include/asm-arm/arch-ixp4xx/hardware.h
index 55d85ee..cfb413c 100644
--- a/include/asm-arm/arch-ixp4xx/hardware.h
+++ b/include/asm-arm/arch-ixp4xx/hardware.h
@@ -44,5 +44,6 @@
 #include "ixdp425.h"
 #include "coyote.h"
 #include "prpmc1100.h"
+#include "nslu2.h"
 
 #endif  /* _ASM_ARCH_HARDWARE_H */
diff --git a/include/asm-arm/arch-ixp4xx/irqs.h b/include/asm-arm/arch-ixp4xx/irqs.h
index ca80828..2cf4930 100644
--- a/include/asm-arm/arch-ixp4xx/irqs.h
+++ b/include/asm-arm/arch-ixp4xx/irqs.h
@@ -93,4 +93,11 @@
 #define	IRQ_COYOTE_PCI_SLOT1	IRQ_IXP4XX_GPIO11
 #define	IRQ_COYOTE_IDE		IRQ_IXP4XX_GPIO5
 
+/*
+ * NSLU2 board IRQs
+ */
+#define	IRQ_NSLU2_PCI_INTA	IRQ_IXP4XX_GPIO11
+#define	IRQ_NSLU2_PCI_INTB	IRQ_IXP4XX_GPIO10
+#define	IRQ_NSLU2_PCI_INTC	IRQ_IXP4XX_GPIO9
+
 #endif
diff --git a/include/asm-arm/arch-ixp4xx/nslu2.h b/include/asm-arm/arch-ixp4xx/nslu2.h
new file mode 100644
index 0000000..b8b347a
--- /dev/null
+++ b/include/asm-arm/arch-ixp4xx/nslu2.h
@@ -0,0 +1,96 @@
+/*
+ * include/asm-arm/arch-ixp4xx/nslu2.h
+ *
+ * NSLU2 platform specific definitions
+ *
+ * Author: Mark Rakes <mrakes AT mac.com>
+ * Maintainers: http://www.nslu2-linux.org
+ *
+ * based on ixdp425.h:
+ *	Copyright 2004 (c) MontaVista, Software, Inc.
+ *
+ * This file is licensed under  the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __ASM_ARCH_HARDWARE_H__
+#error "Do not include this directly, instead #include <asm/hardware.h>"
+#endif
+
+#define NSLU2_FLASH_BASE	IXP4XX_EXP_BUS_CS0_BASE_PHYS
+#define NSLU2_FLASH_SIZE	IXP4XX_EXP_BUS_CSX_REGION_SIZE
+
+#define NSLU2_SDA_PIN		7
+#define NSLU2_SCL_PIN		6
+
+/*
+ * NSLU2 PCI IRQs
+ */
+#define NSLU2_PCI_MAX_DEV	3
+#define NSLU2_PCI_IRQ_LINES	3
+
+
+/* PCI controller GPIO to IRQ pin mappings */
+#define NSLU2_PCI_INTA_PIN	11
+#define NSLU2_PCI_INTB_PIN	10
+#define NSLU2_PCI_INTC_PIN	9
+#define NSLU2_PCI_INTD_PIN	8
+
+
+/* NSLU2 Timer */
+#define NSLU2_FREQ 66000000
+#define NSLU2_CLOCK_TICK_RATE (((NSLU2_FREQ / HZ & ~IXP4XX_OST_RELOAD_MASK) + 1) * HZ)
+#define NSLU2_CLOCK_TICKS_PER_USEC ((NSLU2_CLOCK_TICK_RATE + USEC_PER_SEC/2) / USEC_PER_SEC)
+
+/* GPIO */
+
+#define NSLU2_GPIO0		0
+#define NSLU2_GPIO1		1
+#define NSLU2_GPIO2		2
+#define NSLU2_GPIO3		3
+#define NSLU2_GPIO4		4
+#define NSLU2_GPIO5		5
+#define NSLU2_GPIO6		6
+#define NSLU2_GPIO7		7
+#define NSLU2_GPIO8		8
+#define NSLU2_GPIO9		9
+#define NSLU2_GPIO10		10
+#define NSLU2_GPIO11		11
+#define NSLU2_GPIO12		12
+#define NSLU2_GPIO13		13
+#define NSLU2_GPIO14		14
+#define NSLU2_GPIO15		15
+
+/* Buttons */
+
+#define NSLU2_PB_GPIO		NSLU2_GPIO5
+#define NSLU2_PO_GPIO		NSLU2_GPIO8	/* power off */
+#define NSLU2_RB_GPIO		NSLU2_GPIO12
+
+#define NSLU2_PB_IRQ		IRQ_IXP4XX_GPIO5
+#define NSLU2_RB_IRQ		IRQ_IXP4XX_GPIO12
+
+#define NSLU2_PB_BM		(1L << NSLU2_PB_GPIO)
+#define NSLU2_PO_BM		(1L << NSLU2_PO_GPIO)
+#define NSLU2_RB_BM		(1L << NSLU2_RB_GPIO)
+
+/* Buzzer */
+
+#define NSLU2_GPIO_BUZZ		4
+#define NSLU2_BZ_BM		(1L << NSLU2_GPIO_BUZZ)
+/* LEDs */
+
+#define NSLU2_LED_RED		NSLU2_GPIO0
+#define NSLU2_LED_GRN		NSLU2_GPIO1
+
+#define NSLU2_LED_RED_BM	(1L << NSLU2_LED_RED)
+#define NSLU2_LED_GRN_BM	(1L << NSLU2_LED_GRN)
+
+#define NSLU2_LED_DISK1		NSLU2_GPIO2
+#define NSLU2_LED_DISK2		NSLU2_GPIO3
+
+#define NSLU2_LED_DISK1_BM	(1L << NSLU2_GPIO2)
+#define NSLU2_LED_DISK2_BM	(1L << NSLU2_GPIO3)
+
+
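
NSLU2_CLOCK_TICK_RATE rounds the per-tick count down to something the OS timer reload register can hold, adds one, and scales back up by HZ. A quick numeric check of the formula; HZ = 100 and a two-bit reload mask are assumptions made purely for illustration, not values taken from the kernel headers:

#include <stdio.h>

int main(void)
{
	const unsigned long freq = 66000000UL;   /* NSLU2_FREQ */
	const unsigned long hz = 100;            /* assumed HZ */
	const unsigned long reload_mask = 0x3;   /* assumed IXP4XX_OST_RELOAD_MASK */

	/* same expressions as NSLU2_CLOCK_TICK_RATE / NSLU2_CLOCK_TICKS_PER_USEC */
	unsigned long tick_rate = ((freq / hz & ~reload_mask) + 1) * hz;
	unsigned long ticks_per_usec = (tick_rate + 1000000 / 2) / 1000000;

	printf("tick rate      = %lu\n", tick_rate);       /* 66000100 */
	printf("ticks per usec = %lu\n", ticks_per_usec);  /* 66 */
	return 0;
}
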
diff --git a/include/asm-arm/arch-omap/board-h4.h b/include/asm-arm/arch-omap/board-h4.h
index d64ee92..33ea29a 100644
--- a/include/asm-arm/arch-omap/board-h4.h
+++ b/include/asm-arm/arch-omap/board-h4.h
@@ -34,5 +34,7 @@
 #define OMAP24XX_ETHR_START             0x08000300
 #define OMAP24XX_ETHR_GPIO_IRQ		92
 
+#define H4_CS0_BASE		     0x04000000
+
 #endif /*  __ASM_ARCH_OMAP_H4_H */
 
diff --git a/include/asm-arm/arch-omap/board-innovator.h b/include/asm-arm/arch-omap/board-innovator.h
index 79574e0..b3cf334 100644
--- a/include/asm-arm/arch-omap/board-innovator.h
+++ b/include/asm-arm/arch-omap/board-innovator.h
@@ -26,7 +26,7 @@
 #ifndef __ASM_ARCH_OMAP_INNOVATOR_H
 #define __ASM_ARCH_OMAP_INNOVATOR_H
 
-#if defined (CONFIG_ARCH_OMAP1510)
+#if defined (CONFIG_ARCH_OMAP15XX)
 
 #ifndef OMAP_SDRAM_DEVICE
 #define OMAP_SDRAM_DEVICE			D256M_1X16_4B
@@ -44,7 +44,7 @@
 unsigned char fpga_read(int reg);
 #endif
 
-#endif /* CONFIG_ARCH_OMAP1510 */
+#endif /* CONFIG_ARCH_OMAP15XX */
 
 #if defined (CONFIG_ARCH_OMAP16XX)
 
diff --git a/include/asm-arm/arch-omap/clock.h b/include/asm-arm/arch-omap/clock.h
new file mode 100644
index 0000000..740c297
--- /dev/null
+++ b/include/asm-arm/arch-omap/clock.h
@@ -0,0 +1,91 @@
+/*
+ *  linux/include/asm-arm/arch-omap/clock.h
+ *
+ *  Copyright (C) 2004 - 2005 Nokia corporation
+ *  Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
+ *  Based on clocks.h by Tony Lindgren, Gordon McNutt and RidgeRun, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_OMAP_CLOCK_H
+#define __ARCH_ARM_OMAP_CLOCK_H
+
+struct module;
+
+struct clk {
+	struct list_head	node;
+	struct module		*owner;
+	const char		*name;
+	struct clk		*parent;
+	unsigned long		rate;
+	__u32			flags;
+	void __iomem		*enable_reg;
+	__u8			enable_bit;
+	__u8			rate_offset;
+	__u8			src_offset;
+	__s8			usecount;
+	void			(*recalc)(struct clk *);
+	int			(*set_rate)(struct clk *, unsigned long);
+	long			(*round_rate)(struct clk *, unsigned long);
+	void			(*init)(struct clk *);
+	int			(*enable)(struct clk *);
+	void			(*disable)(struct clk *);
+};
+
+struct clk_functions {
+	int		(*clk_enable)(struct clk *clk);
+	void		(*clk_disable)(struct clk *clk);
+	int		(*clk_use)(struct clk *clk);
+	void		(*clk_unuse)(struct clk *clk);
+	long		(*clk_round_rate)(struct clk *clk, unsigned long rate);
+	int		(*clk_set_rate)(struct clk *clk, unsigned long rate);
+	int		(*clk_set_parent)(struct clk *clk, struct clk *parent);
+	struct clk *	(*clk_get_parent)(struct clk *clk);
+	void		(*clk_allow_idle)(struct clk *clk);
+	void		(*clk_deny_idle)(struct clk *clk);
+};
+
+extern unsigned int mpurate;
+extern struct list_head clocks;
+extern spinlock_t clockfw_lock;
+
+extern int clk_init(struct clk_functions * custom_clocks);
+extern int clk_register(struct clk *clk);
+extern void clk_unregister(struct clk *clk);
+extern void propagate_rate(struct clk *clk);
+extern void followparent_recalc(struct clk * clk);
+extern void clk_allow_idle(struct clk *clk);
+extern void clk_deny_idle(struct clk *clk);
+
+/* Clock flags */
+#define RATE_CKCTL		(1 << 0)	/* Main fixed ratio clocks */
+#define RATE_FIXED		(1 << 1)	/* Fixed clock rate */
+#define RATE_PROPAGATES		(1 << 2)	/* Program children too */
+#define VIRTUAL_CLOCK		(1 << 3)	/* Composite clock from table */
+#define ALWAYS_ENABLED		(1 << 4)	/* Clock cannot be disabled */
+#define ENABLE_REG_32BIT	(1 << 5)	/* Use 32-bit access */
+#define VIRTUAL_IO_ADDRESS	(1 << 6)	/* Clock in virtual address */
+#define CLOCK_IDLE_CONTROL	(1 << 7)
+#define CLOCK_NO_IDLE_PARENT	(1 << 8)
+#define DELAYED_APP		(1 << 9)	/* Delay application of clock */
+#define CONFIG_PARTICIPANT	(1 << 10)	/* Fundamental clock */
+#define CM_MPU_SEL1		(1 << 11)	/* Domain divider/source */
+#define CM_DSP_SEL1		(1 << 12)
+#define CM_GFX_SEL1		(1 << 13)
+#define CM_MODEM_SEL1		(1 << 14)
+#define CM_CORE_SEL1		(1 << 15)	/* Sets divider for many */
+#define CM_CORE_SEL2		(1 << 16)	/* sets parent for GPT */
+#define CM_WKUP_SEL1		(1 << 17)
+#define CM_PLL_SEL1		(1 << 18)
+#define CM_PLL_SEL2		(1 << 19)
+#define CM_SYSCLKOUT_SEL1	(1 << 20)
+#define CLOCK_IN_OMAP730	(1 << 21)
+#define CLOCK_IN_OMAP1510	(1 << 22)
+#define CLOCK_IN_OMAP16XX	(1 << 23)
+#define CLOCK_IN_OMAP242X	(1 << 24)
+#define CLOCK_IN_OMAP243X	(1 << 25)
+
+#endif
diff --git a/include/asm-arm/arch-omap/common.h b/include/asm-arm/arch-omap/common.h
index 2a676b4..08d58ab 100644
--- a/include/asm-arm/arch-omap/common.h
+++ b/include/asm-arm/arch-omap/common.h
@@ -31,6 +31,6 @@
 
 extern void omap_map_common_io(void);
 extern struct sys_timer omap_timer;
-extern void omap_serial_init(int ports[]);
+extern void omap_serial_init(void);
 
 #endif /* __ARCH_ARM_MACH_OMAP_COMMON_H */
diff --git a/include/asm-arm/arch-omap/cpu.h b/include/asm-arm/arch-omap/cpu.h
index 1119e2b..ec7eb67 100644
--- a/include/asm-arm/arch-omap/cpu.h
+++ b/include/asm-arm/arch-omap/cpu.h
@@ -28,12 +28,7 @@
 
 extern unsigned int system_rev;
 
-#define OMAP_DIE_ID_0		0xfffe1800
-#define OMAP_DIE_ID_1		0xfffe1804
-#define OMAP_PRODUCTION_ID_0	0xfffe2000
-#define OMAP_PRODUCTION_ID_1	0xfffe2004
-#define OMAP32_ID_0		0xfffed400
-#define OMAP32_ID_1		0xfffed404
+#define omap2_cpu_rev()		((system_rev >> 8) & 0x0f)
 
 /*
  * Test if multicore OMAP support is needed
@@ -50,7 +45,7 @@
 #  define OMAP_NAME omap730
 # endif
 #endif
-#ifdef CONFIG_ARCH_OMAP1510
+#ifdef CONFIG_ARCH_OMAP15XX
 # ifdef OMAP_NAME
 #  undef  MULTI_OMAP1
 #  define MULTI_OMAP1
@@ -79,9 +74,11 @@
  * Macros to group OMAP into cpu classes.
  * These can be used in most places.
  * cpu_is_omap7xx():	True for OMAP730
- * cpu_is_omap15xx():	True for OMAP1510 and OMAP5910
+ * cpu_is_omap15xx():	True for OMAP1510, OMAP5910 and OMAP310
  * cpu_is_omap16xx():	True for OMAP1610, OMAP5912 and OMAP1710
- * cpu_is_omap24xx():	True for OMAP2420
+ * cpu_is_omap24xx():	True for OMAP2420, OMAP2422, OMAP2423, OMAP2430
+ * cpu_is_omap242x():	True for OMAP2420, OMAP2422, OMAP2423
+ * cpu_is_omap243x():	True for OMAP2430
  */
 #define GET_OMAP_CLASS	(system_rev & 0xff)
 
@@ -91,22 +88,35 @@
 	return (GET_OMAP_CLASS == (id)) ? 1 : 0;	\
 }
 
+#define GET_OMAP_SUBCLASS	((system_rev >> 20) & 0x0fff)
+
+#define IS_OMAP_SUBCLASS(subclass, id)			\
+static inline int is_omap ##subclass (void)		\
+{							\
+	return (GET_OMAP_SUBCLASS == (id)) ? 1 : 0;	\
+}
+
 IS_OMAP_CLASS(7xx, 0x07)
 IS_OMAP_CLASS(15xx, 0x15)
 IS_OMAP_CLASS(16xx, 0x16)
 IS_OMAP_CLASS(24xx, 0x24)
 
+IS_OMAP_SUBCLASS(242x, 0x242)
+IS_OMAP_SUBCLASS(243x, 0x243)
+
 #define cpu_is_omap7xx()		0
 #define cpu_is_omap15xx()		0
 #define cpu_is_omap16xx()		0
 #define cpu_is_omap24xx()		0
+#define cpu_is_omap242x()		0
+#define cpu_is_omap243x()		0
 
 #if defined(MULTI_OMAP1)
 # if defined(CONFIG_ARCH_OMAP730)
 #  undef  cpu_is_omap7xx
 #  define cpu_is_omap7xx()		is_omap7xx()
 # endif
-# if defined(CONFIG_ARCH_OMAP1510)
+# if defined(CONFIG_ARCH_OMAP15XX)
 #  undef  cpu_is_omap15xx
 #  define cpu_is_omap15xx()		is_omap15xx()
 # endif
@@ -119,7 +129,7 @@
 #  undef  cpu_is_omap7xx
 #  define cpu_is_omap7xx()		1
 # endif
-# if defined(CONFIG_ARCH_OMAP1510)
+# if defined(CONFIG_ARCH_OMAP15XX)
 #  undef  cpu_is_omap15xx
 #  define cpu_is_omap15xx()		1
 # endif
@@ -129,13 +139,18 @@
 # endif
 # if defined(CONFIG_ARCH_OMAP24XX)
 #  undef  cpu_is_omap24xx
+#  undef  cpu_is_omap242x
+#  undef  cpu_is_omap243x
 #  define cpu_is_omap24xx()		1
+#  define cpu_is_omap242x()		is_omap242x()
+#  define cpu_is_omap243x()		is_omap243x()
 # endif
 #endif
 
 /*
  * Macros to detect individual cpu types.
  * These are only rarely needed.
+ * cpu_is_omap310():	True for OMAP310
  * cpu_is_omap730():	True for OMAP730
  * cpu_is_omap1510():	True for OMAP1510
  * cpu_is_omap1610():	True for OMAP1610
@@ -144,6 +159,9 @@
  * cpu_is_omap1621():	True for OMAP1621
  * cpu_is_omap1710():	True for OMAP1710
  * cpu_is_omap2420():	True for OMAP2420
+ * cpu_is_omap2422():	True for OMAP2422
+ * cpu_is_omap2423():	True for OMAP2423
+ * cpu_is_omap2430():	True for OMAP2430
  */
 #define GET_OMAP_TYPE	((system_rev >> 16) & 0xffff)
 
@@ -153,6 +171,7 @@
 	return (GET_OMAP_TYPE == (id)) ? 1 : 0;		\
 }
 
+IS_OMAP_TYPE(310, 0x0310)
 IS_OMAP_TYPE(730, 0x0730)
 IS_OMAP_TYPE(1510, 0x1510)
 IS_OMAP_TYPE(1610, 0x1610)
@@ -161,7 +180,11 @@
 IS_OMAP_TYPE(1621, 0x1621)
 IS_OMAP_TYPE(1710, 0x1710)
 IS_OMAP_TYPE(2420, 0x2420)
+IS_OMAP_TYPE(2422, 0x2422)
+IS_OMAP_TYPE(2423, 0x2423)
+IS_OMAP_TYPE(2430, 0x2430)
 
+#define cpu_is_omap310()		0
 #define cpu_is_omap730()		0
 #define cpu_is_omap1510()		0
 #define cpu_is_omap1610()		0
@@ -170,31 +193,33 @@
 #define cpu_is_omap1621()		0
 #define cpu_is_omap1710()		0
 #define cpu_is_omap2420()		0
+#define cpu_is_omap2422()		0
+#define cpu_is_omap2423()		0
+#define cpu_is_omap2430()		0
 
 #if defined(MULTI_OMAP1)
 # if defined(CONFIG_ARCH_OMAP730)
 #  undef  cpu_is_omap730
 #  define cpu_is_omap730()		is_omap730()
 # endif
-# if defined(CONFIG_ARCH_OMAP1510)
-#  undef  cpu_is_omap1510
-#  define cpu_is_omap1510()		is_omap1510()
-# endif
 #else
 # if defined(CONFIG_ARCH_OMAP730)
 #  undef  cpu_is_omap730
 #  define cpu_is_omap730()		1
 # endif
-# if defined(CONFIG_ARCH_OMAP1510)
-#  undef  cpu_is_omap1510
-#  define cpu_is_omap1510()		1
-# endif
 #endif
 
 /*
  * Whether we have MULTI_OMAP1 or not, we still need to distinguish
- * between 1611B/5912 and 1710.
+ * between 330 vs. 1510 and 1611B/5912 vs. 1710.
  */
+#if defined(CONFIG_ARCH_OMAP15XX)
+# undef  cpu_is_omap310
+# undef  cpu_is_omap1510
+# define cpu_is_omap310()		is_omap310()
+# define cpu_is_omap1510()		is_omap1510()
+#endif
+
 #if defined(CONFIG_ARCH_OMAP16XX)
 # undef  cpu_is_omap1610
 # undef  cpu_is_omap1611
@@ -208,9 +233,20 @@
 # define cpu_is_omap1710()		is_omap1710()
 #endif
 
-#if defined(CONFIG_ARCH_OMAP2420)
-#  undef  cpu_is_omap2420
-#  define cpu_is_omap2420()		1
+#if defined(CONFIG_ARCH_OMAP24XX)
+# undef  cpu_is_omap2420
+# undef  cpu_is_omap2422
+# undef  cpu_is_omap2423
+# undef  cpu_is_omap2430
+# define cpu_is_omap2420()		is_omap2420()
+# define cpu_is_omap2422()		is_omap2422()
+# define cpu_is_omap2423()		is_omap2423()
+# define cpu_is_omap2430()		is_omap2430()
 #endif
 
+/* Macros to detect if we have OMAP1 or OMAP2 */
+#define cpu_class_is_omap1()	(cpu_is_omap730() || cpu_is_omap15xx() || \
+				cpu_is_omap16xx())
+#define cpu_class_is_omap2()	cpu_is_omap24xx()
+
 #endif
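
For illustration, a sketch of common code keying off the new class and subclass tests; only macros declared above are used, and the dead branches collapse to constants on single-OMAP builds.

#include <linux/kernel.h>
#include <linux/init.h>

static void __init example_report_cpu(void)
{
	if (cpu_class_is_omap1()) {
		if (cpu_is_omap1510() || cpu_is_omap310())
			printk(KERN_INFO "OMAP1510/310 class device\n");
	} else if (cpu_is_omap24xx()) {
		if (cpu_is_omap242x())
			printk(KERN_INFO "OMAP242x (2420/2422/2423)\n");
		else if (cpu_is_omap243x())
			printk(KERN_INFO "OMAP2430\n");
	}
}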
diff --git a/include/asm-arm/arch-omap/dma.h b/include/asm-arm/arch-omap/dma.h
index 04ebef5..ccbcb58 100644
--- a/include/asm-arm/arch-omap/dma.h
+++ b/include/asm-arm/arch-omap/dma.h
@@ -22,9 +22,109 @@
 #define __ASM_ARCH_DMA_H
 
 #define MAX_DMA_ADDRESS			0xffffffff
+#define MAX_DMA_CHANNELS		0
+
+/* Hardware registers for omap1 */
+#define OMAP_DMA_BASE			(0xfffed800)
+#define OMAP_DMA_GCR			(OMAP_DMA_BASE + 0x400)
+#define OMAP_DMA_GSCR			(OMAP_DMA_BASE + 0x404)
+#define OMAP_DMA_GRST			(OMAP_DMA_BASE + 0x408)
+#define OMAP_DMA_HW_ID			(OMAP_DMA_BASE + 0x442)
+#define OMAP_DMA_PCH2_ID		(OMAP_DMA_BASE + 0x444)
+#define OMAP_DMA_PCH0_ID		(OMAP_DMA_BASE + 0x446)
+#define OMAP_DMA_PCH1_ID		(OMAP_DMA_BASE + 0x448)
+#define OMAP_DMA_PCHG_ID		(OMAP_DMA_BASE + 0x44a)
+#define OMAP_DMA_PCHD_ID		(OMAP_DMA_BASE + 0x44c)
+#define OMAP_DMA_CAPS_0_U		(OMAP_DMA_BASE + 0x44e)
+#define OMAP_DMA_CAPS_0_L		(OMAP_DMA_BASE + 0x450)
+#define OMAP_DMA_CAPS_1_U		(OMAP_DMA_BASE + 0x452)
+#define OMAP_DMA_CAPS_1_L		(OMAP_DMA_BASE + 0x454)
+#define OMAP_DMA_CAPS_2			(OMAP_DMA_BASE + 0x456)
+#define OMAP_DMA_CAPS_3			(OMAP_DMA_BASE + 0x458)
+#define OMAP_DMA_CAPS_4			(OMAP_DMA_BASE + 0x45a)
+#define OMAP_DMA_PCH2_SR		(OMAP_DMA_BASE + 0x460)
+#define OMAP_DMA_PCH0_SR		(OMAP_DMA_BASE + 0x480)
+#define OMAP_DMA_PCH1_SR		(OMAP_DMA_BASE + 0x482)
+#define OMAP_DMA_PCHD_SR		(OMAP_DMA_BASE + 0x4c0)
+
+/* Hardware registers for omap2 */
+#define OMAP24XX_DMA_BASE		(L4_24XX_BASE + 0x56000)
+#define OMAP_DMA4_REVISION		(OMAP24XX_DMA_BASE + 0x00)
+#define OMAP_DMA4_GCR_REG		(OMAP24XX_DMA_BASE + 0x78)
+#define OMAP_DMA4_IRQSTATUS_L0		(OMAP24XX_DMA_BASE + 0x08)
+#define OMAP_DMA4_IRQSTATUS_L1		(OMAP24XX_DMA_BASE + 0x0c)
+#define OMAP_DMA4_IRQSTATUS_L2		(OMAP24XX_DMA_BASE + 0x10)
+#define OMAP_DMA4_IRQSTATUS_L3		(OMAP24XX_DMA_BASE + 0x14)
+#define OMAP_DMA4_IRQENABLE_L0		(OMAP24XX_DMA_BASE + 0x18)
+#define OMAP_DMA4_IRQENABLE_L1		(OMAP24XX_DMA_BASE + 0x1c)
+#define OMAP_DMA4_IRQENABLE_L2		(OMAP24XX_DMA_BASE + 0x20)
+#define OMAP_DMA4_IRQENABLE_L3		(OMAP24XX_DMA_BASE + 0x24)
+#define OMAP_DMA4_SYSSTATUS		(OMAP24XX_DMA_BASE + 0x28)
+#define OMAP_DMA4_CAPS_0		(OMAP24XX_DMA_BASE + 0x64)
+#define OMAP_DMA4_CAPS_2		(OMAP24XX_DMA_BASE + 0x6c)
+#define OMAP_DMA4_CAPS_3		(OMAP24XX_DMA_BASE + 0x70)
+#define OMAP_DMA4_CAPS_4		(OMAP24XX_DMA_BASE + 0x74)
+
+#ifdef CONFIG_ARCH_OMAP1
 
 #define OMAP_LOGICAL_DMA_CH_COUNT	17
 
+/* Common channel specific registers for omap1 */
+#define OMAP_DMA_CSDP_REG(n)		__REG16(OMAP_DMA_BASE + 0x40 * (n) + 0x00)
+#define OMAP_DMA_CCR_REG(n)		__REG16(OMAP_DMA_BASE + 0x40 * (n) + 0x02)
+#define OMAP_DMA_CICR_REG(n)		__REG16(OMAP_DMA_BASE + 0x40 * (n) + 0x04)
+#define OMAP_DMA_CSR_REG(n)		__REG16(OMAP_DMA_BASE + 0x40 * (n) + 0x06)
+#define OMAP_DMA_CEN_REG(n)		__REG16(OMAP_DMA_BASE + 0x40 * (n) + 0x10)
+#define OMAP_DMA_CFN_REG(n)		__REG16(OMAP_DMA_BASE + 0x40 * (n) + 0x12)
+#define OMAP_DMA_CSFI_REG(n)		__REG16(OMAP_DMA_BASE + 0x40 * (n) + 0x14)
+#define OMAP_DMA_CSEI_REG(n)		__REG16(OMAP_DMA_BASE + 0x40 * (n) + 0x16)
+#define OMAP_DMA_CSAC_REG(n)		__REG16(OMAP_DMA_BASE + 0x40 * (n) + 0x18)
+#define OMAP_DMA_CDAC_REG(n)		__REG16(OMAP_DMA_BASE + 0x40 * (n) + 0x1a)
+#define OMAP_DMA_CDEI_REG(n)		__REG16(OMAP_DMA_BASE + 0x40 * (n) + 0x1c)
+#define OMAP_DMA_CDFI_REG(n)		__REG16(OMAP_DMA_BASE + 0x40 * (n) + 0x1e)
+#define OMAP_DMA_CLNK_CTRL_REG(n)	__REG16(OMAP_DMA_BASE + 0x40 * (n) + 0x28)
+
+#else
+
+#define OMAP_LOGICAL_DMA_CH_COUNT	32	/* REVISIT: Is this 32 + 2? */
+
+/* Common channel specific registers for omap2 */
+#define OMAP_DMA_CCR_REG(n)		__REG32(OMAP24XX_DMA_BASE + 0x60 * (n) + 0x80)
+#define OMAP_DMA_CLNK_CTRL_REG(n)	__REG32(OMAP24XX_DMA_BASE + 0x60 * (n) + 0x84)
+#define OMAP_DMA_CICR_REG(n)		__REG32(OMAP24XX_DMA_BASE + 0x60 * (n) + 0x88)
+#define OMAP_DMA_CSR_REG(n)		__REG32(OMAP24XX_DMA_BASE + 0x60 * (n) + 0x8c)
+#define OMAP_DMA_CSDP_REG(n)		__REG32(OMAP24XX_DMA_BASE + 0x60 * (n) + 0x90)
+#define OMAP_DMA_CEN_REG(n)		__REG32(OMAP24XX_DMA_BASE + 0x60 * (n) + 0x94)
+#define OMAP_DMA_CFN_REG(n)		__REG32(OMAP24XX_DMA_BASE + 0x60 * (n) + 0x98)
+#define OMAP_DMA_CSEI_REG(n)		__REG32(OMAP24XX_DMA_BASE + 0x60 * (n) + 0xa4)
+#define OMAP_DMA_CSFI_REG(n)		__REG32(OMAP24XX_DMA_BASE + 0x60 * (n) + 0xa8)
+#define OMAP_DMA_CDEI_REG(n)		__REG32(OMAP24XX_DMA_BASE + 0x60 * (n) + 0xac)
+#define OMAP_DMA_CDFI_REG(n)		__REG32(OMAP24XX_DMA_BASE + 0x60 * (n) + 0xb0)
+#define OMAP_DMA_CSAC_REG(n)		__REG32(OMAP24XX_DMA_BASE + 0x60 * (n) + 0xb4)
+#define OMAP_DMA_CDAC_REG(n)		__REG32(OMAP24XX_DMA_BASE + 0x60 * (n) + 0xb8)
+
+#endif
+
+/* Channel specific registers only on omap1 */
+#define OMAP1_DMA_CSSA_L_REG(n)		__REG16(OMAP_DMA_BASE + 0x40 * (n) + 0x08)
+#define OMAP1_DMA_CSSA_U_REG(n)		__REG16(OMAP_DMA_BASE + 0x40 * (n) + 0x0a)
+#define OMAP1_DMA_CDSA_L_REG(n)		__REG16(OMAP_DMA_BASE + 0x40 * (n) + 0x0c)
+#define OMAP1_DMA_CDSA_U_REG(n)		__REG16(OMAP_DMA_BASE + 0x40 * (n) + 0x0e)
+#define OMAP1_DMA_COLOR_L_REG(n)	__REG16(OMAP_DMA_BASE + 0x40 * (n) + 0x20)
+#define OMAP1_DMA_CCR2_REG(n)		__REG16(OMAP_DMA_BASE + 0x40 * (n) + 0x24)
+#define OMAP1_DMA_COLOR_U_REG(n)	__REG16(OMAP_DMA_BASE + 0x40 * (n) + 0x22)
+#define OMAP1_DMA_LCH_CTRL_REG(n)	__REG16(OMAP_DMA_BASE + 0x40 * (n) + 0x2a)
+
+/* Channel specific registers only on omap2 */
+#define OMAP2_DMA_CSSA_REG(n)		__REG32(OMAP24XX_DMA_BASE + 0x60 * (n) + 0x9c)
+#define OMAP2_DMA_CDSA_REG(n)		__REG32(OMAP24XX_DMA_BASE + 0x60 * (n) + 0xa0)
+#define OMAP2_DMA_CCEN_REG(n)		__REG32(OMAP24XX_DMA_BASE + 0x60 * (n) + 0xbc)
+#define OMAP2_DMA_CCFN_REG(n)		__REG32(OMAP24XX_DMA_BASE + 0x60 * (n) + 0xc0)
+#define OMAP2_DMA_COLOR_REG(n)		__REG32(OMAP24XX_DMA_BASE + 0x60 * (n) + 0xc4)
+
+/*----------------------------------------------------------------------------*/
+
+/* DMA channels for omap1 */
 #define OMAP_DMA_NO_DEVICE		0
 #define OMAP_DMA_MCSI1_TX		1
 #define OMAP_DMA_MCSI1_RX		2
@@ -85,29 +185,72 @@
 #define OMAP_DMA_MMC2_RX		55
 #define OMAP_DMA_CRYPTO_DES_OUT		56
 
+/* DMA channels for 24xx */
+#define OMAP24XX_DMA_NO_DEVICE		0
+#define OMAP24XX_DMA_XTI_DMA		1	/* S_DMA_0 */
+#define OMAP24XX_DMA_EXT_NDMA_REQ0	2	/* S_DMA_1 */
+#define OMAP24XX_DMA_EXT_NDMA_REQ1	3	/* S_DMA_2 */
+#define OMAP24XX_DMA_GPMC		4	/* S_DMA_3 */
+#define OMAP24XX_DMA_GFX		5	/* S_DMA_4 */
+#define OMAP24XX_DMA_DSS		6	/* S_DMA_5 */
+#define OMAP24XX_DMA_VLYNQ_TX		7	/* S_DMA_6 */
+#define OMAP24XX_DMA_CWT		8	/* S_DMA_7 */
+#define OMAP24XX_DMA_AES_TX		9	/* S_DMA_8 */
+#define OMAP24XX_DMA_AES_RX		10	/* S_DMA_9 */
+#define OMAP24XX_DMA_DES_TX		11	/* S_DMA_10 */
+#define OMAP24XX_DMA_DES_RX		12	/* S_DMA_11 */
+#define OMAP24XX_DMA_SHA1MD5_RX		13	/* S_DMA_12 */
 
-#define OMAP_DMA_BASE			(0xfffed800)
-#define OMAP_DMA_GCR			(OMAP_DMA_BASE + 0x400)
-#define OMAP_DMA_GSCR			(OMAP_DMA_BASE + 0x404)
-#define OMAP_DMA_GRST			(OMAP_DMA_BASE + 0x408)
-#define OMAP_DMA_HW_ID			(OMAP_DMA_BASE + 0x442)
-#define OMAP_DMA_PCH2_ID		(OMAP_DMA_BASE + 0x444)
-#define OMAP_DMA_PCH0_ID		(OMAP_DMA_BASE + 0x446)
-#define OMAP_DMA_PCH1_ID		(OMAP_DMA_BASE + 0x448)
-#define OMAP_DMA_PCHG_ID		(OMAP_DMA_BASE + 0x44a)
-#define OMAP_DMA_PCHD_ID		(OMAP_DMA_BASE + 0x44c)
-#define OMAP_DMA_CAPS_0_U		(OMAP_DMA_BASE + 0x44e)
-#define OMAP_DMA_CAPS_0_L		(OMAP_DMA_BASE + 0x450)
-#define OMAP_DMA_CAPS_1_U		(OMAP_DMA_BASE + 0x452)
-#define OMAP_DMA_CAPS_1_L		(OMAP_DMA_BASE + 0x454)
-#define OMAP_DMA_CAPS_2			(OMAP_DMA_BASE + 0x456)
-#define OMAP_DMA_CAPS_3			(OMAP_DMA_BASE + 0x458)
-#define OMAP_DMA_CAPS_4			(OMAP_DMA_BASE + 0x45a)
-#define OMAP_DMA_PCH2_SR		(OMAP_DMA_BASE + 0x460)
-#define OMAP_DMA_PCH0_SR		(OMAP_DMA_BASE + 0x480)
-#define OMAP_DMA_PCH1_SR		(OMAP_DMA_BASE + 0x482)
-#define OMAP_DMA_PCHD_SR		(OMAP_DMA_BASE + 0x4c0)
+#define OMAP24XX_DMA_EAC_AC_RD		17	/* S_DMA_16 */
+#define OMAP24XX_DMA_EAC_AC_WR		18	/* S_DMA_17 */
+#define OMAP24XX_DMA_EAC_MD_UL_RD	19	/* S_DMA_18 */
+#define OMAP24XX_DMA_EAC_MD_UL_WR	20	/* S_DMA_19 */
+#define OMAP24XX_DMA_EAC_MD_DL_RD	21	/* S_DMA_20 */
+#define OMAP24XX_DMA_EAC_MD_DL_WR	22	/* S_DMA_21 */
+#define OMAP24XX_DMA_EAC_BT_UL_RD	23	/* S_DMA_22 */
+#define OMAP24XX_DMA_EAC_BT_UL_WR	24	/* S_DMA_23 */
+#define OMAP24XX_DMA_EAC_BT_DL_RD	25	/* S_DMA_24 */
+#define OMAP24XX_DMA_EAC_BT_DL_WR	26	/* S_DMA_25 */
+#define OMAP24XX_DMA_I2C1_TX		27	/* S_DMA_26 */
+#define OMAP24XX_DMA_I2C1_RX		28	/* S_DMA_27 */
+#define OMAP24XX_DMA_I2C2_TX		29	/* S_DMA_28 */
+#define OMAP24XX_DMA_I2C2_RX		30	/* S_DMA_29 */
+#define OMAP24XX_DMA_MCBSP1_TX		31	/* SDMA_30 */
+#define OMAP24XX_DMA_MCBSP1_RX		32	/* SDMA_31 */
+#define OMAP24XX_DMA_MCBSP2_TX		33	/* SDMA_32 */
+#define OMAP24XX_DMA_MCBSP2_RX		34	/* SDMA_33 */
+#define OMAP24XX_DMA_SPI1_TX0		35	/* SDMA_34 */
+#define OMAP24XX_DMA_SPI1_RX0		36	/* SDMA_35 */
+#define OMAP24XX_DMA_SPI1_TX1		37	/* SDMA_36 */
+#define OMAP24XX_DMA_SPI1_RX1		38	/* SDMA_37 */
+#define OMAP24XX_DMA_SPI1_TX2		39	/* SDMA_38 */
+#define OMAP24XX_DMA_SPI1_RX2		40	/* SDMA_39 */
+#define OMAP24XX_DMA_SPI1_TX3		41	/* SDMA_40 */
+#define OMAP24XX_DMA_SPI1_RX3		42	/* SDMA_41 */
+#define OMAP24XX_DMA_SPI2_TX0		43	/* SDMA_42 */
+#define OMAP24XX_DMA_SPI2_RX0		44	/* SDMA_43 */
+#define OMAP24XX_DMA_SPI2_TX1		45	/* SDMA_44 */
+#define OMAP24XX_DMA_SPI2_RX1		46	/* SDMA_45 */
 
+#define OMAP24XX_DMA_UART1_TX		49	/* SDMA_48 */
+#define OMAP24XX_DMA_UART1_RX		50	/* SDMA_49 */
+#define OMAP24XX_DMA_UART2_TX		51	/* SDMA_50 */
+#define OMAP24XX_DMA_UART2_RX		52	/* SDMA_51 */
+#define OMAP24XX_DMA_UART3_TX		53	/* SDMA_52 */
+#define OMAP24XX_DMA_UART3_RX		54	/* SDMA_53 */
+#define OMAP24XX_DMA_USB_W2FC_TX0	55	/* SDMA_54 */
+#define OMAP24XX_DMA_USB_W2FC_RX0	56	/* SDMA_55 */
+#define OMAP24XX_DMA_USB_W2FC_TX1	57	/* SDMA_56 */
+#define OMAP24XX_DMA_USB_W2FC_RX1	58	/* SDMA_57 */
+#define OMAP24XX_DMA_USB_W2FC_TX2	59	/* SDMA_58 */
+#define OMAP24XX_DMA_USB_W2FC_RX2	60	/* SDMA_59 */
+#define OMAP24XX_DMA_MMC1_TX		61	/* SDMA_60 */
+#define OMAP24XX_DMA_MMC1_RX		62	/* SDMA_61 */
+#define OMAP24XX_DMA_MS			63	/* SDMA_62 */
+
+/*----------------------------------------------------------------------------*/
+
+/* Hardware registers for LCD DMA */
 #define OMAP1510_DMA_LCD_BASE		(0xfffedb00)
 #define OMAP1510_DMA_LCD_CTRL		(OMAP1510_DMA_LCD_BASE + 0x00)
 #define OMAP1510_DMA_LCD_TOP_F1_L	(OMAP1510_DMA_LCD_BASE + 0x02)
@@ -116,7 +259,7 @@
 #define OMAP1510_DMA_LCD_BOT_F1_U	(OMAP1510_DMA_LCD_BASE + 0x08)
 
 #define OMAP1610_DMA_LCD_BASE		(0xfffee300)
-#define OMAP1610_DMA_LCD_CSDP      	(OMAP1610_DMA_LCD_BASE + 0xc0)
+#define OMAP1610_DMA_LCD_CSDP		(OMAP1610_DMA_LCD_BASE + 0xc0)
 #define OMAP1610_DMA_LCD_CCR		(OMAP1610_DMA_LCD_BASE + 0xc2)
 #define OMAP1610_DMA_LCD_CTRL		(OMAP1610_DMA_LCD_BASE + 0xc4)
 #define OMAP1610_DMA_LCD_TOP_B1_L	(OMAP1610_DMA_LCD_BASE + 0xc8)
@@ -134,37 +277,18 @@
 #define OMAP1610_DMA_LCD_LCH_CTRL	(OMAP1610_DMA_LCD_BASE + 0xea)
 #define OMAP1610_DMA_LCD_SRC_FI_B1_U	(OMAP1610_DMA_LCD_BASE + 0xf4)
 
-
-/* Every LCh has its own set of the registers below */
-#define OMAP_DMA_CSDP(n)		(OMAP_DMA_BASE + 0x40 * (n) + 0x00)
-#define OMAP_DMA_CCR(n)			(OMAP_DMA_BASE + 0x40 * (n) + 0x02)
-#define OMAP_DMA_CICR(n)		(OMAP_DMA_BASE + 0x40 * (n) + 0x04)
-#define OMAP_DMA_CSR(n)			(OMAP_DMA_BASE + 0x40 * (n) + 0x06)
-#define OMAP_DMA_CSSA_L(n)		(OMAP_DMA_BASE + 0x40 * (n) + 0x08)
-#define OMAP_DMA_CSSA_U(n)		(OMAP_DMA_BASE + 0x40 * (n) + 0x0a)
-#define OMAP_DMA_CDSA_L(n)		(OMAP_DMA_BASE + 0x40 * (n) + 0x0c)
-#define OMAP_DMA_CDSA_U(n)		(OMAP_DMA_BASE + 0x40 * (n) + 0x0e)
-#define OMAP_DMA_CEN(n)			(OMAP_DMA_BASE + 0x40 * (n) + 0x10)
-#define OMAP_DMA_CFN(n)			(OMAP_DMA_BASE + 0x40 * (n) + 0x12)
-#define OMAP_DMA_CSFI(n)		(OMAP_DMA_BASE + 0x40 * (n) + 0x14)
-#define OMAP_DMA_CSEI(n)		(OMAP_DMA_BASE + 0x40 * (n) + 0x16)
-#define OMAP_DMA_CSAC(n)		(OMAP_DMA_BASE + 0x40 * (n) + 0x18)
-#define OMAP_DMA_CDAC(n)		(OMAP_DMA_BASE + 0x40 * (n) + 0x1a)
-#define OMAP_DMA_CDEI(n)		(OMAP_DMA_BASE + 0x40 * (n) + 0x1c)
-#define OMAP_DMA_CDFI(n)		(OMAP_DMA_BASE + 0x40 * (n) + 0x1e)
-#define OMAP_DMA_COLOR_L(n)		(OMAP_DMA_BASE + 0x40 * (n) + 0x20)
-#define OMAP_DMA_COLOR_U(n)		(OMAP_DMA_BASE + 0x40 * (n) + 0x22)
-#define OMAP_DMA_CCR2(n)		(OMAP_DMA_BASE + 0x40 * (n) + 0x24)
-#define OMAP_DMA_CLNK_CTRL(n)		(OMAP_DMA_BASE + 0x40 * (n) + 0x28)
-#define OMAP_DMA_LCH_CTRL(n)		(OMAP_DMA_BASE + 0x40 * (n) + 0x2a)
-
-#define OMAP_DMA_TOUT_IRQ		(1 << 0)
+#define OMAP_DMA_TOUT_IRQ		(1 << 0)	/* Only on omap1 */
 #define OMAP_DMA_DROP_IRQ		(1 << 1)
 #define OMAP_DMA_HALF_IRQ		(1 << 2)
 #define OMAP_DMA_FRAME_IRQ		(1 << 3)
 #define OMAP_DMA_LAST_IRQ		(1 << 4)
 #define OMAP_DMA_BLOCK_IRQ		(1 << 5)
-#define OMAP_DMA_SYNC_IRQ		(1 << 6)
+#define OMAP1_DMA_SYNC_IRQ		(1 << 6)
+#define OMAP2_DMA_PKT_IRQ		(1 << 7)
+#define OMAP2_DMA_TRANS_ERR_IRQ		(1 << 8)
+#define OMAP2_DMA_SECURE_ERR_IRQ	(1 << 9)
+#define OMAP2_DMA_SUPERVISOR_ERR_IRQ	(1 << 10)
+#define OMAP2_DMA_MISALIGNED_ERR_IRQ	(1 << 11)
 
 #define OMAP_DMA_DATA_TYPE_S8		0x00
 #define OMAP_DMA_DATA_TYPE_S16		0x01
@@ -194,6 +318,7 @@
 	OMAP_LCD_DMA_B2_BOTTOM
 };
 
+/* REVISIT: Check if BURST_4 is really 1 (or 2) */
 enum omap_dma_burst_mode {
 	OMAP_DMA_DATA_BURST_DIS = 0,
 	OMAP_DMA_DATA_BURST_4,
@@ -206,6 +331,31 @@
 	OMAP_DMA_TRANSPARENT_COPY
 };
 
+struct omap_dma_channel_params {
+	int data_type;		/* data type 8,16,32 */
+	int elem_count;		/* number of elements in a frame */
+	int frame_count;	/* number of frames in a block */
+
+	int src_port;		/* Only on OMAP1 REVISIT: Is this needed? */
+	int src_amode;		/* constant, post increment, indexed, double indexed */
+	int src_start;		/* source address : physical */
+	int src_ei;		/* source element index */
+	int src_fi;		/* source frame index */
+
+	int dst_port;		/* Only on OMAP1 REVISIT: Is this needed? */
+	int dst_amode;		/* constant, post increment, indexed, double indexed */
+	int dst_start;		/* destination address : physical */
+	int dst_ei;		/* destination element index */
+	int dst_fi;		/* destination frame index */
+
+	int trigger;		/* trigger attached if the channel is synchronized */
+	int sync_mode;		/* sync on element, frame, block or packet */
+	int src_or_dst_synch;	/* source synch(1) or destination synch(0) */
+
+	int ie;			/* interrupt enabled */
+};
+
+
 extern void omap_set_dma_priority(int dst_port, int priority);
 extern int omap_request_dma(int dev_id, const char *dev_name,
 			    void (* callback)(int lch, u16 ch_status, void *data),
@@ -217,24 +367,30 @@
 extern void omap_stop_dma(int lch);
 extern void omap_set_dma_transfer_params(int lch, int data_type,
 					 int elem_count, int frame_count,
-					 int sync_mode);
+					 int sync_mode,
+					 int dma_trigger, int src_or_dst_synch);
 extern void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode,
 				    u32 color);
 
 extern void omap_set_dma_src_params(int lch, int src_port, int src_amode,
-				    unsigned long src_start);
+				    unsigned long src_start,
+				    int src_ei, int src_fi);
 extern void omap_set_dma_src_index(int lch, int eidx, int fidx);
 extern void omap_set_dma_src_data_pack(int lch, int enable);
 extern void omap_set_dma_src_burst_mode(int lch,
 					enum omap_dma_burst_mode burst_mode);
 
 extern void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode,
-				     unsigned long dest_start);
+				     unsigned long dest_start,
+				     int dst_ei, int dst_fi);
 extern void omap_set_dma_dest_index(int lch, int eidx, int fidx);
 extern void omap_set_dma_dest_data_pack(int lch, int enable);
 extern void omap_set_dma_dest_burst_mode(int lch,
 					 enum omap_dma_burst_mode burst_mode);
 
+extern void omap_set_dma_params(int lch,
+				struct omap_dma_channel_params * params);
+
 extern void omap_dma_link_lch (int lch_head, int lch_queue);
 extern void omap_dma_unlink_lch (int lch_head, int lch_queue);
 
@@ -244,9 +400,6 @@
 extern void omap_clear_dma(int lch);
 extern int omap_dma_running(void);
 
-/* Returns 1 if the DMA module is in OMAP1510-compatible mode, 0 otherwise */
-extern int omap_dma_in_1510_mode(void);
-
 /* LCD DMA functions */
 extern int omap_request_lcd_dma(void (* callback)(u16 status, void *data),
 				void *data);
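
For illustration, a sketch of programming a channel through the new omap_set_dma_params() bulk setter. OMAP_DMA_AMODE_CONSTANT, OMAP_DMA_AMODE_POST_INC, OMAP_DMA_DATA_TYPE_S32, OMAP_DMA_SYNC_ELEMENT and omap_start_dma() are assumed to come from the unchanged parts of this header.

/* Sketch only: peripheral-to-memory transfer on an already requested
 * logical channel lch, element-synchronized on the source. */
static void example_setup_rx(int lch, unsigned long fifo_pa,
			     unsigned long buf_pa, int words)
{
	struct omap_dma_channel_params p = {
		.data_type	  = OMAP_DMA_DATA_TYPE_S32,
		.elem_count	  = words,
		.frame_count	  = 1,
		.src_amode	  = OMAP_DMA_AMODE_CONSTANT,
		.src_start	  = fifo_pa,
		.dst_amode	  = OMAP_DMA_AMODE_POST_INC,
		.dst_start	  = buf_pa,
		.trigger	  = OMAP24XX_DMA_UART1_RX,
		.sync_mode	  = OMAP_DMA_SYNC_ELEMENT,
		.src_or_dst_synch = 1,		/* synchronize on the source */
		.ie		  = OMAP_DMA_BLOCK_IRQ,
	};

	omap_set_dma_params(lch, &p);
	omap_start_dma(lch);
}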
diff --git a/include/asm-arm/arch-omap/entry-macro.S b/include/asm-arm/arch-omap/entry-macro.S
index 0d29b9c..f8814a8 100644
--- a/include/asm-arm/arch-omap/entry-macro.S
+++ b/include/asm-arm/arch-omap/entry-macro.S
@@ -10,6 +10,20 @@
 
 #if defined(CONFIG_ARCH_OMAP1)
 
+#if defined(CONFIG_ARCH_OMAP730) && \
+	(defined(CONFIG_ARCH_OMAP15XX) || defined(CONFIG_ARCH_OMAP16XX))
+#error "FIXME: OMAP730 doesn't support multiple-OMAP"
+#elif defined(CONFIG_ARCH_OMAP730)
+#define INT_IH2_IRQ		INT_730_IH2_IRQ
+#elif defined(CONFIG_ARCH_OMAP15XX)
+#define INT_IH2_IRQ		INT_1510_IH2_IRQ
+#elif defined(CONFIG_ARCH_OMAP16XX)
+#define INT_IH2_IRQ		INT_1610_IH2_IRQ
+#else
+#warning "IH2 IRQ defaulted"
+#define INT_IH2_IRQ		INT_1510_IH2_IRQ
+#endif
+
  		.macro	disable_fiq
 		.endm
 
diff --git a/include/asm-arm/arch-omap/fpga.h b/include/asm-arm/arch-omap/fpga.h
index 676807d..6a883e0 100644
--- a/include/asm-arm/arch-omap/fpga.h
+++ b/include/asm-arm/arch-omap/fpga.h
@@ -19,7 +19,7 @@
 #ifndef __ASM_ARCH_OMAP_FPGA_H
 #define __ASM_ARCH_OMAP_FPGA_H
 
-#if defined(CONFIG_MACH_OMAP_INNOVATOR) && defined(CONFIG_ARCH_OMAP1510)
+#if defined(CONFIG_MACH_OMAP_INNOVATOR) && defined(CONFIG_ARCH_OMAP15XX)
 extern void omap1510_fpga_init_irq(void);
 #else
 #define omap1510_fpga_init_irq()	(0)
@@ -77,6 +77,8 @@
 #define H2P2_DBG_FPGA_LOAD_METER_SIZE	11
 #define H2P2_DBG_FPGA_LOAD_METER_MASK	((1 << H2P2_DBG_FPGA_LOAD_METER_SIZE) - 1)
 
+#define H2P2_DBG_FPGA_P2_LED_TIMER		(1 << 0)
+#define H2P2_DBG_FPGA_P2_LED_IDLE		(1 << 1)
 
 /*
  * ---------------------------------------------------------------------------
diff --git a/include/asm-arm/arch-omap/gpio.h b/include/asm-arm/arch-omap/gpio.h
index 74cb2b9..1b38857 100644
--- a/include/asm-arm/arch-omap/gpio.h
+++ b/include/asm-arm/arch-omap/gpio.h
@@ -67,7 +67,7 @@
 
 #define OMAP_GPIO_IRQ(nr)	(OMAP_GPIO_IS_MPUIO(nr) ? \
 				 IH_MPUIO_BASE + ((nr) & 0x0f) : \
-				 IH_GPIO_BASE + ((nr) & 0x3f))
+				 IH_GPIO_BASE + (nr))
 
 extern int omap_gpio_init(void);	/* Call from board init only */
 extern int omap_request_gpio(int gpio);
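
For illustration, a sketch of the corrected OMAP_GPIO_IRQ() in use for a 24xx GPIO line above 63 (beyond the old 0x3f mask). omap_set_gpio_direction() is assumed from the unchanged part of this header; the handler prototype follows the 2.6 convention of this tree.

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/errno.h>

static irqreturn_t example_isr(int irq, void *dev_id, struct pt_regs *regs)
{
	return IRQ_HANDLED;
}

static int __init example_gpio_irq_init(void)
{
	int gpio = 92;			/* e.g. pin muxed as GPIO92 */

	if (omap_request_gpio(gpio) < 0)
		return -EBUSY;
	omap_set_gpio_direction(gpio, 1);	/* 1 = input */
	return request_irq(OMAP_GPIO_IRQ(gpio), example_isr, 0,
			   "example", NULL);
}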
diff --git a/include/asm-arm/arch-omap/hardware.h b/include/asm-arm/arch-omap/hardware.h
index 60201e1..5406b87 100644
--- a/include/asm-arm/arch-omap/hardware.h
+++ b/include/asm-arm/arch-omap/hardware.h
@@ -267,8 +267,6 @@
 #define OMAP_LPG2_LCR			(OMAP_LPG2_BASE + 0x00)
 #define OMAP_LPG2_PMR			(OMAP_LPG2_BASE + 0x04)
 
-#ifndef __ASSEMBLER__
-
 /*
  * ---------------------------------------------------------------------------
  * Processor specific defines
@@ -277,13 +275,11 @@
 
 #include "omap730.h"
 #include "omap1510.h"
-
-#ifdef CONFIG_ARCH_OMAP24XX
 #include "omap24xx.h"
-#endif
-
 #include "omap16xx.h"
 
+#ifndef __ASSEMBLER__
+
 /*
  * ---------------------------------------------------------------------------
  * Board specific defines
diff --git a/include/asm-arm/arch-omap/io.h b/include/asm-arm/arch-omap/io.h
index 3d5bcd5..f5bcc9a 100644
--- a/include/asm-arm/arch-omap/io.h
+++ b/include/asm-arm/arch-omap/io.h
@@ -52,23 +52,33 @@
  * ----------------------------------------------------------------------------
  */
 
+#define PCIO_BASE	0
+
 #if defined(CONFIG_ARCH_OMAP1)
+
 #define IO_PHYS		0xFFFB0000
-#define IO_OFFSET      -0x01000000	/* Virtual IO = 0xfefb0000 */
+#define IO_OFFSET	0x01000000	/* Virtual IO = 0xfefb0000 */
 #define IO_SIZE		0x40000
+#define IO_VIRT		(IO_PHYS - IO_OFFSET)
+#define IO_ADDRESS(pa)	((pa) - IO_OFFSET)
+#define io_p2v(pa)	((pa) - IO_OFFSET)
+#define io_v2p(va)	((va) + IO_OFFSET)
 
 #elif defined(CONFIG_ARCH_OMAP2)
-#define IO_PHYS		0x48000000	/* L4 peripherals; other stuff has to be mapped *
-					 * manually. */
-#define IO_OFFSET	0x90000000	/* Virtual IO = 0xd8000000 */
-#define IO_SIZE		0x08000000
-#endif
 
-#define IO_VIRT		(IO_PHYS + IO_OFFSET)
-#define IO_ADDRESS(x)	((x) + IO_OFFSET)
-#define PCIO_BASE	0
-#define io_p2v(x)	((x) + IO_OFFSET)
-#define io_v2p(x)	((x) - IO_OFFSET)
+/* We map both L3 and L4 on OMAP2 */
+#define L3_24XX_PHYS	L3_24XX_BASE	/* 0x68000000 */
+#define L3_24XX_VIRT	0xf8000000
+#define L3_24XX_SIZE	SZ_1M		/* 44kB of 128MB used, want 1MB sect */
+#define L4_24XX_PHYS	L4_24XX_BASE	/* 0x48000000 */
+#define L4_24XX_VIRT	0xd8000000
+#define L4_24XX_SIZE	SZ_1M		/* 1MB of 128MB used, want 1MB sect */
+#define IO_OFFSET	0x90000000
+#define IO_ADDRESS(pa)	((pa) + IO_OFFSET)	/* Works for L3 and L4 */
+#define io_p2v(pa)	((pa) + IO_OFFSET)	/* Works for L3 and L4 */
+#define io_v2p(va)	((va) - IO_OFFSET)	/* Works for L3 and L4 */
+
+#endif
 
 #ifndef __ASSEMBLER__
 
diff --git a/include/asm-arm/arch-omap/irqs.h b/include/asm-arm/arch-omap/irqs.h
index 74e108c..9779686 100644
--- a/include/asm-arm/arch-omap/irqs.h
+++ b/include/asm-arm/arch-omap/irqs.h
@@ -22,8 +22,8 @@
  *	 are different.
  */
 
-#ifndef __ASM_ARCH_OMAP1510_IRQS_H
-#define __ASM_ARCH_OMAP1510_IRQS_H
+#ifndef __ASM_ARCH_OMAP15XX_IRQS_H
+#define __ASM_ARCH_OMAP15XX_IRQS_H
 
 /*
  * IRQ numbers for interrupt handler 1
@@ -31,7 +31,6 @@
  * NOTE: See also the OMAP-1510 and 1610 specific IRQ numbers below
  *
  */
-#define INT_IH2_IRQ		0
 #define INT_CAMERA		1
 #define INT_FIQ			3
 #define INT_RTDX		6
@@ -60,6 +59,7 @@
 /*
  * OMAP-1510 specific IRQ numbers for interrupt handler 1
  */
+#define INT_1510_IH2_IRQ	0
 #define INT_1510_RES2		2
 #define INT_1510_SPI_TX		4
 #define INT_1510_SPI_RX		5
@@ -71,6 +71,7 @@
 /*
  * OMAP-1610 specific IRQ numbers for interrupt handler 1
  */
+#define INT_1610_IH2_IRQ	0
 #define INT_1610_IH2_FIQ	2
 #define INT_1610_McBSP2_TX	4
 #define INT_1610_McBSP2_RX	5
@@ -231,6 +232,12 @@
 #define INT_730_DMA_CH15	(62 + IH2_BASE)
 #define INT_730_NAND		(63 + IH2_BASE)
 
+#define INT_24XX_SYS_NIRQ	7
+#define INT_24XX_SDMA_IRQ0	12
+#define INT_24XX_SDMA_IRQ1	13
+#define INT_24XX_SDMA_IRQ2	14
+#define INT_24XX_SDMA_IRQ3	15
+#define INT_24XX_DSS_IRQ	25
 #define INT_24XX_GPIO_BANK1	29
 #define INT_24XX_GPIO_BANK2	30
 #define INT_24XX_GPIO_BANK3	31
diff --git a/include/asm-arm/arch-omap/memory.h b/include/asm-arm/arch-omap/memory.h
index bf545b6..df50dd5 100644
--- a/include/asm-arm/arch-omap/memory.h
+++ b/include/asm-arm/arch-omap/memory.h
@@ -61,7 +61,7 @@
  * Note that the is_lbus_device() test is not very efficient on 1510
  * because of the strncmp().
  */
-#ifdef CONFIG_ARCH_OMAP1510
+#ifdef CONFIG_ARCH_OMAP15XX
 
 /*
  * OMAP-1510 Local Bus address offset
@@ -84,7 +84,7 @@
 					virt_to_lbus(addr) : \
 					__virt_to_bus(addr);})
 
-#endif	/* CONFIG_ARCH_OMAP1510 */
+#endif	/* CONFIG_ARCH_OMAP15XX */
 
 #endif
 
diff --git a/include/asm-arm/arch-omap/menelaus.h b/include/asm-arm/arch-omap/menelaus.h
new file mode 100644
index 0000000..46be8b8
--- /dev/null
+++ b/include/asm-arm/arch-omap/menelaus.h
@@ -0,0 +1,22 @@
+/*
+ * linux/include/asm-arm/arch-omap/menelaus.h
+ *
+ * Functions to access Menelaus power management chip
+ */
+
+#ifndef __ASM_ARCH_MENELAUS_H
+#define __ASM_ARCH_MENELAUS_H
+
+extern void menelaus_mmc_register(void (*callback)(u8 card_mask),
+				  unsigned long data);
+extern void menelaus_mmc_remove(void);
+extern void menelaus_mmc_opendrain(int enable);
+
+#if defined(CONFIG_ARCH_OMAP24XX) && defined(CONFIG_MENELAUS)
+#define omap_has_menelaus()	1
+#else
+#define omap_has_menelaus()	0
+#endif
+
+#endif
+
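
For illustration, a sketch of an MMC host hooking the Menelaus card-detect callback; the callback signature follows the declaration above, and slot bookkeeping and error handling are omitted.

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/types.h>

static void example_card_detect(u8 card_mask)
{
	/* card_mask reports which slot(s) changed state */
}

static int __init example_menelaus_hook(void)
{
	if (!omap_has_menelaus())
		return -ENODEV;
	menelaus_mmc_register(example_card_detect, 0);
	return 0;
}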
diff --git a/include/asm-arm/arch-omap/mux.h b/include/asm-arm/arch-omap/mux.h
index 1b1ad41..13415a9a 100644
--- a/include/asm-arm/arch-omap/mux.h
+++ b/include/asm-arm/arch-omap/mux.h
@@ -4,7 +4,7 @@
  * Table of the Omap register configurations for the FUNC_MUX and
  * PULL_DWN combinations.
  *
- * Copyright (C) 2003 Nokia Corporation
+ * Copyright (C) 2003 - 2005 Nokia Corporation
  *
  * Written by Tony Lindgren <tony.lindgren@nokia.com>
  *
@@ -58,6 +58,16 @@
 					.pu_pd_reg = PU_PD_SEL_##reg, \
 					.pu_pd_val = status,
 
+#define MUX_REG_730(reg, mode_offset, mode) .mux_reg_name = "OMAP730_IO_CONF_"#reg, \
+					.mux_reg = OMAP730_IO_CONF_##reg, \
+					.mask_offset = mode_offset, \
+					.mask = mode,
+
+#define PULL_REG_730(reg, bit, status)	.pull_name = "OMAP730_IO_CONF_"#reg, \
+					.pull_reg = OMAP730_IO_CONF_##reg, \
+					.pull_bit = bit, \
+					.pull_val = status,
+
 #else
 
 #define MUX_REG(reg, mode_offset, mode) .mux_reg = FUNC_MUX_CTRL_##reg, \
@@ -71,6 +81,15 @@
 #define PU_PD_REG(reg, status)		.pu_pd_reg = PU_PD_SEL_##reg, \
 					.pu_pd_val = status,
 
+#define MUX_REG_730(reg, mode_offset, mode) \
+					.mux_reg = OMAP730_IO_CONF_##reg, \
+					.mask_offset = mode_offset, \
+					.mask = mode,
+
+#define PULL_REG_730(reg, bit, status)	.pull_reg = OMAP730_IO_CONF_##reg, \
+					.pull_bit = bit, \
+					.pull_val = status,
+
 #endif /* CONFIG_OMAP_MUX_DEBUG */
 
 #define MUX_CFG(desc, mux_reg, mode_offset, mode,	\
@@ -84,13 +103,44 @@
 	PU_PD_REG(pu_pd_reg, pu_pd_status)		\
 },
 
+
+/*
+ * OMAP730 has a slightly different config for the pin mux.
+ * - config regs are the OMAP730_IO_CONF_x regs (see omap730.h) and
+ *   not the FUNC_MUX_CTRL_x regs from hardware.h
+ * - for pull-up/down, only has one enable bit which is is in the same register
+ *   as mux config
+ */
+#define MUX_CFG_730(desc, mux_reg, mode_offset, mode,	\
+		   pull_reg, pull_bit, pull_status,	\
+		   pu_pd_reg, pu_pd_status, debug_status)\
+{							\
+	.name =	 desc,					\
+	.debug = debug_status,				\
+	MUX_REG_730(mux_reg, mode_offset, mode)		\
+	PULL_REG_730(mux_reg, pull_bit, pull_status)	\
+	PU_PD_REG(pu_pd_reg, pu_pd_status)		\
+},
+
+#define MUX_CFG_24XX(desc, reg_offset, mode,			\
+				pull_en, pull_mode, dbg)	\
+{								\
+	.name		= desc,					\
+	.debug		= dbg,					\
+	.mux_reg	= reg_offset,				\
+	.mask		= mode,					\
+	.pull_val	= pull_en,				\
+	.pu_pd_val	= pull_mode,				\
+},
+
+
 #define PULL_DISABLED	0
 #define PULL_ENABLED	1
 
 #define PULL_DOWN	0
 #define PULL_UP		1
 
-typedef struct {
+struct pin_config {
 	char *name;
 	unsigned char busy;
 	unsigned char debug;
@@ -108,13 +158,23 @@
 	const char *pu_pd_name;
 	const unsigned int pu_pd_reg;
 	const unsigned char pu_pd_val;
-} reg_cfg_set;
+};
 
-/*
- * Lookup table for FUNC_MUX and PULL_DWN register combinations for each
- * device. See also reg_cfg_table below for the register values.
- */
-typedef enum {
+enum omap730_index {
+	/* OMAP 730 keyboard */
+	E2_730_KBR0,
+	J7_730_KBR1,
+	E1_730_KBR2,
+	F3_730_KBR3,
+	D2_730_KBR4,
+	C2_730_KBC0,
+	D3_730_KBC1,
+	E4_730_KBC2,
+	F4_730_KBC3,
+	E3_730_KBC4,
+};
+
+enum omap1xxx_index {
 	/* UART1 (BT_UART_GATING)*/
 	UART1_TX = 0,
 	UART1_RTS,
@@ -331,245 +391,34 @@
 	V10_1610_CF_IREQ,
 	W10_1610_CF_RESET,
 	W11_1610_CF_CD1,
-} reg_cfg_t;
-
-#if defined(__MUX_C__) && defined(CONFIG_OMAP_MUX)
-
-/*
- * Table of various FUNC_MUX and PULL_DWN combinations for each device.
- * See also reg_cfg_t above for the lookup table.
- */
-static const reg_cfg_set __initdata_or_module
-reg_cfg_table[] = {
-/*
- *	 description		mux  mode   mux	 pull pull  pull  pu_pd	 pu  dbg
- *				reg  offset mode reg  bit   ena	  reg
- */
-MUX_CFG("UART1_TX",		 9,   21,    1,	  2,   3,   0,	 NA,	 0,  0)
-MUX_CFG("UART1_RTS",		 9,   12,    1,	  2,   0,   0,	 NA,	 0,  0)
-
-/* UART2 (COM_UART_GATING), conflicts with USB2 */
-MUX_CFG("UART2_TX",		 C,   27,    1,	  3,   3,   0,	 NA,	 0,  0)
-MUX_CFG("UART2_RX",		 C,   18,    0,	  3,   1,   1,	 NA,	 0,  0)
-MUX_CFG("UART2_CTS",		 C,   21,    0,	  3,   1,   1,	 NA,	 0,  0)
-MUX_CFG("UART2_RTS",		 C,   24,    1,	  3,   2,   0,	 NA,	 0,  0)
-
-/* UART3 (GIGA_UART_GATING) */
-MUX_CFG("UART3_TX",		 6,    0,    1,	  0,  30,   0,	 NA,	 0,  0)
-MUX_CFG("UART3_RX",		 6,    3,    0,	  0,  31,   1,	 NA,	 0,  0)
-MUX_CFG("UART3_CTS",		 5,   12,    2,	  0,  24,   0,	 NA,	 0,  0)
-MUX_CFG("UART3_RTS",		 5,   15,    2,	  0,  25,   0,	 NA,	 0,  0)
-MUX_CFG("UART3_CLKREQ",		 9,   27,    0,	  2,   5,   0,	 NA,	 0,  0)
-MUX_CFG("UART3_BCLK",		 A,    0,    0,	  2,   6,   0,	 NA,	 0,  0)
-MUX_CFG("Y15_1610_UART3_RTS",	 A,    0,    1,	  2,   6,   0,	 NA,	 0,  0)
-
-/* PWT & PWL, conflicts with UART3 */
-MUX_CFG("PWT",		 	 6,    0,    2,	  0,  30,   0,	 NA,	 0,  0)
-MUX_CFG("PWL",		 	 6,    3,    1,	  0,  31,   1,	 NA,	 0,  0)
-
-/* USB internal master generic */
-MUX_CFG("R18_USB_VBUS",		 7,    9,    2,	  1,  11,   0,	 NA,	 0,  1)
-MUX_CFG("R18_1510_USB_GPIO0",	 7,    9,    0,	  1,  11,   1,	 NA,	 0,  1)
-/* works around erratum:  W4_USB_PUEN and W4_USB_PUDIS are switched! */
-MUX_CFG("W4_USB_PUEN",		 D,    3,    3,	  3,   5,   1,	 NA,	 0,  1)
-MUX_CFG("W4_USB_CLKO",		 D,    3,    1,	  3,   5,   0,	 NA,	 0,  1)
-MUX_CFG("W4_USB_HIGHZ",		 D,    3,    4,	  3,   5,   0,	  3,	 0,  1)
-MUX_CFG("W4_GPIO58",		 D,    3,    7,	  3,   5,   0,	  3,	 0,  1)
-
-/* USB1 master */
-MUX_CFG("USB1_SUSP",		 8,   27,    2,	  1,  27,   0,	 NA,	 0,  1)
-MUX_CFG("USB1_SE0",		 9,    0,    2,	  1,  28,   0,	 NA,	 0,  1)
-MUX_CFG("W13_1610_USB1_SE0",	 9,    0,    4,	  1,  28,   0,	 NA,	 0,  1)
-MUX_CFG("USB1_TXEN",		 9,    3,    2,	  1,  29,   0,	 NA,	 0,  1)
-MUX_CFG("USB1_TXD",		 9,   24,    1,	  2,   4,   0,	 NA,	 0,  1)
-MUX_CFG("USB1_VP",		 A,    3,    1,	  2,   7,   0,	 NA,	 0,  1)
-MUX_CFG("USB1_VM",		 A,    6,    1,	  2,   8,   0,	 NA,	 0,  1)
-MUX_CFG("USB1_RCV",		 A,    9,    1,	  2,   9,   0,	 NA,	 0,  1)
-MUX_CFG("USB1_SPEED",		 A,   12,    2,	  2,  10,   0,	 NA,	 0,  1)
-MUX_CFG("R13_1610_USB1_SPEED",	 A,   12,    5,	  2,  10,   0,	 NA,	 0,  1)
-MUX_CFG("R13_1710_USB1_SEO",	 A,   12,    5,   2,  10,   0,   NA,     0,  1)
-
-/* USB2 master */
-MUX_CFG("USB2_SUSP",		 B,    3,    1,	  2,  17,   0,	 NA,	 0,  1)
-MUX_CFG("USB2_VP",		 B,    6,    1,	  2,  18,   0,	 NA,	 0,  1)
-MUX_CFG("USB2_TXEN",		 B,    9,    1,	  2,  19,   0,	 NA,	 0,  1)
-MUX_CFG("USB2_VM",		 C,   18,    1,	  3,   0,   0,	 NA,	 0,  1)
-MUX_CFG("USB2_RCV",		 C,   21,    1,	  3,   1,   0,	 NA,	 0,  1)
-MUX_CFG("USB2_SE0",		 C,   24,    2,	  3,   2,   0,	 NA,	 0,  1)
-MUX_CFG("USB2_TXD",		 C,   27,    2,	  3,   3,   0,	 NA,	 0,  1)
-
-/* OMAP-1510 GPIO */
-MUX_CFG("R18_1510_GPIO0",	 7,    9,    0,   1,  11,   1,    0,     0,  1)
-MUX_CFG("R19_1510_GPIO1",	 7,    6,    0,   1,  10,   1,    0,     0,  1)
-MUX_CFG("M14_1510_GPIO2",	 7,    3,    0,   1,   9,   1,    0,     0,  1)
-
-/* OMAP1610 GPIO */
-MUX_CFG("P18_1610_GPIO3",	 7,    0,    0,   1,   8,   0,   NA,     0,  1)
-MUX_CFG("Y15_1610_GPIO17",	 A,    0,    7,   2,   6,   0,   NA,     0,  1)
-
-/* OMAP-1710 GPIO */
-MUX_CFG("R18_1710_GPIO0",        7,    9,    0,   1,  11,   1,    1,     1,  1)
-MUX_CFG("V2_1710_GPIO10",        F,   27,    1,   4,   3,   1,    4,     1,  1)
-MUX_CFG("N21_1710_GPIO14",       6,    9,    0,   1,   1,   1,    1,     1,  1)
-MUX_CFG("W15_1710_GPIO40",       9,   27,    7,   2,   5,   1,    2,     1,  1)
-
-/* MPUIO */
-MUX_CFG("MPUIO2",		 7,   18,    0,	  1,  14,   1,	 NA,	 0,  1)
-MUX_CFG("N15_1610_MPUIO2",	 7,   18,    0,	  1,  14,   1,	  1,	 0,  1)
-MUX_CFG("MPUIO4",		 7,   15,    0,	  1,  13,   1,	 NA,	 0,  1)
-MUX_CFG("MPUIO5",		 7,   12,    0,	  1,  12,   1,	 NA,	 0,  1)
-
-MUX_CFG("T20_1610_MPUIO5",	 7,   12,    0,	  1,  12,   0,	  3,	 0,  1)
-MUX_CFG("W11_1610_MPUIO6",	10,   15,    2,	  3,   8,   0,	  3,	 0,  1)
-MUX_CFG("V10_1610_MPUIO7",	 A,   24,    2,	  2,  14,   0,	  2,	 0,  1)
-MUX_CFG("W11_1610_MPUIO9",	10,   15,    1,	  3,   8,   0,	  3,	 0,  1)
-MUX_CFG("V10_1610_MPUIO10",	 A,   24,    1,	  2,  14,   0,	  2,	 0,  1)
-MUX_CFG("W10_1610_MPUIO11",	 A,   18,    2,	  2,  11,   0,	  2,	 0,  1)
-MUX_CFG("E20_1610_MPUIO13",	 3,   21,    1,	  0,   7,   0,	  0,	 0,  1)
-MUX_CFG("U20_1610_MPUIO14",	 9,    6,    6,	  0,  30,   0,	  0,	 0,  1)
-MUX_CFG("E19_1610_MPUIO15",	 3,   18,    1,	  0,   6,   0,	  0,	 0,  1)
-
-/* MCBSP2 */
-MUX_CFG("MCBSP2_CLKR",		 C,    6,    0,	  2,  27,   1,	 NA,	 0,  1)
-MUX_CFG("MCBSP2_CLKX",		 C,    9,    0,	  2,  29,   1,	 NA,	 0,  1)
-MUX_CFG("MCBSP2_DR",		 C,    0,    0,	  2,  26,   1,	 NA,	 0,  1)
-MUX_CFG("MCBSP2_DX",		 C,   15,    0,	  2,  31,   1,	 NA,	 0,  1)
-MUX_CFG("MCBSP2_FSR",		 C,   12,    0,	  2,  30,   1,	 NA,	 0,  1)
-MUX_CFG("MCBSP2_FSX",		 C,    3,    0,	  2,  27,   1,	 NA,	 0,  1)
-
-/* MCBSP3 NOTE: Mode must 1 for clock */
-MUX_CFG("MCBSP3_CLKX",		 9,    3,    1,	  1,  29,   0,	 NA,	 0,  1)
-
-/* Misc ballouts */
-MUX_CFG("BALLOUT_V8_ARMIO3",	 B,   18,    0,	  2,  25,   1,	 NA,	 0,  1)
-MUX_CFG("N20_HDQ",	       6,   18,    1,   1,   4,   0,    1,     4,  0)
-
-/* OMAP-1610 MMC2 */
-MUX_CFG("W8_1610_MMC2_DAT0",	 B,   21,    6,	  2,  23,   1,	  2,	 1,  1)
-MUX_CFG("V8_1610_MMC2_DAT1",	 B,   27,    6,	  2,  25,   1,	  2,	 1,  1)
-MUX_CFG("W15_1610_MMC2_DAT2",	 9,   12,    6,	  2,   5,   1,	  2,	 1,  1)
-MUX_CFG("R10_1610_MMC2_DAT3",	 B,   18,    6,	  2,  22,   1,	  2,	 1,  1)
-MUX_CFG("Y10_1610_MMC2_CLK",	 B,    3,    6,	  2,  17,   0,	  2,	 0,  1)
-MUX_CFG("Y8_1610_MMC2_CMD",	 B,   24,    6,	  2,  24,   1,	  2,	 1,  1)
-MUX_CFG("V9_1610_MMC2_CMDDIR",	 B,   12,    6,	  2,  20,   0,	  2,	 1,  1)
-MUX_CFG("V5_1610_MMC2_DATDIR0",	 B,   15,    6,	  2,  21,   0,	  2,	 1,  1)
-MUX_CFG("W19_1610_MMC2_DATDIR1", 8,   15,    6,	  1,  23,   0,	  1,	 1,  1)
-MUX_CFG("R18_1610_MMC2_CLKIN",	 7,    9,    6,	  1,  11,   0,	  1,	11,  1)
-
-/* OMAP-1610 External Trace Interface */
-MUX_CFG("M19_1610_ETM_PSTAT0",	 5,   27,    1,	  0,  29,   0,	  0,	 0,  1)
-MUX_CFG("L15_1610_ETM_PSTAT1",	 5,   24,    1,	  0,  28,   0,	  0,	 0,  1)
-MUX_CFG("L18_1610_ETM_PSTAT2",	 5,   21,    1,	  0,  27,   0,	  0,	 0,  1)
-MUX_CFG("L19_1610_ETM_D0",	 5,   18,    1,	  0,  26,   0,	  0,	 0,  1)
-MUX_CFG("J19_1610_ETM_D6",	 5,    0,    1,	  0,  20,   0,	  0,	 0,  1)
-MUX_CFG("J18_1610_ETM_D7",	 5,   27,    1,	  0,  19,   0,	  0,	 0,  1)
-
-/* OMAP16XX GPIO */
-MUX_CFG("P20_1610_GPIO4",	 6,   27,    0,	  1,   7,   0,	  1,	 1,  1)
-MUX_CFG("V9_1610_GPIO7",	 B,   12,    1,	  2,  20,   0,	  2,	 1,  1)
-MUX_CFG("W8_1610_GPIO9",	 B,   21,    0,	  2,  23,   0,	  2,	 1,  1)
-MUX_CFG("N20_1610_GPIO11",       6,   18,    0,   1,   4,   0,    1,     1,  1)
-MUX_CFG("N19_1610_GPIO13",	 6,   12,    0,	  1,   2,   0,	  1,	 1,  1)
-MUX_CFG("P10_1610_GPIO22",	 C,    0,    7,	  2,  26,   0,	  2,	 1,  1)
-MUX_CFG("V5_1610_GPIO24",	 B,   15,    7,	  2,  21,   0,	  2,	 1,  1)
-MUX_CFG("AA20_1610_GPIO_41",	 9,    9,    7,	  1,  31,   0,	  1,	 1,  1)
-MUX_CFG("W19_1610_GPIO48",	 8,   15,    7,   1,  23,   1,    1,     0,  1)
-MUX_CFG("M7_1610_GPIO62",	10,    0,    0,   4,  24,   0,    4,     0,  1)
-MUX_CFG("V14_16XX_GPIO37",	 9,   18,    7,	  2,   2,   0,	  2,	 2,  0)
-MUX_CFG("R9_16XX_GPIO18",	 C,   18,    7,   3,   0,   0,    3,     0,  0)
-MUX_CFG("L14_16XX_GPIO49",	 6,    3,    7,   0,  31,   0,    0,    31,  0)
-
-/* OMAP-1610 uWire */
-MUX_CFG("V19_1610_UWIRE_SCLK",	 8,    6,    0,	  1,  20,   0,	  1,	 1,  1)
-MUX_CFG("U18_1610_UWIRE_SDI",	 8,    0,    0,	  1,  18,   0,	  1,	 1,  1)
-MUX_CFG("W21_1610_UWIRE_SDO",	 8,    3,    0,	  1,  19,   0,	  1,	 1,  1)
-MUX_CFG("N14_1610_UWIRE_CS0",	 8,    9,    1,	  1,  21,   0,	  1,	 1,  1)
-MUX_CFG("P15_1610_UWIRE_CS3",	 8,   12,    1,	  1,  22,   0,	  1,	 1,  1)
-MUX_CFG("N15_1610_UWIRE_CS1",	 7,   18,    2,	  1,  14,   0,	 NA,	 0,  1)
-
-/* OMAP-1610 Flash */
-MUX_CFG("L3_1610_FLASH_CS2B_OE",10,    6,    1,	 NA,   0,   0,	 NA,	 0,  1)
-MUX_CFG("M8_1610_FLASH_CS2B_WE",10,    3,    1,	 NA,   0,   0,	 NA,	 0,  1)
-
-/* First MMC interface, same on 1510, 1610 and 1710 */
-MUX_CFG("MMC_CMD",		 A,   27,    0,	  2,  15,   1,	  2,	 1,  1)
-MUX_CFG("MMC_DAT1",		 A,   24,    0,	  2,  14,   1,	  2,	 1,  1)
-MUX_CFG("MMC_DAT2",		 A,   18,    0,	  2,  12,   1,	  2,	 1,  1)
-MUX_CFG("MMC_DAT0",		 B,    0,    0,	  2,  16,   1,	  2,	 1,  1)
-MUX_CFG("MMC_CLK",		 A,   21,    0,	 NA,   0,   0,	 NA,	 0,  1)
-MUX_CFG("MMC_DAT3",		10,   15,    0,	  3,   8,   1,	  3,	 1,  1)
-MUX_CFG("M15_1710_MMC_CLKI",	 6,   21,    2,   0,   0,   0,   NA,     0,  1)
-MUX_CFG("P19_1710_MMC_CMDDIR",	 6,   24,    6,   0,   0,   0,   NA,     0,  1)
-MUX_CFG("P20_1710_MMC_DATDIR0",	 6,   27,    5,   0,   0,   0,   NA,     0,  1)
-
-/* OMAP-1610 USB0 alternate configuration */
-MUX_CFG("W9_USB0_TXEN",		 B,   9,     5,	  2,  19,   0,	  2,	 0,  1)
-MUX_CFG("AA9_USB0_VP",		 B,   6,     5,	  2,  18,   0,	  2,	 0,  1)
-MUX_CFG("Y5_USB0_RCV",		 C,  21,     5,	  3,   1,   0,	  1,	 0,  1)
-MUX_CFG("R9_USB0_VM",		 C,  18,     5,	  3,   0,   0,	  3,	 0,  1)
-MUX_CFG("V6_USB0_TXD",		 C,  27,     5,	  3,   3,   0,	  3,	 0,  1)
-MUX_CFG("W5_USB0_SE0",		 C,  24,     5,	  3,   2,   0,	  3,	 0,  1)
-MUX_CFG("V9_USB0_SPEED",	 B,  12,     5,	  2,  20,   0,	  2,	 0,  1)
-MUX_CFG("Y10_USB0_SUSP",	 B,   3,     5,	  2,  17,   0,	  2,	 0,  1)
-
-/* USB2 interface */
-MUX_CFG("W9_USB2_TXEN",		 B,   9,     1,	 NA,   0,   0,	 NA,	 0,  1)
-MUX_CFG("AA9_USB2_VP",		 B,   6,     1,	 NA,   0,   0,	 NA,	 0,  1)
-MUX_CFG("Y5_USB2_RCV",		 C,  21,     1,	 NA,   0,   0,	 NA,	 0,  1)
-MUX_CFG("R9_USB2_VM",		 C,  18,     1,	 NA,   0,   0,	 NA,	 0,  1)
-MUX_CFG("V6_USB2_TXD",		 C,  27,     2,	 NA,   0,   0,	 NA,	 0,  1)
-MUX_CFG("W5_USB2_SE0",		 C,  24,     2,	 NA,   0,   0,	 NA,	 0,  1)
-
-/* 16XX UART */
-MUX_CFG("R13_1610_UART1_TX",	 A,  12,     6,	  2,  10,   0,	  2,	10,  1)
-MUX_CFG("V14_16XX_UART1_RX",	 9,  18,     0,	  2,   2,   0,	  2,	 2,  1)
-MUX_CFG("R14_1610_UART1_CTS",	 9,  15,     0,	  2,   1,   0,	  2,	 1,  1)
-MUX_CFG("AA15_1610_UART1_RTS",	 9,  12,     1,	  2,   0,   0,	  2,	 0,  1)
-MUX_CFG("R9_16XX_UART2_RX",	 C,  18,     0,   3,   0,   0,    3,     0,  1)
-MUX_CFG("L14_16XX_UART3_RX",	 6,   3,     0,   0,  31,   0,    0,    31,  1)
-
-/* I2C interface */
-MUX_CFG("I2C_SCL",		 7,  24,     0,	 NA,   0,   0,	 NA,	 0,  0)
-MUX_CFG("I2C_SDA",		 7,  27,     0,	 NA,   0,   0,	 NA,	 0,  0)
-
-/* Keypad */
-MUX_CFG("F18_1610_KBC0",	 3,  15,     0,	  0,   5,   1,	  0,	 0,  0)
-MUX_CFG("D20_1610_KBC1",	 3,  12,     0,	  0,   4,   1,	  0,	 0,  0)
-MUX_CFG("D19_1610_KBC2",	 3,   9,     0,	  0,   3,   1,	  0,	 0,  0)
-MUX_CFG("E18_1610_KBC3",	 3,   6,     0,	  0,   2,   1,	  0,	 0,  0)
-MUX_CFG("C21_1610_KBC4",	 3,   3,     0,	  0,   1,   1,	  0,	 0,  0)
-MUX_CFG("G18_1610_KBR0",	 4,   0,     0,	  0,   10,  1,	  0,	 1,  0)
-MUX_CFG("F19_1610_KBR1",	 3,   27,    0,	  0,   9,   1,	  0,	 1,  0)
-MUX_CFG("H14_1610_KBR2",	 3,   24,    0,	  0,   8,   1,	  0,	 1,  0)
-MUX_CFG("E20_1610_KBR3",	 3,   21,    0,	  0,   7,   1,	  0,	 1,  0)
-MUX_CFG("E19_1610_KBR4",	 3,   18,    0,	  0,   6,   1,	  0,	 1,  0)
-MUX_CFG("N19_1610_KBR5",	 6,  12,     1,	  1,   2,   1,	  1,	 1,  0)
-
-/* Power management */
-MUX_CFG("T20_1610_LOW_PWR",	 7,   12,    1,	  NA,   0,   0,   NA,	 0,  0)
-
-/* MCLK Settings */
-MUX_CFG("V5_1710_MCLK_ON",	 B,   15,    0,	  NA,   0,   0,   NA,	 0,  0)
-MUX_CFG("V5_1710_MCLK_OFF",	 B,   15,    6,	  NA,   0,   0,   NA,	 0,  0)
-MUX_CFG("R10_1610_MCLK_ON",	 B,   18,    0,	  NA,  22,   0,	  NA,	 1,  0)
-MUX_CFG("R10_1610_MCLK_OFF",	 B,   18,    6,	  2,   22,   1,	  2,	 1,  1)
-
-/* CompactFlash controller, conflicts with MMC1 */
-MUX_CFG("P11_1610_CF_CD2",	 A,   27,    3,	  2,   15,   1,	  2,	 1,  1)
-MUX_CFG("R11_1610_CF_IOIS16",	 B,    0,    3,	  2,   16,   1,	  2,	 1,  1)
-MUX_CFG("V10_1610_CF_IREQ",	 A,   24,    3,	  2,   14,   0,	  2,	 0,  1)
-MUX_CFG("W10_1610_CF_RESET",	 A,   18,    3,	  2,   12,   1,	  2,	 1,  1)
-MUX_CFG("W11_1610_CF_CD1",	10,   15,    3,	  3,    8,   1,	  3,	 1,  1)
 };
 
-#endif	/* __MUX_C__ */
+enum omap24xx_index {
+	/* 24xx I2C */
+	M19_24XX_I2C1_SCL,
+	L15_24XX_I2C1_SDA,
+	J15_24XX_I2C2_SCL,
+	H19_24XX_I2C2_SDA,
+
+	/* 24xx Menelaus interrupt */
+	W19_24XX_SYS_NIRQ,
+
+	/* 24xx GPIO */
+	Y20_24XX_GPIO60,
+	M15_24XX_GPIO92,
+};
 
 #ifdef	CONFIG_OMAP_MUX
 /* setup pin muxing in Linux */
-extern int omap_cfg_reg(reg_cfg_t reg_cfg);
+extern int omap1_mux_init(void);
+extern int omap2_mux_init(void);
+extern int omap_mux_register(struct pin_config * pins, unsigned long size);
+extern int omap_cfg_reg(unsigned long reg_cfg);
 #else
 /* boot loader does it all (no warnings from CONFIG_OMAP_MUX_WARNINGS) */
-static inline int omap_cfg_reg(reg_cfg_t reg_cfg) { return 0; }
+static inline int omap1_mux_init(void) { return 0; }
+static inline int omap2_mux_init(void) { return 0; }
+static inline int omap_cfg_reg(unsigned long reg_cfg) { return 0; }
 #endif
 
 #endif
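
For illustration, a board-init sketch using the new per-generation enums with the relaxed omap_cfg_reg() prototype. The pin identifiers come from the tables above; the cpu_is_*() tests and <linux/init.h> are assumed to be pulled in via the usual platform includes.

static void __init example_board_mux_init(void)
{
	if (cpu_is_omap24xx()) {
		omap_cfg_reg(M19_24XX_I2C1_SCL);
		omap_cfg_reg(L15_24XX_I2C1_SDA);
		omap_cfg_reg(W19_24XX_SYS_NIRQ);	/* Menelaus interrupt */
	} else if (cpu_is_omap730()) {
		omap_cfg_reg(E2_730_KBR0);
		omap_cfg_reg(C2_730_KBC0);
	}
}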
diff --git a/include/asm-arm/arch-omap/omap1510.h b/include/asm-arm/arch-omap/omap1510.h
index f086a39..c575d35 100644
--- a/include/asm-arm/arch-omap/omap1510.h
+++ b/include/asm-arm/arch-omap/omap1510.h
@@ -25,8 +25,8 @@
  * 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
-#ifndef __ASM_ARCH_OMAP1510_H
-#define __ASM_ARCH_OMAP1510_H
+#ifndef __ASM_ARCH_OMAP15XX_H
+#define __ASM_ARCH_OMAP15XX_H
 
 /*
  * ----------------------------------------------------------------------------
@@ -44,5 +44,5 @@
 #define OMAP1510_DSPREG_SIZE	SZ_128K
 #define OMAP1510_DSPREG_START	0xE1000000
 
-#endif /*  __ASM_ARCH_OMAP1510_H */
+#endif /*  __ASM_ARCH_OMAP15XX_H */
 
diff --git a/include/asm-arm/arch-omap/omap24xx.h b/include/asm-arm/arch-omap/omap24xx.h
index a910546..6e59805 100644
--- a/include/asm-arm/arch-omap/omap24xx.h
+++ b/include/asm-arm/arch-omap/omap24xx.h
@@ -1,15 +1,24 @@
 #ifndef __ASM_ARCH_OMAP24XX_H
 #define __ASM_ARCH_OMAP24XX_H
 
-#define OMAP24XX_L4_IO_BASE	0x48000000
+/*
+ * Please place only base defines here and put the rest in device
+ * specific headers. Note also that some of these defines are needed
+ * for omap1 to compile without adding ifdefs.
+ */
+
+#define L4_24XX_BASE		0x48000000
+#define L3_24XX_BASE		0x68000000
 
 /* interrupt controller */
-#define OMAP24XX_IC_BASE	(OMAP24XX_L4_IO_BASE + 0xfe000)
+#define OMAP24XX_IC_BASE	(L4_24XX_BASE + 0xfe000)
 #define VA_IC_BASE		IO_ADDRESS(OMAP24XX_IC_BASE)
-
 #define OMAP24XX_IVA_INTC_BASE	0x40000000
-
 #define IRQ_SIR_IRQ		0x0040
 
+#define OMAP24XX_32KSYNCT_BASE	(L4_24XX_BASE + 0x4000)
+#define OMAP24XX_PRCM_BASE	(L4_24XX_BASE + 0x8000)
+#define OMAP24XX_SDRC_BASE	(L3_24XX_BASE + 0x9000)
+
 #endif /* __ASM_ARCH_OMAP24XX_H */
 
diff --git a/include/asm-arm/arch-omap/omapfb.h b/include/asm-arm/arch-omap/omapfb.h
new file mode 100644
index 0000000..4ba2622
--- /dev/null
+++ b/include/asm-arm/arch-omap/omapfb.h
@@ -0,0 +1,281 @@
+/*
+ * File: include/asm-arm/arch-omap/omapfb.h
+ *
+ * Framebuffer driver for TI OMAP boards
+ *
+ * Copyright (C) 2004 Nokia Corporation
+ * Author: Imre Deak <imre.deak@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+
+#ifndef __OMAPFB_H
+#define __OMAPFB_H
+
+/* IOCTL commands. */
+
+#define OMAP_IOW(num, dtype)	_IOW('O', num, dtype)
+#define OMAP_IOR(num, dtype)	_IOR('O', num, dtype)
+#define OMAP_IOWR(num, dtype)	_IOWR('O', num, dtype)
+#define OMAP_IO(num)		_IO('O', num)
+
+#define OMAPFB_MIRROR		OMAP_IOW(31, int)
+#define OMAPFB_SYNC_GFX		OMAP_IO(37)
+#define OMAPFB_VSYNC		OMAP_IO(38)
+#define OMAPFB_SET_UPDATE_MODE	OMAP_IOW(40, enum omapfb_update_mode)
+#define OMAPFB_GET_CAPS		OMAP_IOR(42, unsigned long)
+#define OMAPFB_GET_UPDATE_MODE	OMAP_IOW(43, enum omapfb_update_mode)
+#define OMAPFB_LCD_TEST		OMAP_IOW(45, int)
+#define OMAPFB_CTRL_TEST	OMAP_IOW(46, int)
+#define OMAPFB_UPDATE_WINDOW	OMAP_IOW(47, struct omapfb_update_window)
+#define OMAPFB_SETUP_PLANE	OMAP_IOW(48, struct omapfb_setup_plane)
+#define OMAPFB_ENABLE_PLANE	OMAP_IOW(49, struct omapfb_enable_plane)
+#define OMAPFB_SET_COLOR_KEY	OMAP_IOW(50, struct omapfb_color_key)
+
+#define OMAPFB_CAPS_GENERIC_MASK	0x00000fff
+#define OMAPFB_CAPS_LCDC_MASK		0x00fff000
+#define OMAPFB_CAPS_PANEL_MASK		0xff000000
+
+#define OMAPFB_CAPS_MANUAL_UPDATE	0x00001000
+#define OMAPFB_CAPS_SET_BACKLIGHT	0x01000000
+
+/* Values from DSP must map to lower 16-bits */
+#define OMAPFB_FORMAT_MASK         0x00ff
+#define OMAPFB_FORMAT_FLAG_DOUBLE  0x0100
+
+enum omapfb_color_format {
+	OMAPFB_COLOR_RGB565 = 0,
+	OMAPFB_COLOR_YUV422,
+	OMAPFB_COLOR_YUV420,
+	OMAPFB_COLOR_CLUT_8BPP,
+	OMAPFB_COLOR_CLUT_4BPP,
+	OMAPFB_COLOR_CLUT_2BPP,
+	OMAPFB_COLOR_CLUT_1BPP,
+};
+
+struct omapfb_update_window {
+	u32 x, y;
+	u32 width, height;
+	u32 format;
+};
+
+enum omapfb_plane {
+	OMAPFB_PLANE_GFX = 0,
+	OMAPFB_PLANE_VID1,
+	OMAPFB_PLANE_VID2,
+};
+
+enum omapfb_channel_out {
+	OMAPFB_CHANNEL_OUT_LCD = 0,
+	OMAPFB_CHANNEL_OUT_DIGIT,
+};
+
+struct omapfb_setup_plane {
+	u8  plane;
+	u8  channel_out;
+	u32 offset;
+	u32 pos_x, pos_y;
+	u32 width, height;
+	u32 color_mode;
+};
+
+struct omapfb_enable_plane {
+	u8  plane;
+	u8  enable;
+};
+
+enum omapfb_color_key_type {
+	OMAPFB_COLOR_KEY_DISABLED = 0,
+	OMAPFB_COLOR_KEY_GFX_DST,
+	OMAPFB_COLOR_KEY_VID_SRC,
+};
+
+struct omapfb_color_key {
+	u8  channel_out;
+	u32 background;
+	u32 trans_key;
+	u8  key_type;
+};
+
+enum omapfb_update_mode {
+	OMAPFB_UPDATE_DISABLED = 0,
+	OMAPFB_AUTO_UPDATE,
+	OMAPFB_MANUAL_UPDATE
+};
+
+#ifdef __KERNEL__
+
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/fb.h>
+
+#define OMAP_LCDC_INV_VSYNC             0x0001
+#define OMAP_LCDC_INV_HSYNC             0x0002
+#define OMAP_LCDC_INV_PIX_CLOCK         0x0004
+#define OMAP_LCDC_INV_OUTPUT_EN         0x0008
+#define OMAP_LCDC_HSVS_RISING_EDGE      0x0010
+#define OMAP_LCDC_HSVS_OPPOSITE         0x0020
+
+#define OMAP_LCDC_SIGNAL_MASK		0x003f
+
+#define OMAP_LCDC_PANEL_TFT		0x0100
+
+#ifdef CONFIG_ARCH_OMAP1
+#define OMAPFB_PLANE_NUM		1
+#else
+#define OMAPFB_PLANE_NUM		3
+#endif
+
+struct omapfb_device;
+
+struct lcd_panel {
+	const char	*name;
+	int		config;		/* TFT/STN, signal inversion */
+	int		bpp;		/* Pixel format in fb mem */
+	int		data_lines;	/* Lines on LCD HW interface */
+
+	int		x_res, y_res;
+	int		pixel_clock;	/* In kHz */
+	int		hsw;		/* Horizontal synchronization
+					   pulse width */
+	int		hfp;		/* Horizontal front porch */
+	int		hbp;		/* Horizontal back porch */
+	int		vsw;		/* Vertical synchronization
+					   pulse width */
+	int		vfp;		/* Vertical front porch */
+	int		vbp;		/* Vertical back porch */
+	int		acb;		/* ac-bias pin frequency */
+	int		pcd;		/* pixel clock divider.
+					   Obsolete use pixel_clock instead */
+					   Obsolete; use pixel_clock instead */
+	int		(*init)		(struct omapfb_device *fbdev);
+	void		(*cleanup)	(void);
+	int		(*enable)	(void);
+	void		(*disable)	(void);
+	unsigned long	(*get_caps)	(void);
+	int		(*set_bklight_level)(unsigned int level);
+	unsigned int	(*get_bklight_level)(void);
+	unsigned int	(*get_bklight_max)  (void);
+	int		(*run_test)	(int test_num);
+};
+
+struct omapfb_device;
+
+struct extif_timings {
+	int cs_on_time;
+	int cs_off_time;
+	int we_on_time;
+	int we_off_time;
+	int re_on_time;
+	int re_off_time;
+	int we_cycle_time;
+	int re_cycle_time;
+	int cs_pulse_width;
+	int access_time;
+};
+
+struct lcd_ctrl_extif {
+	int  (*init)		(void);
+	void (*cleanup)		(void);
+	void (*set_timings)	(const struct extif_timings *timings);
+	void (*write_command)	(u32 cmd);
+	u32  (*read_data)	(void);
+	void (*write_data)	(u32 data);
+	void (*transfer_area)	(int width, int height,
+				 void (callback)(void * data), void *data);
+};
+
+struct lcd_ctrl {
+	const char	*name;
+	void		*data;
+
+	int		(*init)		  (struct omapfb_device *fbdev,
+					   int ext_mode, int req_vram_size);
+	void		(*cleanup)	  (void);
+	void		(*get_vram_layout)(unsigned long *size,
+					   void **virt_base,
+					   dma_addr_t *phys_base);
+	unsigned long	(*get_caps)	  (void);
+	int		(*set_update_mode)(enum omapfb_update_mode mode);
+	enum omapfb_update_mode (*get_update_mode)(void);
+	int		(*setup_plane)	  (int plane, int channel_out,
+					   unsigned long offset,
+					   int screen_width,
+					   int pos_x, int pos_y, int width,
+					   int height, int color_mode);
+	int		(*enable_plane)	  (int plane, int enable);
+	int		(*update_window)  (struct omapfb_update_window *win,
+					   void (*callback)(void *),
+					   void *callback_data);
+	void		(*sync)		  (void);
+	void		(*suspend)	  (void);
+	void		(*resume)	  (void);
+	int		(*run_test)	  (int test_num);
+	int		(*setcolreg)	  (u_int regno, u16 red, u16 green,
+					   u16 blue, u16 transp,
+					   int update_hw_mem);
+	int		(*set_color_key)  (struct omapfb_color_key *ck);
+
+};
+
+enum omapfb_state {
+	OMAPFB_DISABLED	= 0,
+	OMAPFB_SUSPENDED = 99,
+	OMAPFB_ACTIVE	= 100
+};
+
+struct omapfb_device {
+	int			state;
+	int                     ext_lcdc;               /* Using external
+                                                           LCD controller */
+	struct semaphore	rqueue_sema;
+
+	void			*vram_virt_base;
+	dma_addr_t		vram_phys_base;
+	unsigned long		vram_size;
+
+	int			color_mode;
+	int			palette_size;
+	int			mirror;
+	u32			pseudo_palette[17];
+
+	struct lcd_panel	*panel;			/* LCD panel */
+	struct lcd_ctrl         *ctrl;			/* LCD controller */
+	struct lcd_ctrl		*int_ctrl;		/* internal LCD ctrl */
+	struct lcd_ctrl_extif	*ext_if;		/* LCD ctrl external
+							   interface */
+	struct fb_info		*fb_info;
+
+	struct device		*dev;
+};
+
+extern struct lcd_panel h3_panel;
+extern struct lcd_panel h2_panel;
+extern struct lcd_panel p2_panel;
+extern struct lcd_panel osk_panel;
+extern struct lcd_panel innovator1610_panel;
+extern struct lcd_panel innovator1510_panel;
+
+#ifdef CONFIG_ARCH_OMAP1
+extern struct lcd_ctrl omap1_lcd_ctrl;
+#else
+extern struct lcd_ctrl omap2_disp_ctrl;
+#endif
+
+extern void omapfb_write_first_pixel(struct omapfb_device *fbdev, u16 pixval);
+
+#endif /* __KERNEL__ */
+
+#endif /* __OMAPFB_H */
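
For illustration, a user-space sketch driving the manual-update path through the new ioctls. It assumes the kernel-style u32/u8 types are made visible to the application (the header does not use the __u32 userspace spellings), and that fd is an open framebuffer device.

#include <sys/ioctl.h>

static int example_push_rect(int fd, unsigned int x, unsigned int y,
			     unsigned int w, unsigned int h)
{
	struct omapfb_update_window win = {
		.x = x, .y = y,
		.width = w, .height = h,
		.format = OMAPFB_COLOR_RGB565,
	};
	enum omapfb_update_mode mode = OMAPFB_MANUAL_UPDATE;

	if (ioctl(fd, OMAPFB_SET_UPDATE_MODE, &mode) < 0)
		return -1;
	return ioctl(fd, OMAPFB_UPDATE_WINDOW, &win);
}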
diff --git a/include/asm-arm/arch-omap/pm.h b/include/asm-arm/arch-omap/pm.h
index fbd742d..7c79042 100644
--- a/include/asm-arm/arch-omap/pm.h
+++ b/include/asm-arm/arch-omap/pm.h
@@ -98,7 +98,14 @@
 #define OMAP1610_IDLECT3		0xfffece24
 #define OMAP1610_IDLE_LOOP_REQUEST	0x0400
 
-#if     !defined(CONFIG_ARCH_OMAP1510) && \
+#define OMAP730_IDLECT1_SLEEP_VAL	0x16c7
+#define OMAP730_IDLECT2_SLEEP_VAL	0x09c7
+#define OMAP730_IDLECT3_VAL		0x3f
+#define OMAP730_IDLECT3		0xfffece24
+#define OMAP730_IDLE_LOOP_REQUEST	0x0C00
+
+#if     !defined(CONFIG_ARCH_OMAP730) && \
+	!defined(CONFIG_ARCH_OMAP15XX) && \
 	!defined(CONFIG_ARCH_OMAP16XX) && \
 	!defined(CONFIG_ARCH_OMAP24XX)
 #error "Power management for this processor not implemented yet"
@@ -107,8 +114,10 @@
 #ifndef __ASSEMBLER__
 extern void omap_pm_idle(void);
 extern void omap_pm_suspend(void);
+extern void omap730_cpu_suspend(unsigned short, unsigned short);
 extern void omap1510_cpu_suspend(unsigned short, unsigned short);
 extern void omap1610_cpu_suspend(unsigned short, unsigned short);
+extern void omap730_idle_loop_suspend(void);
 extern void omap1510_idle_loop_suspend(void);
 extern void omap1610_idle_loop_suspend(void);
 
@@ -118,6 +127,8 @@
 #define omap_serial_wake_trigger(x)	{}
 #endif	/* CONFIG_OMAP_SERIAL_WAKE */
 
+extern unsigned int omap730_cpu_suspend_sz;
+extern unsigned int omap730_idle_loop_suspend_sz;
 extern unsigned int omap1510_cpu_suspend_sz;
 extern unsigned int omap1510_idle_loop_suspend_sz;
 extern unsigned int omap1610_cpu_suspend_sz;
@@ -131,6 +142,10 @@
 #define ULPD_RESTORE(x) omap_writew((ulpd_sleep_save[ULPD_SLEEP_SAVE_##x]), (x))
 #define ULPD_SHOW(x) ulpd_sleep_save[ULPD_SLEEP_SAVE_##x]
 
+#define MPUI730_SAVE(x) mpui730_sleep_save[MPUI730_SLEEP_SAVE_##x] = omap_readl(x)
+#define MPUI730_RESTORE(x) omap_writel((mpui730_sleep_save[MPUI730_SLEEP_SAVE_##x]), (x))
+#define MPUI730_SHOW(x) mpui730_sleep_save[MPUI730_SLEEP_SAVE_##x]
+
 #define MPUI1510_SAVE(x) mpui1510_sleep_save[MPUI1510_SLEEP_SAVE_##x] = omap_readl(x)
 #define MPUI1510_RESTORE(x) omap_writel((mpui1510_sleep_save[MPUI1510_SLEEP_SAVE_##x]), (x))
 #define MPUI1510_SHOW(x) mpui1510_sleep_save[MPUI1510_SLEEP_SAVE_##x]
@@ -188,13 +203,34 @@
 	MPUI1510_SLEEP_SAVE_EMIFS_CONFIG,
 	MPUI1510_SLEEP_SAVE_OMAP_IH1_MIR,
 	MPUI1510_SLEEP_SAVE_OMAP_IH2_MIR,
-#if defined(CONFIG_ARCH_OMAP1510)
+#if defined(CONFIG_ARCH_OMAP15XX)
 	MPUI1510_SLEEP_SAVE_SIZE
 #else
 	MPUI1510_SLEEP_SAVE_SIZE = 0
 #endif
 };
 
+enum mpui730_save_state {
+	MPUI730_SLEEP_SAVE_START = 0,
+	/*
+	 * MPUI registers 32 bits
+	 */
+	MPUI730_SLEEP_SAVE_MPUI_CTRL,
+	MPUI730_SLEEP_SAVE_MPUI_DSP_BOOT_CONFIG,
+	MPUI730_SLEEP_SAVE_MPUI_DSP_API_CONFIG,
+	MPUI730_SLEEP_SAVE_MPUI_DSP_STATUS,
+	MPUI730_SLEEP_SAVE_EMIFF_SDRAM_CONFIG,
+	MPUI730_SLEEP_SAVE_EMIFS_CONFIG,
+	MPUI730_SLEEP_SAVE_OMAP_IH1_MIR,
+	MPUI730_SLEEP_SAVE_OMAP_IH2_0_MIR,
+	MPUI730_SLEEP_SAVE_OMAP_IH2_1_MIR,
+#if defined(CONFIG_ARCH_OMAP730)
+	MPUI730_SLEEP_SAVE_SIZE
+#else
+	MPUI730_SLEEP_SAVE_SIZE = 0
+#endif
+};
+
 enum mpui1610_save_state {
 	MPUI1610_SLEEP_SAVE_START = 0,
 	/*
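The MPUI730_SAVE/RESTORE/SHOW macros and the mpui730_save_state enum follow the same save/restore pattern as the existing 1510/1610 variants: suspend snapshots a fixed register set into an array indexed by the enum, and resume writes it back. A minimal sketch of that usage, assuming an mpui730_sleep_save array in the platform PM code and the usual MPUI/EMIF register address macros (both are assumptions, not part of this hunk):

	#include <asm/arch/pm.h>

	/* Illustrative only; mirrors how the MPUI1510/1610 macros are used. */
	static u32 mpui730_sleep_save[MPUI730_SLEEP_SAVE_SIZE];

	static void omap730_pm_save(void)
	{
		MPUI730_SAVE(MPUI_CTRL);
		MPUI730_SAVE(MPUI_DSP_BOOT_CONFIG);
		MPUI730_SAVE(EMIFF_SDRAM_CONFIG);
		MPUI730_SAVE(OMAP_IH1_MIR);
	}

	static void omap730_pm_restore(void)
	{
		MPUI730_RESTORE(EMIFF_SDRAM_CONFIG);
		MPUI730_RESTORE(MPUI_CTRL);
		MPUI730_RESTORE(MPUI_DSP_BOOT_CONFIG);
		MPUI730_RESTORE(OMAP_IH1_MIR);
	}
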
diff --git a/include/asm-arm/arch-omap/prcm.h b/include/asm-arm/arch-omap/prcm.h
new file mode 100644
index 0000000..7b48a5c
--- /dev/null
+++ b/include/asm-arm/arch-omap/prcm.h
@@ -0,0 +1,429 @@
+/*
+ * prcm.h - Access definitions for use in OMAP24XX clock and power management
+ *
+ * Copyright (C) 2005 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __ASM_ARM_ARCH_DPM_PRCM_H
+#define __ASM_ARM_ARCH_DPM_PRCM_H
+
+/* SET_PERFORMANCE_LEVEL PARAMETERS */
+#define PRCM_HALF_SPEED 1
+#define PRCM_FULL_SPEED 2
+
+#ifndef __ASSEMBLER__
+
+#define PRCM_REG32(offset)	__REG32(OMAP24XX_PRCM_BASE + (offset))
+
+#define PRCM_REVISION		PRCM_REG32(0x000)
+#define PRCM_SYSCONFIG		PRCM_REG32(0x010)
+#define PRCM_IRQSTATUS_MPU	PRCM_REG32(0x018)
+#define PRCM_IRQENABLE_MPU	PRCM_REG32(0x01C)
+#define PRCM_VOLTCTRL		PRCM_REG32(0x050)
+#define PRCM_VOLTST		PRCM_REG32(0x054)
+#define PRCM_CLKSRC_CTRL	PRCM_REG32(0x060)
+#define PRCM_CLKOUT_CTRL	PRCM_REG32(0x070)
+#define PRCM_CLKEMUL_CTRL	PRCM_REG32(0x078)
+#define PRCM_CLKCFG_CTRL	PRCM_REG32(0x080)
+#define PRCM_CLKCFG_STATUS	PRCM_REG32(0x084)
+#define PRCM_VOLTSETUP		PRCM_REG32(0x090)
+#define PRCM_CLKSSETUP		PRCM_REG32(0x094)
+#define PRCM_POLCTRL		PRCM_REG32(0x098)
+
+/* GENERAL PURPOSE */
+#define GENERAL_PURPOSE1	PRCM_REG32(0x0B0)
+#define GENERAL_PURPOSE2	PRCM_REG32(0x0B4)
+#define GENERAL_PURPOSE3	PRCM_REG32(0x0B8)
+#define GENERAL_PURPOSE4	PRCM_REG32(0x0BC)
+#define GENERAL_PURPOSE5	PRCM_REG32(0x0C0)
+#define GENERAL_PURPOSE6	PRCM_REG32(0x0C4)
+#define GENERAL_PURPOSE7	PRCM_REG32(0x0C8)
+#define GENERAL_PURPOSE8	PRCM_REG32(0x0CC)
+#define GENERAL_PURPOSE9	PRCM_REG32(0x0D0)
+#define GENERAL_PURPOSE10	PRCM_REG32(0x0D4)
+#define GENERAL_PURPOSE11	PRCM_REG32(0x0D8)
+#define GENERAL_PURPOSE12	PRCM_REG32(0x0DC)
+#define GENERAL_PURPOSE13	PRCM_REG32(0x0E0)
+#define GENERAL_PURPOSE14	PRCM_REG32(0x0E4)
+#define GENERAL_PURPOSE15	PRCM_REG32(0x0E8)
+#define GENERAL_PURPOSE16	PRCM_REG32(0x0EC)
+#define GENERAL_PURPOSE17	PRCM_REG32(0x0F0)
+#define GENERAL_PURPOSE18	PRCM_REG32(0x0F4)
+#define GENERAL_PURPOSE19	PRCM_REG32(0x0F8)
+#define GENERAL_PURPOSE20	PRCM_REG32(0x0FC)
+
+/* MPU */
+#define CM_CLKSEL_MPU		PRCM_REG32(0x140)
+#define CM_CLKSTCTRL_MPU	PRCM_REG32(0x148)
+#define RM_RSTST_MPU		PRCM_REG32(0x158)
+#define PM_WKDEP_MPU		PRCM_REG32(0x1C8)
+#define PM_EVGENCTRL_MPU	PRCM_REG32(0x1D4)
+#define PM_EVEGENONTIM_MPU	PRCM_REG32(0x1D8)
+#define PM_EVEGENOFFTIM_MPU	PRCM_REG32(0x1DC)
+#define PM_PWSTCTRL_MPU		PRCM_REG32(0x1E0)
+#define PM_PWSTST_MPU		PRCM_REG32(0x1E4)
+
+/* CORE */
+#define CM_FCLKEN1_CORE		PRCM_REG32(0x200)
+#define CM_FCLKEN2_CORE		PRCM_REG32(0x204)
+#define CM_FCLKEN3_CORE		PRCM_REG32(0x208)
+#define CM_ICLKEN1_CORE		PRCM_REG32(0x210)
+#define CM_ICLKEN2_CORE		PRCM_REG32(0x214)
+#define CM_ICLKEN3_CORE		PRCM_REG32(0x218)
+#define CM_ICLKEN4_CORE		PRCM_REG32(0x21C)
+#define CM_IDLEST1_CORE		PRCM_REG32(0x220)
+#define CM_IDLEST2_CORE		PRCM_REG32(0x224)
+#define CM_IDLEST3_CORE		PRCM_REG32(0x228)
+#define CM_IDLEST4_CORE		PRCM_REG32(0x22C)
+#define CM_AUTOIDLE1_CORE	PRCM_REG32(0x230)
+#define CM_AUTOIDLE2_CORE	PRCM_REG32(0x234)
+#define CM_AUTOIDLE3_CORE	PRCM_REG32(0x238)
+#define CM_AUTOIDLE4_CORE	PRCM_REG32(0x23C)
+#define CM_CLKSEL1_CORE		PRCM_REG32(0x240)
+#define CM_CLKSEL2_CORE		PRCM_REG32(0x244)
+#define CM_CLKSTCTRL_CORE	PRCM_REG32(0x248)
+#define PM_WKEN1_CORE		PRCM_REG32(0x2A0)
+#define PM_WKEN2_CORE		PRCM_REG32(0x2A4)
+#define PM_WKST1_CORE		PRCM_REG32(0x2B0)
+#define PM_WKST2_CORE		PRCM_REG32(0x2B4)
+#define PM_WKDEP_CORE		PRCM_REG32(0x2C8)
+#define PM_PWSTCTRL_CORE	PRCM_REG32(0x2E0)
+#define PM_PWSTST_CORE		PRCM_REG32(0x2E4)
+
+/* GFX */
+#define CM_FCLKEN_GFX		PRCM_REG32(0x300)
+#define CM_ICLKEN_GFX		PRCM_REG32(0x310)
+#define CM_IDLEST_GFX		PRCM_REG32(0x320)
+#define CM_CLKSEL_GFX		PRCM_REG32(0x340)
+#define CM_CLKSTCTRL_GFX	PRCM_REG32(0x348)
+#define RM_RSTCTRL_GFX		PRCM_REG32(0x350)
+#define RM_RSTST_GFX		PRCM_REG32(0x358)
+#define PM_WKDEP_GFX		PRCM_REG32(0x3C8)
+#define PM_PWSTCTRL_GFX		PRCM_REG32(0x3E0)
+#define PM_PWSTST_GFX		PRCM_REG32(0x3E4)
+
+/* WAKE-UP */
+#define CM_FCLKEN_WKUP		PRCM_REG32(0x400)
+#define CM_ICLKEN_WKUP		PRCM_REG32(0x410)
+#define CM_IDLEST_WKUP		PRCM_REG32(0x420)
+#define CM_AUTOIDLE_WKUP	PRCM_REG32(0x430)
+#define CM_CLKSEL_WKUP		PRCM_REG32(0x440)
+#define RM_RSTCTRL_WKUP		PRCM_REG32(0x450)
+#define RM_RSTTIME_WKUP		PRCM_REG32(0x454)
+#define RM_RSTST_WKUP		PRCM_REG32(0x458)
+#define PM_WKEN_WKUP		PRCM_REG32(0x4A0)
+#define PM_WKST_WKUP		PRCM_REG32(0x4B0)
+
+/* CLOCKS */
+#define CM_CLKEN_PLL		PRCM_REG32(0x500)
+#define CM_IDLEST_CKGEN		PRCM_REG32(0x520)
+#define CM_AUTOIDLE_PLL		PRCM_REG32(0x530)
+#define CM_CLKSEL1_PLL		PRCM_REG32(0x540)
+#define CM_CLKSEL2_PLL		PRCM_REG32(0x544)
+
+/* DSP */
+#define CM_FCLKEN_DSP		PRCM_REG32(0x800)
+#define CM_ICLKEN_DSP		PRCM_REG32(0x810)
+#define CM_IDLEST_DSP		PRCM_REG32(0x820)
+#define CM_AUTOIDLE_DSP		PRCM_REG32(0x830)
+#define CM_CLKSEL_DSP		PRCM_REG32(0x840)
+#define CM_CLKSTCTRL_DSP	PRCM_REG32(0x848)
+#define RM_RSTCTRL_DSP		PRCM_REG32(0x850)
+#define RM_RSTST_DSP		PRCM_REG32(0x858)
+#define PM_WKEN_DSP		PRCM_REG32(0x8A0)
+#define PM_WKDEP_DSP		PRCM_REG32(0x8C8)
+#define PM_PWSTCTRL_DSP		PRCM_REG32(0x8E0)
+#define PM_PWSTST_DSP		PRCM_REG32(0x8E4)
+#define PRCM_IRQSTATUS_DSP	PRCM_REG32(0x8F0)
+#define PRCM_IRQENABLE_DSP	PRCM_REG32(0x8F4)
+
+/* IVA */
+#define PRCM_IRQSTATUS_IVA	PRCM_REG32(0x8F8)
+#define PRCM_IRQENABLE_IVA	PRCM_REG32(0x8FC)
+
+/* Modem on 2430 */
+#define CM_FCLKEN_MDM		PRCM_REG32(0xC00)
+#define CM_ICLKEN_MDM		PRCM_REG32(0xC10)
+#define CM_IDLEST_MDM		PRCM_REG32(0xC20)
+#define CM_CLKSEL_MDM		PRCM_REG32(0xC40)
+
+/* FIXME: Move to header for 2430 */
+#define DISP_BASE		(OMAP24XX_L4_IO_BASE+0x50000)
+#define DISP_REG32(offset)	__REG32(DISP_BASE + (offset))
+
+#define OMAP24XX_GPMC_BASE	(L3_24XX_BASE + 0xa000)
+#define GPMC_BASE		(OMAP24XX_GPMC_BASE)
+#define GPMC_REG32(offset)	__REG32(GPMC_BASE + (offset))
+
+#define GPT1_BASE		(OMAP24XX_GPT1)
+#define GPT1_REG32(offset)	__REG32(GPT1_BASE + (offset))
+
+/* Misc sysconfig */
+#define DISPC_SYSCONFIG		DISP_REG32(0x410)
+#define SPI_BASE		(OMAP24XX_L4_IO_BASE+0x98000)
+#define MCSPI1_SYSCONFIG	__REG32(SPI_BASE + 0x10)
+#define MCSPI2_SYSCONFIG	__REG32(SPI_BASE+0x2000 + 0x10)
+
+//#define DSP_MMU_SYSCONFIG	0x5A000010
+#define CAMERA_MMU_SYSCONFIG	__REG32(DISP_BASE+0x2C10)
+//#define IVA_MMU_SYSCONFIG	0x5D000010
+//#define DSP_DMA_SYSCONFIG	0x00FCC02C
+#define CAMERA_DMA_SYSCONFIG	__REG32(DISP_BASE+0x282C)
+#define SYSTEM_DMA_SYSCONFIG	__REG32(DISP_BASE+0x602C)
+#define GPMC_SYSCONFIG		GPMC_REG32(0x010)
+#define MAILBOXES_SYSCONFIG	__REG32(OMAP24XX_L4_IO_BASE+0x94010)
+#define UART1_SYSCONFIG		__REG32(OMAP24XX_L4_IO_BASE+0x6A054)
+#define UART2_SYSCONFIG		__REG32(OMAP24XX_L4_IO_BASE+0x6C054)
+#define UART3_SYSCONFIG		__REG32(OMAP24XX_L4_IO_BASE+0x6E054)
+//#define IVA_SYSCONFIG		0x5C060010
+#define SDRC_SYSCONFIG		__REG32(OMAP24XX_SDRC_BASE+0x10)
+#define SMS_SYSCONFIG		__REG32(OMAP24XX_SMS_BASE+0x10)
+#define SSI_SYSCONFIG		__REG32(DISP_BASE+0x8010)
+//#define VLYNQ_SYSCONFIG	0x67FFFE10
+
+/* rkw - good candidates for PM_ to start what nm was trying */
+#define OMAP24XX_GPT2		(OMAP24XX_L4_IO_BASE+0x2A000)
+#define OMAP24XX_GPT3		(OMAP24XX_L4_IO_BASE+0x78000)
+#define OMAP24XX_GPT4		(OMAP24XX_L4_IO_BASE+0x7A000)
+#define OMAP24XX_GPT5		(OMAP24XX_L4_IO_BASE+0x7C000)
+#define OMAP24XX_GPT6		(OMAP24XX_L4_IO_BASE+0x7E000)
+#define OMAP24XX_GPT7		(OMAP24XX_L4_IO_BASE+0x80000)
+#define OMAP24XX_GPT8		(OMAP24XX_L4_IO_BASE+0x82000)
+#define OMAP24XX_GPT9		(OMAP24XX_L4_IO_BASE+0x84000)
+#define OMAP24XX_GPT10		(OMAP24XX_L4_IO_BASE+0x86000)
+#define OMAP24XX_GPT11		(OMAP24XX_L4_IO_BASE+0x88000)
+#define OMAP24XX_GPT12		(OMAP24XX_L4_IO_BASE+0x8A000)
+
+#define GPTIMER1_SYSCONFIG	GPT1_REG32(0x010)
+#define GPTIMER2_SYSCONFIG	__REG32(OMAP24XX_GPT2 + 0x10)
+#define GPTIMER3_SYSCONFIG	__REG32(OMAP24XX_GPT3 + 0x10)
+#define GPTIMER4_SYSCONFIG	__REG32(OMAP24XX_GPT4 + 0x10)
+#define GPTIMER5_SYSCONFIG	__REG32(OMAP24XX_GPT5 + 0x10)
+#define GPTIMER6_SYSCONFIG	__REG32(OMAP24XX_GPT6 + 0x10)
+#define GPTIMER7_SYSCONFIG	__REG32(OMAP24XX_GPT7 + 0x10)
+#define GPTIMER8_SYSCONFIG	__REG32(OMAP24XX_GPT8 + 0x10)
+#define GPTIMER9_SYSCONFIG	__REG32(OMAP24XX_GPT9 + 0x10)
+#define GPTIMER10_SYSCONFIG	__REG32(OMAP24XX_GPT10 + 0x10)
+#define GPTIMER11_SYSCONFIG	__REG32(OMAP24XX_GPT11 + 0x10)
+#define GPTIMER12_SYSCONFIG	__REG32(OMAP24XX_GPT12 + 0x10)
+
+#define GPIOX_BASE(X)		(OMAP24XX_GPIO_BASE+(0x2000*((X)-1)))
+
+#define GPIO1_SYSCONFIG		__REG32((GPIOX_BASE(1)+0x10))
+#define GPIO2_SYSCONFIG		__REG32((GPIOX_BASE(2)+0x10))
+#define GPIO3_SYSCONFIG		__REG32((GPIOX_BASE(3)+0x10))
+#define GPIO4_SYSCONFIG		__REG32((GPIOX_BASE(4)+0x10))
+
+/* GP TIMER 1 */
+#define GPTIMER1_TISTAT		GPT1_REG32(0x014)
+#define GPTIMER1_TISR		GPT1_REG32(0x018)
+#define GPTIMER1_TIER		GPT1_REG32(0x01C)
+#define GPTIMER1_TWER		GPT1_REG32(0x020)
+#define GPTIMER1_TCLR		GPT1_REG32(0x024)
+#define GPTIMER1_TCRR		GPT1_REG32(0x028)
+#define GPTIMER1_TLDR		GPT1_REG32(0x02C)
+#define GPTIMER1_TTGR		GPT1_REG32(0x030)
+#define GPTIMER1_TWPS		GPT1_REG32(0x034)
+#define GPTIMER1_TMAR		GPT1_REG32(0x038)
+#define GPTIMER1_TCAR1		GPT1_REG32(0x03C)
+#define GPTIMER1_TSICR		GPT1_REG32(0x040)
+#define GPTIMER1_TCAR2		GPT1_REG32(0x044)
+
+/* rkw -- base fix up please... */
+#define GPTIMER3_TISR		__REG32(OMAP24XX_L4_IO_BASE+0x78018)
+
+/* SDRC */
+#define SDRC_DLLA_CTRL		__REG32(OMAP24XX_SDRC_BASE+0x060)
+#define SDRC_DLLA_STATUS	__REG32(OMAP24XX_SDRC_BASE+0x064)
+#define SDRC_DLLB_CTRL		__REG32(OMAP24XX_SDRC_BASE+0x068)
+#define SDRC_DLLB_STATUS	__REG32(OMAP24XX_SDRC_BASE+0x06C)
+#define SDRC_POWER		__REG32(OMAP24XX_SDRC_BASE+0x070)
+#define SDRC_MR_0		__REG32(OMAP24XX_SDRC_BASE+0x084)
+
+/* GPIO 1 */
+#define GPIO1_BASE		GPIOX_BASE(1)
+#define GPIO1_REG32(offset)	__REG32(GPIO1_BASE + (offset))
+#define GPIO1_IRQENABLE1	GPIO1_REG32(0x01C)
+#define GPIO1_IRQSTATUS1	GPIO1_REG32(0x018)
+#define GPIO1_IRQENABLE2	GPIO1_REG32(0x02C)
+#define GPIO1_IRQSTATUS2	GPIO1_REG32(0x028)
+#define GPIO1_WAKEUPENABLE	GPIO1_REG32(0x020)
+#define GPIO1_RISINGDETECT	GPIO1_REG32(0x048)
+#define GPIO1_DATAIN		GPIO1_REG32(0x038)
+#define GPIO1_OE		GPIO1_REG32(0x034)
+#define GPIO1_DATAOUT		GPIO1_REG32(0x03C)
+
+/* GPIO2 */
+#define GPIO2_BASE		GPIOX_BASE(2)
+#define GPIO2_REG32(offset)	__REG32(GPIO2_BASE + (offset))
+#define GPIO2_IRQENABLE1	GPIO2_REG32(0x01C)
+#define GPIO2_IRQSTATUS1	GPIO2_REG32(0x018)
+#define GPIO2_IRQENABLE2	GPIO2_REG32(0x02C)
+#define GPIO2_IRQSTATUS2	GPIO2_REG32(0x028)
+#define GPIO2_WAKEUPENABLE	GPIO2_REG32(0x020)
+#define GPIO2_RISINGDETECT	GPIO2_REG32(0x048)
+#define GPIO2_DATAIN		GPIO2_REG32(0x038)
+#define GPIO2_OE		GPIO2_REG32(0x034)
+#define GPIO2_DATAOUT		GPIO2_REG32(0x03C)
+
+/* GPIO 3 */
+#define GPIO3_BASE		GPIOX_BASE(3)
+#define GPIO3_REG32(offset)	__REG32(GPIO3_BASE + (offset))
+#define GPIO3_IRQENABLE1	GPIO3_REG32(0x01C)
+#define GPIO3_IRQSTATUS1	GPIO3_REG32(0x018)
+#define GPIO3_IRQENABLE2	GPIO3_REG32(0x02C)
+#define GPIO3_IRQSTATUS2	GPIO3_REG32(0x028)
+#define GPIO3_WAKEUPENABLE	GPIO3_REG32(0x020)
+#define GPIO3_RISINGDETECT	GPIO3_REG32(0x048)
+#define GPIO3_FALLINGDETECT	GPIO3_REG32(0x04C)
+#define GPIO3_DATAIN		GPIO3_REG32(0x038)
+#define GPIO3_OE		GPIO3_REG32(0x034)
+#define GPIO3_DATAOUT		GPIO3_REG32(0x03C)
+#define GPIO3_DEBOUNCENABLE	GPIO3_REG32(0x050)
+#define GPIO3_DEBOUNCINGTIME	GPIO3_REG32(0x054)
+
+/* GPIO 4 */
+#define GPIO4_BASE		GPIOX_BASE(4)
+#define GPIO4_REG32(offset)	__REG32(GPIO4_BASE + (offset))
+#define GPIO4_IRQENABLE1	GPIO4_REG32(0x01C)
+#define GPIO4_IRQSTATUS1	GPIO4_REG32(0x018)
+#define GPIO4_IRQENABLE2	GPIO4_REG32(0x02C)
+#define GPIO4_IRQSTATUS2	GPIO4_REG32(0x028)
+#define GPIO4_WAKEUPENABLE	GPIO4_REG32(0x020)
+#define GPIO4_RISINGDETECT	GPIO4_REG32(0x048)
+#define GPIO4_FALLINGDETECT	GPIO4_REG32(0x04C)
+#define GPIO4_DATAIN		GPIO4_REG32(0x038)
+#define GPIO4_OE		GPIO4_REG32(0x034)
+#define GPIO4_DATAOUT		GPIO4_REG32(0x03C)
+#define GPIO4_DEBOUNCENABLE	GPIO4_REG32(0x050)
+#define GPIO4_DEBOUNCINGTIME	GPIO4_REG32(0x054)
+
+
+/* IO CONFIG */
+#define CONTROL_BASE		(OMAP24XX_CTRL_BASE)
+#define CONTROL_REG32(offset)	__REG32(CONTROL_BASE + (offset))
+
+#define CONTROL_PADCONF_SPI1_NCS2	CONTROL_REG32(0x104)
+#define CONTROL_PADCONF_SYS_XTALOUT	CONTROL_REG32(0x134)
+#define CONTROL_PADCONF_UART1_RX	CONTROL_REG32(0x0C8)
+#define CONTROL_PADCONF_MCBSP1_DX	CONTROL_REG32(0x10C)
+#define CONTROL_PADCONF_GPMC_NCS4	CONTROL_REG32(0x090)
+#define CONTROL_PADCONF_DSS_D5		CONTROL_REG32(0x0B8)
+#define CONTROL_PADCONF_DSS_D9		CONTROL_REG32(0x0BC)
+#define CONTROL_PADCONF_DSS_D13		CONTROL_REG32(0x0C0)
+#define CONTROL_PADCONF_DSS_VSYNC	CONTROL_REG32(0x0CC)
+
+/* CONTROL */
+#define CONTROL_DEVCONF		CONTROL_REG32(0x274)
+
+/* INTERRUPT CONTROLLER */
+#define INTC_BASE		(OMAP24XX_L4_IO_BASE+0xfe000)
+#define INTC_REG32(offset)	__REG32(INTC_BASE + (offset))
+
+#define INTC1_U_BASE		INTC_REG32(0x000)
+#define INTC_MIR0		INTC_REG32(0x084)
+#define INTC_MIR_SET0		INTC_REG32(0x08C)
+#define INTC_MIR_CLEAR0		INTC_REG32(0x088)
+#define INTC_ISR_CLEAR0		INTC_REG32(0x094)
+#define INTC_MIR1		INTC_REG32(0x0A4)
+#define INTC_MIR_SET1		INTC_REG32(0x0AC)
+#define INTC_MIR_CLEAR1		INTC_REG32(0x0A8)
+#define INTC_ISR_CLEAR1		INTC_REG32(0x0B4)
+#define INTC_MIR2		INTC_REG32(0x0C4)
+#define INTC_MIR_SET2		INTC_REG32(0x0CC)
+#define INTC_MIR_CLEAR2		INTC_REG32(0x0C8)
+#define INTC_ISR_CLEAR2		INTC_REG32(0x0D4)
+#define INTC_SIR_IRQ		INTC_REG32(0x040)
+#define INTC_CONTROL		INTC_REG32(0x048)
+#define INTC_ILR11		INTC_REG32(0x12C)
+#define INTC_ILR32		INTC_REG32(0x180)
+#define INTC_ILR37		INTC_REG32(0x194)
+#define INTC_SYSCONFIG		INTC_REG32(0x010)
+
+/* RAM FIREWALL */
+#define RAMFW_BASE		(0x68005000)
+#define RAMFW_REG32(offset)	__REG32(RAMFW_BASE + (offset))
+
+#define RAMFW_REQINFOPERM0	RAMFW_REG32(0x048)
+#define RAMFW_READPERM0		RAMFW_REG32(0x050)
+#define RAMFW_WRITEPERM0	RAMFW_REG32(0x058)
+
+/* GPMC CS1 FPGA ON USER INTERFACE MODULE */
+//#define DEBUG_BOARD_LED_REGISTER 0x04000014
+
+/* GPMC CS0 */
+#define GPMC_CONFIG1_0		GPMC_REG32(0x060)
+#define GPMC_CONFIG2_0		GPMC_REG32(0x064)
+#define GPMC_CONFIG3_0		GPMC_REG32(0x068)
+#define GPMC_CONFIG4_0		GPMC_REG32(0x06C)
+#define GPMC_CONFIG5_0		GPMC_REG32(0x070)
+#define GPMC_CONFIG6_0		GPMC_REG32(0x074)
+#define GPMC_CONFIG7_0		GPMC_REG32(0x078)
+
+/* GPMC CS1 */
+#define GPMC_CONFIG1_1		GPMC_REG32(0x090)
+#define GPMC_CONFIG2_1		GPMC_REG32(0x094)
+#define GPMC_CONFIG3_1		GPMC_REG32(0x098)
+#define GPMC_CONFIG4_1		GPMC_REG32(0x09C)
+#define GPMC_CONFIG5_1		GPMC_REG32(0x0a0)
+#define GPMC_CONFIG6_1		GPMC_REG32(0x0a4)
+#define GPMC_CONFIG7_1		GPMC_REG32(0x0a8)
+
+/* DSS */
+#define DSS_CONTROL		DISP_REG32(0x040)
+#define DISPC_CONTROL		DISP_REG32(0x440)
+#define DISPC_SYSSTATUS		DISP_REG32(0x414)
+#define DISPC_IRQSTATUS		DISP_REG32(0x418)
+#define DISPC_IRQENABLE		DISP_REG32(0x41C)
+#define DISPC_CONFIG		DISP_REG32(0x444)
+#define DISPC_DEFAULT_COLOR0	DISP_REG32(0x44C)
+#define DISPC_DEFAULT_COLOR1	DISP_REG32(0x450)
+#define DISPC_TRANS_COLOR0	DISP_REG32(0x454)
+#define DISPC_TRANS_COLOR1	DISP_REG32(0x458)
+#define DISPC_LINE_NUMBER	DISP_REG32(0x460)
+#define DISPC_TIMING_H		DISP_REG32(0x464)
+#define DISPC_TIMING_V		DISP_REG32(0x468)
+#define DISPC_POL_FREQ		DISP_REG32(0x46C)
+#define DISPC_DIVISOR		DISP_REG32(0x470)
+#define DISPC_SIZE_DIG		DISP_REG32(0x478)
+#define DISPC_SIZE_LCD		DISP_REG32(0x47C)
+#define DISPC_GFX_BA0		DISP_REG32(0x480)
+#define DISPC_GFX_BA1		DISP_REG32(0x484)
+#define DISPC_GFX_POSITION	DISP_REG32(0x488)
+#define DISPC_GFX_SIZE		DISP_REG32(0x48C)
+#define DISPC_GFX_ATTRIBUTES	DISP_REG32(0x4A0)
+#define DISPC_GFX_FIFO_THRESHOLD	DISP_REG32(0x4A4)
+#define DISPC_GFX_ROW_INC	DISP_REG32(0x4AC)
+#define DISPC_GFX_PIXEL_INC	DISP_REG32(0x4B0)
+#define DISPC_GFX_WINDOW_SKIP	DISP_REG32(0x4B4)
+#define DISPC_GFX_TABLE_BA	DISP_REG32(0x4B8)
+#define DISPC_DATA_CYCLE1	DISP_REG32(0x5D4)
+#define DISPC_DATA_CYCLE2	DISP_REG32(0x5D8)
+#define DISPC_DATA_CYCLE3	DISP_REG32(0x5DC)
+
+/* Wake up define for board */
+#define GPIO97			(1 << 1)
+#define GPIO88			(1 << 24)
+
+#endif /* __ASSEMBLER__ */
+
+#endif
+
+
+
+
+
diff --git a/include/asm-arm/arch-omap/sram.h b/include/asm-arm/arch-omap/sram.h
new file mode 100644
index 0000000..e72ccbf
--- /dev/null
+++ b/include/asm-arm/arch-omap/sram.h
@@ -0,0 +1,38 @@
+/*
+ * linux/include/asm-arm/arch-omap/sram.h
+ *
+ * Interface for functions that need to be run in internal SRAM
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_OMAP_SRAM_H
+#define __ARCH_ARM_OMAP_SRAM_H
+
+extern void * omap_sram_push(void * start, unsigned long size);
+extern void omap_sram_reprogram_clock(u32 dpllctl, u32 ckctl);
+
+extern void omap2_sram_ddr_init(u32 *slow_dll_ctrl, u32 fast_dll_ctrl,
+				u32 base_cs, u32 force_unlock);
+extern void omap2_sram_reprogram_sdrc(u32 perf_level, u32 dll_val,
+				      u32 mem_type);
+extern u32 omap2_set_prcm(u32 dpll_ctrl_val, u32 sdrc_rfr_val, int bypass);
+
+
+/* Do not use these */
+extern void sram_reprogram_clock(u32 ckctl, u32 dpllctl);
+extern unsigned long sram_reprogram_clock_sz;
+
+extern void sram_ddr_init(u32 *slow_dll_ctrl, u32 fast_dll_ctrl,
+			  u32 base_cs, u32 force_unlock);
+extern unsigned long sram_ddr_init_sz;
+
+extern u32 sram_set_prcm(u32 dpll_ctrl_val, u32 sdrc_rfr_val, int bypass);
+extern unsigned long sram_set_prcm_sz;
+
+extern void sram_reprogram_sdrc(u32 perf_level, u32 dll_val, u32 mem_type);
+extern unsigned long sram_reprogram_sdrc_sz;
+
+#endif
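omap_sram_push() copies a routine (whose size is exported alongside it) into internal SRAM and returns the SRAM address; callers then invoke the copy, which is why SDRAM can be reprogrammed while it runs. A rough sketch of that pattern, mirroring what the platform SRAM code does with the low-level entries marked "Do not use these" above (the function-pointer variable and init function here are illustrative only):

	#include <linux/init.h>
	#include <linux/errno.h>
	#include <asm/arch/sram.h>

	static void (*example_reprogram_clock)(u32 ckctl, u32 dpllctl);

	static int __init example_sram_init(void)
	{
		/* Copy the clock stub into SRAM once; keep the SRAM address. */
		example_reprogram_clock = (void (*)(u32, u32))
			omap_sram_push(sram_reprogram_clock,
				       sram_reprogram_clock_sz);
		return example_reprogram_clock ? 0 : -ENOMEM;
	}

	static void example_set_clocks(u32 ckctl, u32 dpllctl)
	{
		/* Runs from SRAM, so SDRAM is not touched while it executes. */
		example_reprogram_clock(ckctl, dpllctl);
	}
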
diff --git a/include/asm-arm/arch-omap/system.h b/include/asm-arm/arch-omap/system.h
index ff37bc2..b43cdd2 100644
--- a/include/asm-arm/arch-omap/system.h
+++ b/include/asm-arm/arch-omap/system.h
@@ -6,18 +6,21 @@
 #define __ASM_ARCH_SYSTEM_H
 #include <linux/config.h>
 #include <asm/mach-types.h>
+#include <asm/hardware/clock.h>
 #include <asm/arch/hardware.h>
-#include <asm/mach-types.h>
+#include <asm/arch/prcm.h>
+
+#ifndef CONFIG_MACH_VOICEBLUE
+#define voiceblue_reset()		do {} while (0)
+#endif
 
 static inline void arch_idle(void)
 {
 	cpu_do_idle();
 }
 
-static inline void arch_reset(char mode)
+static inline void omap1_arch_reset(char mode)
 {
-
-#ifdef CONFIG_ARCH_OMAP16XX
 	/*
 	 * Workaround for 5912/1611b bug mentioned in sprz209d.pdf p. 28
 	 * "Global Software Reset Affects Traffic Controller Frequency".
@@ -27,13 +30,31 @@
 				 DPLL_CTL);
 		omap_writew(0x8, ARM_RSTCT1);
 	}
-#endif
-#ifdef CONFIG_MACH_VOICEBLUE
+
 	if (machine_is_voiceblue())
 		voiceblue_reset();
 	else
-#endif
 		omap_writew(1, ARM_RSTCT1);
 }
 
+static inline void omap2_arch_reset(char mode)
+{
+	u32 rate;
+	struct clk *vclk, *sclk;
+
+	vclk = clk_get(NULL, "virt_prcm_set");
+	sclk = clk_get(NULL, "sys_ck");
+	rate = clk_get_rate(sclk);
+	clk_set_rate(vclk, rate);	/* go to bypass for OMAP limitation */
+	RM_RSTCTRL_WKUP |= 2;
+}
+
+static inline void arch_reset(char mode)
+{
+	if (!cpu_is_omap24xx())
+		omap1_arch_reset(mode);
+	else
+		omap2_arch_reset(mode);
+}
+
 #endif
diff --git a/include/asm-arm/arch-omap/timex.h b/include/asm-arm/arch-omap/timex.h
index b61ddb4..21f2e36 100644
--- a/include/asm-arm/arch-omap/timex.h
+++ b/include/asm-arm/arch-omap/timex.h
@@ -28,6 +28,14 @@
 #if !defined(__ASM_ARCH_OMAP_TIMEX_H)
 #define __ASM_ARCH_OMAP_TIMEX_H
 
+/*
+ * OMAP 32KHz timer updates time one jiffy at a time from a secondary timer,
+ * and that's why the CLOCK_TICK_RATE is not 32768.
+ */
+#ifdef CONFIG_OMAP_32K_TIMER
+#define CLOCK_TICK_RATE		(CONFIG_OMAP_32K_TIMER_HZ)
+#else
 #define CLOCK_TICK_RATE		(HZ * 100000UL)
+#endif
 
 #endif /* __ASM_ARCH_OMAP_TIMEX_H */
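The practical effect of this #ifdef: with the 32K timer, CONFIG_OMAP_32K_TIMER_HZ is chosen to match HZ, so the generic LATCH arithmetic collapses to one jiffy per hardware tick, matching the comment above; otherwise the old HZ*100000 value keeps LATCH at 100000. A worked example (HZ values assumed for illustration):

	/* LATCH-style arithmetic, as in <linux/timex.h>: (rate + HZ/2) / HZ */
	static unsigned long example_latch(unsigned long tick_rate, unsigned long hz)
	{
		return (tick_rate + hz / 2) / hz;
	}

	/*
	 * CONFIG_OMAP_32K_TIMER, HZ == CONFIG_OMAP_32K_TIMER_HZ == 128:
	 *	example_latch(128, 128)          == 1       (one jiffy per tick)
	 * Without the 32K timer, HZ == 100:
	 *	example_latch(100 * 100000, 100) == 100000
	 */
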
diff --git a/include/asm-arm/arch-omap/uncompress.h b/include/asm-arm/arch-omap/uncompress.h
index 3545c86..c718264 100644
--- a/include/asm-arm/arch-omap/uncompress.h
+++ b/include/asm-arm/arch-omap/uncompress.h
@@ -36,10 +36,14 @@
 	volatile u8 * uart = 0;
 	int shift = 2;
 
+#ifdef CONFIG_MACH_OMAP_PALMTE
+	return;
+#endif
+
 #ifdef CONFIG_ARCH_OMAP
 #ifdef	CONFIG_OMAP_LL_DEBUG_UART3
 	uart = (volatile u8 *)(OMAP_UART3_BASE);
-#elif	CONFIG_OMAP_LL_DEBUG_UART2
+#elif defined(CONFIG_OMAP_LL_DEBUG_UART2)
 	uart = (volatile u8 *)(OMAP_UART2_BASE);
 #else
 	uart = (volatile u8 *)(OMAP_UART1_BASE);
diff --git a/include/asm-arm/arch-pxa/sharpsl.h b/include/asm-arm/arch-pxa/sharpsl.h
index 311f2bb..0b43495 100644
--- a/include/asm-arm/arch-pxa/sharpsl.h
+++ b/include/asm-arm/arch-pxa/sharpsl.h
@@ -21,12 +21,18 @@
 	void (*wait_hsync)(void);
 };
 
+
 /*
  * SharpSL Backlight
  */
-
 struct corgibl_machinfo {
 	int max_intensity;
 	void (*set_bl_intensity)(int intensity);
 };
+extern void corgibl_limit_intensity(int limit);
 
+
+/*
+ * SharpSL Battery/PM Driver
+ */
+extern void sharpsl_battery_kick(void);
diff --git a/include/asm-arm/arch-pxa/ssp.h b/include/asm-arm/arch-pxa/ssp.h
index 6ec67b0..949878c 100644
--- a/include/asm-arm/arch-pxa/ssp.h
+++ b/include/asm-arm/arch-pxa/ssp.h
@@ -18,6 +18,11 @@
 #ifndef SSP_H
 #define SSP_H
 
+/*
+ * SSP initialisation flags
+ */
+#define SSP_NO_IRQ	0x1		/* don't register an irq handler in SSP driver */
+
 struct ssp_state {
 	u32	cr0;
 	u32 cr1;
@@ -31,6 +36,7 @@
 	u32 flags;
 	u32 psp_flags;
 	u32 speed;
+	int irq;
 };
 
 int ssp_write_word(struct ssp_dev *dev, u32 data);
@@ -40,7 +46,7 @@
 void ssp_disable(struct ssp_dev *dev);
 void ssp_save_state(struct ssp_dev *dev, struct ssp_state *ssp);
 void ssp_restore_state(struct ssp_dev *dev, struct ssp_state *ssp);
-int ssp_init(struct ssp_dev *dev, u32 port);
+int ssp_init(struct ssp_dev *dev, u32 port, u32 init_flags);
 int ssp_config(struct ssp_dev *dev, u32 mode, u32 flags, u32 psp_flags, u32 speed);
 void ssp_exit(struct ssp_dev *dev);
 
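The extra init_flags argument lets callers that handle (or poll) the port themselves pass SSP_NO_IRQ so the SSP driver does not install its own interrupt handler. A minimal usage sketch against the declarations above (the port number and the zeroed config values are placeholders, not taken from a real board file):

	#include <asm/arch/ssp.h>

	static struct ssp_dev example_ssp;

	static int example_ssp_setup(void)
	{
		int ret;

		/* Claim SSP port 1 without the driver's irq handler. */
		ret = ssp_init(&example_ssp, 1, SSP_NO_IRQ);
		if (ret)
			return ret;

		/* Mode/flags/psp_flags/speed values are placeholders. */
		return ssp_config(&example_ssp, 0, 0, 0, 0);
	}

	static void example_ssp_teardown(void)
	{
		ssp_disable(&example_ssp);
		ssp_exit(&example_ssp);
	}
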
diff --git a/include/asm-i386/ide.h b/include/asm-i386/ide.h
index 79dfab8..4544401 100644
--- a/include/asm-i386/ide.h
+++ b/include/asm-i386/ide.h
@@ -41,6 +41,12 @@
 
 static __inline__ unsigned long ide_default_io_base(int index)
 {
+	/*
+	 *	If PCI is present then it is not safe to poke around
+	 *	the other legacy IDE ports. Only 0x1f0 and 0x170 are
+	 *	defined compatibility mode ports for PCI. A user can 
+	 *	override this using ide= but we must default safe.
+	 */
 	if (pci_find_device(PCI_ANY_ID, PCI_ANY_ID, NULL) == NULL) {
 		switch(index) {
 			case 2: return 0x1e8;
diff --git a/include/asm-ppc64/abs_addr.h b/include/asm-powerpc/abs_addr.h
similarity index 94%
rename from include/asm-ppc64/abs_addr.h
rename to include/asm-powerpc/abs_addr.h
index dc3fc3f..1841510 100644
--- a/include/asm-ppc64/abs_addr.h
+++ b/include/asm-powerpc/abs_addr.h
@@ -1,5 +1,5 @@
-#ifndef _ABS_ADDR_H
-#define _ABS_ADDR_H
+#ifndef _ASM_POWERPC_ABS_ADDR_H
+#define _ASM_POWERPC_ABS_ADDR_H
 
 #include <linux/config.h>
 
@@ -70,4 +70,4 @@
 #define iseries_hv_addr(virtaddr)	\
 	(0x8000000000000000 | virt_to_abs(virtaddr))
 
-#endif /* _ABS_ADDR_H */
+#endif /* _ASM_POWERPC_ABS_ADDR_H */
diff --git a/include/asm-powerpc/asm-compat.h b/include/asm-powerpc/asm-compat.h
new file mode 100644
index 0000000..8b133ef
--- /dev/null
+++ b/include/asm-powerpc/asm-compat.h
@@ -0,0 +1,55 @@
+#ifndef _ASM_POWERPC_ASM_COMPAT_H
+#define _ASM_POWERPC_ASM_COMPAT_H
+
+#include <linux/config.h>
+#include <asm/types.h>
+
+#ifdef __ASSEMBLY__
+#  define stringify_in_c(...)	__VA_ARGS__
+#  define ASM_CONST(x)		x
+#else
+/* This version of stringify will deal with commas... */
+#  define __stringify_in_c(...)	#__VA_ARGS__
+#  define stringify_in_c(...)	__stringify_in_c(__VA_ARGS__) " "
+#  define __ASM_CONST(x)	x##UL
+#  define ASM_CONST(x)		__ASM_CONST(x)
+#endif
+
+#ifdef __powerpc64__
+
+/* operations for longs and pointers */
+#define PPC_LL		stringify_in_c(ld)
+#define PPC_STL		stringify_in_c(std)
+#define PPC_LCMPI	stringify_in_c(cmpdi)
+#define PPC_LONG	stringify_in_c(.llong)
+#define PPC_TLNEI	stringify_in_c(tdnei)
+#define PPC_LLARX	stringify_in_c(ldarx)
+#define PPC_STLCX	stringify_in_c(stdcx.)
+#define PPC_CNTLZL	stringify_in_c(cntlzd)
+
+#else /* 32-bit */
+
+/* operations for longs and pointers */
+#define PPC_LL		stringify_in_c(lwz)
+#define PPC_STL		stringify_in_c(stw)
+#define PPC_LCMPI	stringify_in_c(cmpwi)
+#define PPC_LONG	stringify_in_c(.long)
+#define PPC_TLNEI	stringify_in_c(twnei)
+#define PPC_LLARX	stringify_in_c(lwarx)
+#define PPC_STLCX	stringify_in_c(stwcx.)
+#define PPC_CNTLZL	stringify_in_c(cntlzw)
+
+#endif
+
+#ifdef CONFIG_IBM405_ERR77
+/* Erratum #77 on the 405 means we need a sync or dcbt before every
+ * stwcx.  The old ATOMIC_SYNC_FIX covered some but not all of this.
+ */
+#define PPC405_ERR77(ra,rb)	stringify_in_c(dcbt	ra, rb;)
+#define	PPC405_ERR77_SYNC	stringify_in_c(sync;)
+#else
+#define PPC405_ERR77(ra,rb)
+#define PPC405_ERR77_SYNC
+#endif
+
+#endif /* _ASM_POWERPC_ASM_COMPAT_H */
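The stringify_in_c() trick lets the same PPC_* mnemonics be used both in .S files (bare mnemonic) and inside C inline-asm strings (quoted string plus a trailing space). A small illustrative sketch of a size-agnostic load built from them; the real consumers are the atomic, bitops, bug and futex headers converted below:

	#include <asm/asm-compat.h>

	/* Expands to "lwz %0,0(%1)" on ppc32 and "ld %0,0(%1)" on ppc64. */
	static inline unsigned long example_load_long(unsigned long *p)
	{
		unsigned long val;

		__asm__ __volatile__(
			PPC_LL "%0,0(%1)"
			: "=r" (val)
			: "b" (p), "m" (*p));
		return val;
	}
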
diff --git a/include/asm-powerpc/atomic.h b/include/asm-powerpc/atomic.h
index ed4b345..9c0b372 100644
--- a/include/asm-powerpc/atomic.h
+++ b/include/asm-powerpc/atomic.h
@@ -9,21 +9,13 @@
 
 #ifdef __KERNEL__
 #include <asm/synch.h>
+#include <asm/asm-compat.h>
 
 #define ATOMIC_INIT(i)		{ (i) }
 
 #define atomic_read(v)		((v)->counter)
 #define atomic_set(v,i)		(((v)->counter) = (i))
 
-/* Erratum #77 on the 405 means we need a sync or dcbt before every stwcx.
- * The old ATOMIC_SYNC_FIX covered some but not all of this.
- */
-#ifdef CONFIG_IBM405_ERR77
-#define PPC405_ERR77(ra,rb)	"dcbt " #ra "," #rb ";"
-#else
-#define PPC405_ERR77(ra,rb)
-#endif
-
 static __inline__ void atomic_add(int a, atomic_t *v)
 {
 	int t;
@@ -205,5 +197,183 @@
 #define smp_mb__before_atomic_inc()     smp_mb()
 #define smp_mb__after_atomic_inc()      smp_mb()
 
+#ifdef __powerpc64__
+
+typedef struct { volatile long counter; } atomic64_t;
+
+#define ATOMIC64_INIT(i)	{ (i) }
+
+#define atomic64_read(v)	((v)->counter)
+#define atomic64_set(v,i)	(((v)->counter) = (i))
+
+static __inline__ void atomic64_add(long a, atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+"1:	ldarx	%0,0,%3		# atomic64_add\n\
+	add	%0,%2,%0\n\
+	stdcx.	%0,0,%3 \n\
+	bne-	1b"
+	: "=&r" (t), "=m" (v->counter)
+	: "r" (a), "r" (&v->counter), "m" (v->counter)
+	: "cc");
+}
+
+static __inline__ long atomic64_add_return(long a, atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:	ldarx	%0,0,%2		# atomic64_add_return\n\
+	add	%0,%1,%0\n\
+	stdcx.	%0,0,%2 \n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	: "=&r" (t)
+	: "r" (a), "r" (&v->counter)
+	: "cc", "memory");
+
+	return t;
+}
+
+#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
+
+static __inline__ void atomic64_sub(long a, atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+"1:	ldarx	%0,0,%3		# atomic64_sub\n\
+	subf	%0,%2,%0\n\
+	stdcx.	%0,0,%3 \n\
+	bne-	1b"
+	: "=&r" (t), "=m" (v->counter)
+	: "r" (a), "r" (&v->counter), "m" (v->counter)
+	: "cc");
+}
+
+static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:	ldarx	%0,0,%2		# atomic64_sub_return\n\
+	subf	%0,%1,%0\n\
+	stdcx.	%0,0,%2 \n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	: "=&r" (t)
+	: "r" (a), "r" (&v->counter)
+	: "cc", "memory");
+
+	return t;
+}
+
+static __inline__ void atomic64_inc(atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+"1:	ldarx	%0,0,%2		# atomic64_inc\n\
+	addic	%0,%0,1\n\
+	stdcx.	%0,0,%2 \n\
+	bne-	1b"
+	: "=&r" (t), "=m" (v->counter)
+	: "r" (&v->counter), "m" (v->counter)
+	: "cc");
+}
+
+static __inline__ long atomic64_inc_return(atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:	ldarx	%0,0,%1		# atomic64_inc_return\n\
+	addic	%0,%0,1\n\
+	stdcx.	%0,0,%1 \n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	: "=&r" (t)
+	: "r" (&v->counter)
+	: "cc", "memory");
+
+	return t;
+}
+
+/*
+ * atomic64_inc_and_test - increment and test
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
+
+static __inline__ void atomic64_dec(atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+"1:	ldarx	%0,0,%2		# atomic64_dec\n\
+	addic	%0,%0,-1\n\
+	stdcx.	%0,0,%2\n\
+	bne-	1b"
+	: "=&r" (t), "=m" (v->counter)
+	: "r" (&v->counter), "m" (v->counter)
+	: "cc");
+}
+
+static __inline__ long atomic64_dec_return(atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:	ldarx	%0,0,%1		# atomic64_dec_return\n\
+	addic	%0,%0,-1\n\
+	stdcx.	%0,0,%1\n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	: "=&r" (t)
+	: "r" (&v->counter)
+	: "cc", "memory");
+
+	return t;
+}
+
+#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
+#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
+
+/*
+ * Atomically test *v and decrement if it is greater than 0.
+ * The function returns the old value of *v minus 1.
+ */
+static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
+	addic.	%0,%0,-1\n\
+	blt-	2f\n\
+	stdcx.	%0,0,%1\n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	"\n\
+2:"	: "=&r" (t)
+	: "r" (&v->counter)
+	: "cc", "memory");
+
+	return t;
+}
+
+#endif /* __powerpc64__ */
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_ATOMIC_H_ */
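The new atomic64_t API mirrors the existing 32-bit atomic_t one, so 64-bit counters can be kept lock-free on ppc64. A short usage sketch (the counter names and the semaphore-like use of atomic64_dec_if_positive are illustrative):

	#include <asm/atomic.h>

	static atomic64_t example_bytes = ATOMIC64_INIT(0);

	static void example_account(long nbytes)
	{
		atomic64_add(nbytes, &example_bytes);
	}

	static int example_release(long nbytes)
	{
		/* Returns true once the counter drops back to zero. */
		return atomic64_sub_and_test(nbytes, &example_bytes);
	}

	static int example_try_take_one(atomic64_t *avail)
	{
		/* Decrements only if the result stays >= 0; the old value
		 * minus one is returned, so < 0 means nothing was taken. */
		return atomic64_dec_if_positive(avail) >= 0;
	}
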
diff --git a/include/asm-powerpc/bitops.h b/include/asm-powerpc/bitops.h
index dc25c53..5727229 100644
--- a/include/asm-powerpc/bitops.h
+++ b/include/asm-powerpc/bitops.h
@@ -40,6 +40,7 @@
 
 #include <linux/compiler.h>
 #include <asm/atomic.h>
+#include <asm/asm-compat.h>
 #include <asm/synch.h>
 
 /*
@@ -52,16 +53,6 @@
 #define BITOP_WORD(nr)		((nr) / BITS_PER_LONG)
 #define BITOP_LE_SWIZZLE	((BITS_PER_LONG-1) & ~0x7)
 
-#ifdef CONFIG_PPC64
-#define LARXL		"ldarx"
-#define STCXL		"stdcx."
-#define CNTLZL		"cntlzd"
-#else
-#define LARXL		"lwarx"
-#define STCXL		"stwcx."
-#define CNTLZL		"cntlzw"
-#endif
-
 static __inline__ void set_bit(int nr, volatile unsigned long *addr)
 {
 	unsigned long old;
@@ -69,10 +60,10 @@
 	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
 
 	__asm__ __volatile__(
-"1:"	LARXL "	%0,0,%3	# set_bit\n"
+"1:"	PPC_LLARX "%0,0,%3	# set_bit\n"
 	"or	%0,%0,%2\n"
 	PPC405_ERR77(0,%3)
-	STCXL "	%0,0,%3\n"
+	PPC_STLCX "%0,0,%3\n"
 	"bne-	1b"
 	: "=&r"(old), "=m"(*p)
 	: "r"(mask), "r"(p), "m"(*p)
@@ -86,10 +77,10 @@
 	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
 
 	__asm__ __volatile__(
-"1:"	LARXL "	%0,0,%3	# set_bit\n"
+"1:"	PPC_LLARX "%0,0,%3	# clear_bit\n"
 	"andc	%0,%0,%2\n"
 	PPC405_ERR77(0,%3)
-	STCXL "	%0,0,%3\n"
+	PPC_STLCX "%0,0,%3\n"
 	"bne-	1b"
 	: "=&r"(old), "=m"(*p)
 	: "r"(mask), "r"(p), "m"(*p)
@@ -103,10 +94,10 @@
 	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
 
 	__asm__ __volatile__(
-"1:"	LARXL "	%0,0,%3	# set_bit\n"
+"1:"	PPC_LLARX "%0,0,%3	# change_bit\n"
 	"xor	%0,%0,%2\n"
 	PPC405_ERR77(0,%3)
-	STCXL "	%0,0,%3\n"
+	PPC_STLCX "%0,0,%3\n"
 	"bne-	1b"
 	: "=&r"(old), "=m"(*p)
 	: "r"(mask), "r"(p), "m"(*p)
@@ -122,10 +113,10 @@
 
 	__asm__ __volatile__(
 	EIEIO_ON_SMP
-"1:"	LARXL "	%0,0,%3		# test_and_set_bit\n"
+"1:"	PPC_LLARX "%0,0,%3		# test_and_set_bit\n"
 	"or	%1,%0,%2 \n"
 	PPC405_ERR77(0,%3)
-	STCXL "	%1,0,%3 \n"
+	PPC_STLCX "%1,0,%3 \n"
 	"bne-	1b"
 	ISYNC_ON_SMP
 	: "=&r" (old), "=&r" (t)
@@ -144,10 +135,10 @@
 
 	__asm__ __volatile__(
 	EIEIO_ON_SMP
-"1:"	LARXL "	%0,0,%3		# test_and_clear_bit\n"
+"1:"	PPC_LLARX "%0,0,%3		# test_and_clear_bit\n"
 	"andc	%1,%0,%2 \n"
 	PPC405_ERR77(0,%3)
-	STCXL "	%1,0,%3 \n"
+	PPC_STLCX "%1,0,%3 \n"
 	"bne-	1b"
 	ISYNC_ON_SMP
 	: "=&r" (old), "=&r" (t)
@@ -166,10 +157,10 @@
 
 	__asm__ __volatile__(
 	EIEIO_ON_SMP
-"1:"	LARXL "	%0,0,%3		# test_and_change_bit\n"
+"1:"	PPC_LLARX "%0,0,%3		# test_and_change_bit\n"
 	"xor	%1,%0,%2 \n"
 	PPC405_ERR77(0,%3)
-	STCXL "	%1,0,%3 \n"
+	PPC_STLCX "%1,0,%3 \n"
 	"bne-	1b"
 	ISYNC_ON_SMP
 	: "=&r" (old), "=&r" (t)
@@ -184,9 +175,9 @@
         unsigned long old;
 
 	__asm__ __volatile__(
-"1:"	LARXL "	%0,0,%3         # set_bit\n"
+"1:"	PPC_LLARX "%0,0,%3         # set_bits\n"
 	"or	%0,%0,%2\n"
-	STCXL "	%0,0,%3\n"
+	PPC_STLCX "%0,0,%3\n"
 	"bne-	1b"
 	: "=&r" (old), "=m" (*addr)
 	: "r" (mask), "r" (addr), "m" (*addr)
@@ -268,7 +259,7 @@
 {
 	int lz;
 
-	asm (CNTLZL " %0,%1" : "=r" (lz) : "r" (x));
+	asm (PPC_CNTLZL "%0,%1" : "=r" (lz) : "r" (x));
 	return BITS_PER_LONG - 1 - lz;
 }
 
diff --git a/include/asm-powerpc/bug.h b/include/asm-powerpc/bug.h
index d625ee5..b001ecb 100644
--- a/include/asm-powerpc/bug.h
+++ b/include/asm-powerpc/bug.h
@@ -1,6 +1,7 @@
 #ifndef _ASM_POWERPC_BUG_H
 #define _ASM_POWERPC_BUG_H
 
+#include <asm/asm-compat.h>
 /*
  * Define an illegal instr to trap on the bug.
  * We don't use 0 because that marks the end of a function
@@ -11,14 +12,6 @@
 
 #ifndef __ASSEMBLY__
 
-#ifdef __powerpc64__
-#define BUG_TABLE_ENTRY		".llong"
-#define BUG_TRAP_OP		"tdnei"
-#else 
-#define BUG_TABLE_ENTRY		".long"
-#define BUG_TRAP_OP		"twnei"
-#endif /* __powerpc64__ */
-
 struct bug_entry {
 	unsigned long	bug_addr;
 	long		line;
@@ -40,16 +33,16 @@
 	__asm__ __volatile__(						 \
 		"1:	twi 31,0,0\n"					 \
 		".section __bug_table,\"a\"\n"				 \
-		"\t"BUG_TABLE_ENTRY"	1b,%0,%1,%2\n"			 \
+		"\t"PPC_LONG"	1b,%0,%1,%2\n"			 \
 		".previous"						 \
 		: : "i" (__LINE__), "i" (__FILE__), "i" (__FUNCTION__)); \
 } while (0)
 
 #define BUG_ON(x) do {						\
 	__asm__ __volatile__(					\
-		"1:	"BUG_TRAP_OP"	%0,0\n"			\
+		"1:	"PPC_TLNEI"	%0,0\n"			\
 		".section __bug_table,\"a\"\n"			\
-		"\t"BUG_TABLE_ENTRY"	1b,%1,%2,%3\n"		\
+		"\t"PPC_LONG"	1b,%1,%2,%3\n"		\
 		".previous"					\
 		: : "r" ((long)(x)), "i" (__LINE__),		\
 		    "i" (__FILE__), "i" (__FUNCTION__));	\
@@ -57,9 +50,9 @@
 
 #define WARN_ON(x) do {						\
 	__asm__ __volatile__(					\
-		"1:	"BUG_TRAP_OP"	%0,0\n"			\
+		"1:	"PPC_TLNEI"	%0,0\n"			\
 		".section __bug_table,\"a\"\n"			\
-		"\t"BUG_TABLE_ENTRY"	1b,%1,%2,%3\n"		\
+		"\t"PPC_LONG"	1b,%1,%2,%3\n"		\
 		".previous"					\
 		: : "r" ((long)(x)),				\
 		    "i" (__LINE__ + BUG_WARNING_TRAP),		\
diff --git a/include/asm-powerpc/cache.h b/include/asm-powerpc/cache.h
new file mode 100644
index 0000000..26ce502
--- /dev/null
+++ b/include/asm-powerpc/cache.h
@@ -0,0 +1,40 @@
+#ifndef _ASM_POWERPC_CACHE_H
+#define _ASM_POWERPC_CACHE_H
+
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+
+/* bytes per L1 cache line */
+#if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
+#define L1_CACHE_SHIFT		4
+#define MAX_COPY_PREFETCH	1
+#elif defined(CONFIG_PPC32)
+#define L1_CACHE_SHIFT		5
+#define MAX_COPY_PREFETCH	4
+#else /* CONFIG_PPC64 */
+#define L1_CACHE_SHIFT		7
+#endif
+
+#define	L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
+
+#define	SMP_CACHE_BYTES		L1_CACHE_BYTES
+#define L1_CACHE_SHIFT_MAX	7 /* largest L1 which this arch supports */
+
+#if defined(__powerpc64__) && !defined(__ASSEMBLY__)
+struct ppc64_caches {
+	u32	dsize;			/* L1 d-cache size */
+	u32	dline_size;		/* L1 d-cache line size	*/
+	u32	log_dline_size;
+	u32	dlines_per_page;
+	u32	isize;			/* L1 i-cache size */
+	u32	iline_size;		/* L1 i-cache line size	*/
+	u32	log_iline_size;
+	u32	ilines_per_page;
+};
+
+extern struct ppc64_caches ppc64_caches;
+#endif /* __powerpc64__ && ! __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_CACHE_H */
diff --git a/include/asm-powerpc/cacheflush.h b/include/asm-powerpc/cacheflush.h
new file mode 100644
index 0000000..8a740c8
--- /dev/null
+++ b/include/asm-powerpc/cacheflush.h
@@ -0,0 +1,68 @@
+/*
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_POWERPC_CACHEFLUSH_H
+#define _ASM_POWERPC_CACHEFLUSH_H
+
+#ifdef __KERNEL__
+
+#include <linux/mm.h>
+#include <asm/cputable.h>
+
+/*
+ * No cache flushing is required when address mappings are changed,
+ * because the caches on PowerPCs are physically addressed.
+ */
+#define flush_cache_all()			do { } while (0)
+#define flush_cache_mm(mm)			do { } while (0)
+#define flush_cache_range(vma, start, end)	do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
+#define flush_icache_page(vma, page)		do { } while (0)
+#define flush_cache_vmap(start, end)		do { } while (0)
+#define flush_cache_vunmap(start, end)		do { } while (0)
+
+extern void flush_dcache_page(struct page *page);
+#define flush_dcache_mmap_lock(mapping)		do { } while (0)
+#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
+
+extern void __flush_icache_range(unsigned long, unsigned long);
+static inline void flush_icache_range(unsigned long start, unsigned long stop)
+{
+	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
+		__flush_icache_range(start, stop);
+}
+
+extern void flush_icache_user_range(struct vm_area_struct *vma,
+				    struct page *page, unsigned long addr,
+				    int len);
+extern void __flush_dcache_icache(void *page_va);
+extern void flush_dcache_icache_page(struct page *page);
+#if defined(CONFIG_PPC32) && !defined(CONFIG_BOOKE)
+extern void __flush_dcache_icache_phys(unsigned long physaddr);
+#endif /* CONFIG_PPC32 && !CONFIG_BOOKE */
+
+extern void flush_dcache_range(unsigned long start, unsigned long stop);
+#ifdef CONFIG_PPC32
+extern void clean_dcache_range(unsigned long start, unsigned long stop);
+extern void invalidate_dcache_range(unsigned long start, unsigned long stop);
+#endif /* CONFIG_PPC32 */
+#ifdef CONFIG_PPC64
+extern void flush_inval_dcache_range(unsigned long start, unsigned long stop);
+extern void flush_dcache_phys_range(unsigned long start, unsigned long stop);
+#endif
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+	do { \
+		memcpy(dst, src, len); \
+		flush_icache_user_range(vma, page, vaddr, len); \
+	} while (0)
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+	memcpy(dst, src, len)
+
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_POWERPC_CACHEFLUSH_H */
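Because PowerPC data caches are physically addressed, the case that still needs an explicit flush is storing instructions and then executing them; flush_icache_range() only does real work when CPU_FTR_COHERENT_ICACHE is absent. A hedged sketch of that pattern (the code-patching scenario is invented for illustration):

	#include <asm/cacheflush.h>

	/* Hypothetical: overwrite one instruction slot and make it visible
	 * to the i-cache before anything can execute it. */
	static void example_patch_insn(u32 *slot, u32 insn)
	{
		*slot = insn;
		flush_icache_range((unsigned long)slot,
				   (unsigned long)(slot + 1));
	}
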
diff --git a/include/asm-ppc64/compat.h b/include/asm-powerpc/compat.h
similarity index 97%
rename from include/asm-ppc64/compat.h
rename to include/asm-powerpc/compat.h
index 6ec62cd..4db4360 100644
--- a/include/asm-ppc64/compat.h
+++ b/include/asm-powerpc/compat.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_PPC64_COMPAT_H
-#define _ASM_PPC64_COMPAT_H
+#ifndef _ASM_POWERPC_COMPAT_H
+#define _ASM_POWERPC_COMPAT_H
 /*
  * Architecture specific compatibility types
  */
@@ -49,7 +49,7 @@
 	compat_dev_t	st_dev;
 	compat_ino_t	st_ino;
 	compat_mode_t	st_mode;
-	compat_nlink_t	st_nlink;	
+	compat_nlink_t	st_nlink;
 	__compat_uid32_t	st_uid;
 	__compat_gid32_t	st_gid;
 	compat_dev_t	st_rdev;
@@ -202,4 +202,4 @@
 	compat_ulong_t __unused6;
 };
 
-#endif /* _ASM_PPC64_COMPAT_H */
+#endif /* _ASM_POWERPC_COMPAT_H */
diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
index 79a0556..04e2726 100644
--- a/include/asm-powerpc/cputable.h
+++ b/include/asm-powerpc/cputable.h
@@ -2,7 +2,7 @@
 #define __ASM_POWERPC_CPUTABLE_H
 
 #include <linux/config.h>
-#include <asm/ppc_asm.h> /* for ASM_CONST */
+#include <asm/asm-compat.h>
 
 #define PPC_FEATURE_32			0x80000000
 #define PPC_FEATURE_64			0x40000000
@@ -16,6 +16,10 @@
 #define PPC_FEATURE_HAS_EFP_SINGLE	0x00400000
 #define PPC_FEATURE_HAS_EFP_DOUBLE	0x00200000
 #define PPC_FEATURE_NO_TB		0x00100000
+#define PPC_FEATURE_POWER4		0x00080000
+#define PPC_FEATURE_POWER5		0x00040000
+#define PPC_FEATURE_POWER5_PLUS		0x00020000
+#define PPC_FEATURE_CELL		0x00010000
 
 #ifdef __KERNEL__
 #ifndef __ASSEMBLY__
diff --git a/include/asm-powerpc/current.h b/include/asm-powerpc/current.h
new file mode 100644
index 0000000..82cd4a9
--- /dev/null
+++ b/include/asm-powerpc/current.h
@@ -0,0 +1,27 @@
+#ifndef _ASM_POWERPC_CURRENT_H
+#define _ASM_POWERPC_CURRENT_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+struct task_struct;
+
+#ifdef __powerpc64__
+#include <asm/paca.h>
+
+#define current		(get_paca()->__current)
+
+#else
+
+/*
+ * We keep `current' in r2 for speed.
+ */
+register struct task_struct *current asm ("r2");
+
+#endif
+
+#endif /* _ASM_POWERPC_CURRENT_H */
diff --git a/include/asm-powerpc/eeh_event.h b/include/asm-powerpc/eeh_event.h
new file mode 100644
index 0000000..d168a30
--- /dev/null
+++ b/include/asm-powerpc/eeh_event.h
@@ -0,0 +1,52 @@
+/*
+ *	eeh_event.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ *
+ * Copyright (c) 2005 Linas Vepstas <linas@linas.org>
+ */
+
+#ifndef ASM_PPC64_EEH_EVENT_H
+#define ASM_PPC64_EEH_EVENT_H
+
+/** EEH event -- structure holding pci controller data that describes
+ *  a change in the isolation status of a PCI slot.  A pointer
+ *  to this struct is passed as the data pointer in a notify callback.
+ */
+struct eeh_event {
+	struct list_head     list;
+	struct device_node 	*dn;   /* struct device node */
+	struct pci_dev       *dev;  /* affected device */
+	int                  state;
+	int time_unavail;    /* milliseconds until device might be available */
+};
+
+/**
+ * eeh_send_failure_event - generate a PCI error event
+ * @dev pci device
+ *
+ * This routine builds a PCI error event which will be delivered
+ * to all listeners on the peh_notifier_chain.
+ *
+ * This routine can be called within an interrupt context;
+ * the actual event will be delivered in a normal context
+ * (from a workqueue).
+ */
+int eeh_send_failure_event (struct device_node *dn,
+                            struct pci_dev *dev,
+                            int reset_state,
+                            int time_unavail);
+
+#endif /* ASM_PPC64_EEH_EVENT_H */
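eeh_send_failure_event() only queues the event; delivery happens later from a workqueue, which is what makes it safe to call from interrupt context. A sketch of a hypothetical caller on a detection path (the reset_state and time_unavail values are placeholders):

	#include <asm/eeh_event.h>

	/* May run under a spinlock or in interrupt context: just queue the
	 * event; the heavy lifting happens later in process context. */
	static void example_report_frozen_slot(struct device_node *dn,
					       struct pci_dev *dev)
	{
		eeh_send_failure_event(dn, dev, 1 /* reset_state */, 0);
	}
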
diff --git a/include/asm-powerpc/firmware.h b/include/asm-powerpc/firmware.h
index 806c142..12fabbcb0 100644
--- a/include/asm-powerpc/firmware.h
+++ b/include/asm-powerpc/firmware.h
@@ -43,6 +43,7 @@
 #define FW_FEATURE_ISERIES	(1UL<<21)
 
 enum {
+#ifdef CONFIG_PPC64
 	FW_FEATURE_PSERIES_POSSIBLE = FW_FEATURE_PFT | FW_FEATURE_TCE |
 		FW_FEATURE_SPRG0 | FW_FEATURE_DABR | FW_FEATURE_COPY |
 		FW_FEATURE_ASR | FW_FEATURE_DEBUG | FW_FEATURE_TERM |
@@ -70,6 +71,11 @@
 		FW_FEATURE_ISERIES_ALWAYS &
 #endif
 		FW_FEATURE_POSSIBLE,
+
+#else /* CONFIG_PPC64 */
+	FW_FEATURE_POSSIBLE = 0,
+	FW_FEATURE_ALWAYS = 0,
+#endif
 };
 
 /* This is used to identify firmware features which are available
diff --git a/include/asm-powerpc/futex.h b/include/asm-powerpc/futex.h
index 37c94e5..f0319d5 100644
--- a/include/asm-powerpc/futex.h
+++ b/include/asm-powerpc/futex.h
@@ -7,13 +7,14 @@
 #include <asm/errno.h>
 #include <asm/synch.h>
 #include <asm/uaccess.h>
-#include <asm/ppc_asm.h>
+#include <asm/asm-compat.h>
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
   __asm__ __volatile ( \
 	SYNC_ON_SMP \
 "1:	lwarx	%0,0,%2\n" \
 	insn \
+	PPC405_ERR77(0, %2) \
 "2:	stwcx.	%1,0,%2\n" \
 	"bne-	1b\n" \
 	"li	%1,0\n" \
@@ -23,7 +24,7 @@
 	".previous\n" \
 	".section __ex_table,\"a\"\n" \
 	".align 3\n" \
-	DATAL " 1b,4b,2b,4b\n" \
+	PPC_LONG "1b,4b,2b,4b\n" \
 	".previous" \
 	: "=&r" (oldval), "=&r" (ret) \
 	: "b" (uaddr), "i" (-EFAULT), "1" (oparg) \
diff --git a/include/asm-ppc64/hvcall.h b/include/asm-powerpc/hvcall.h
similarity index 97%
rename from include/asm-ppc64/hvcall.h
rename to include/asm-powerpc/hvcall.h
index ab7c3cf..d36da61 100644
--- a/include/asm-ppc64/hvcall.h
+++ b/include/asm-powerpc/hvcall.h
@@ -1,5 +1,5 @@
-#ifndef _PPC64_HVCALL_H
-#define _PPC64_HVCALL_H
+#ifndef _ASM_POWERPC_HVCALL_H
+#define _ASM_POWERPC_HVCALL_H
 
 #define HVSC			.long 0x44000022
 
@@ -138,7 +138,7 @@
  */
 long plpar_hcall_norets(unsigned long opcode, ...);
 
-/* 
+/*
  * Special hcall interface for ibmveth support.
  * Takes 8 input parms. Returns a rc and stores the
  * R4 return value in *out1.
@@ -153,11 +153,11 @@
 			   unsigned long arg7,
 			   unsigned long arg8,
 			   unsigned long *out1);
- 
+
 /* plpar_hcall_4out()
  *
- * same as plpar_hcall except with 4 output arguments.  
- * 
+ * same as plpar_hcall except with 4 output arguments.
+ *
  */
 long plpar_hcall_4out(unsigned long opcode,
 		      unsigned long arg1,
@@ -170,4 +170,4 @@
 		      unsigned long *out4);
 
 #endif /* __ASSEMBLY__ */
-#endif /* _PPC64_HVCALL_H */
+#endif /* _ASM_POWERPC_HVCALL_H */
diff --git a/include/asm-powerpc/hw_irq.h b/include/asm-powerpc/hw_irq.h
index c37b31b..26b89d8 100644
--- a/include/asm-powerpc/hw_irq.h
+++ b/include/asm-powerpc/hw_irq.h
@@ -12,7 +12,6 @@
 #include <asm/processor.h>
 
 extern void timer_interrupt(struct pt_regs *);
-extern void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq);
 
 #ifdef CONFIG_PPC_ISERIES
 
diff --git a/include/asm-powerpc/irq.h b/include/asm-powerpc/irq.h
index b3935ea..c9fbced 100644
--- a/include/asm-powerpc/irq.h
+++ b/include/asm-powerpc/irq.h
@@ -429,7 +429,6 @@
 #define NR_MASK_WORDS	((NR_IRQS + 31) / 32)
 /* pedantic: these are long because they are used with set_bit --RR */
 extern unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
-extern unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
 extern atomic_t ppc_n_lost_interrupts;
 
 #define virt_irq_create_mapping(x)	(x)
@@ -488,8 +487,8 @@
 
 extern void irq_ctx_init(void);
 extern void call_do_softirq(struct thread_info *tp);
-extern int call_handle_IRQ_event(int irq, struct pt_regs *regs,
-			struct irqaction *action, struct thread_info *tp);
+extern int call___do_IRQ(int irq, struct pt_regs *regs,
+		struct thread_info *tp);
 
 #define __ARCH_HAS_DO_SOFTIRQ
 
diff --git a/include/asm-ppc64/lppaca.h b/include/asm-powerpc/lppaca.h
similarity index 97%
rename from include/asm-ppc64/lppaca.h
rename to include/asm-powerpc/lppaca.h
index 9e2a6c0..c1bedab 100644
--- a/include/asm-ppc64/lppaca.h
+++ b/include/asm-powerpc/lppaca.h
@@ -16,8 +16,8 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
  */
-#ifndef _ASM_LPPACA_H
-#define _ASM_LPPACA_H
+#ifndef _ASM_POWERPC_LPPACA_H
+#define _ASM_POWERPC_LPPACA_H
 
 //=============================================================================
 //
@@ -28,8 +28,7 @@
 //----------------------------------------------------------------------------
 #include <asm/types.h>
 
-struct lppaca
-{
+struct lppaca {
 //=============================================================================
 // CACHE_LINE_1 0x0000 - 0x007F Contains read-only data
 // NOTE: The xDynXyz fields are fields that will be dynamically changed by
@@ -129,4 +128,4 @@
 	u8	pmc_save_area[256];	// PMC interrupt Area           x00-xFF
 };
 
-#endif /* _ASM_LPPACA_H */
+#endif /* _ASM_POWERPC_LPPACA_H */
diff --git a/include/asm-ppc64/paca.h b/include/asm-powerpc/paca.h
similarity index 95%
rename from include/asm-ppc64/paca.h
rename to include/asm-powerpc/paca.h
index bccacd6..92c765c 100644
--- a/include/asm-ppc64/paca.h
+++ b/include/asm-powerpc/paca.h
@@ -1,11 +1,8 @@
-#ifndef _PPC64_PACA_H
-#define _PPC64_PACA_H
-
 /*
- * include/asm-ppc64/paca.h
+ * include/asm-powerpc/paca.h
  *
- * This control block defines the PACA which defines the processor 
- * specific data for each logical processor on the system.  
+ * This control block defines the PACA which defines the processor
+ * specific data for each logical processor on the system.
  * There are some pointers defined that are utilized by PLIC.
  *
  * C 2001 PPC 64 Team, IBM Corp
@@ -14,7 +11,9 @@
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version
  * 2 of the License, or (at your option) any later version.
- */    
+ */
+#ifndef _ASM_POWERPC_PACA_H
+#define _ASM_POWERPC_PACA_H
 
 #include	<linux/config.h>
 #include	<asm/types.h>
@@ -118,4 +117,4 @@
 
 extern struct paca_struct paca[];
 
-#endif /* _PPC64_PACA_H */
+#endif /* _ASM_POWERPC_PACA_H */
diff --git a/include/asm-powerpc/ppc-pci.h b/include/asm-powerpc/ppc-pci.h
index 13aacff..9896fad 100644
--- a/include/asm-powerpc/ppc-pci.h
+++ b/include/asm-powerpc/ppc-pci.h
@@ -26,6 +26,10 @@
 
 extern struct pci_dev *ppc64_isabridge_dev;	/* may be NULL if no ISA bus */
 
+/** Bus Unit ID macros; get low and hi 32-bits of the 64-bit BUID */
+#define BUID_HI(buid) ((buid) >> 32)
+#define BUID_LO(buid) ((buid) & 0xffffffff)
+
 /* PCI device_node operations */
 struct device_node;
 typedef void *(*traverse_func)(struct device_node *me, void *data);
@@ -36,10 +40,6 @@
 void pci_devs_phb_init_dynamic(struct pci_controller *phb);
 void __devinit scan_phb(struct pci_controller *hose);
 
-/* PCI address cache management routines */
-void pci_addr_cache_insert_device(struct pci_dev *dev);
-void pci_addr_cache_remove_device(struct pci_dev *dev);
-
 /* From rtas_pci.h */
 void init_pci_config_tokens (void);
 unsigned long get_phb_buid (struct device_node *);
@@ -52,4 +52,48 @@
 extern unsigned long pci_assign_all_buses;
 extern int pci_read_irq_line(struct pci_dev *pci_dev);
 
+/* ---- EEH internal-use-only related routines ---- */
+#ifdef CONFIG_EEH
+/**
+ * rtas_set_slot_reset -- unfreeze a frozen slot
+ *
+ * Clear the EEH-frozen condition on a slot.  This routine
+ * does this by asserting the PCI #RST line for 1/8th of
+ * a second; this routine will sleep while the adapter is
+ * being reset.
+ */
+void rtas_set_slot_reset (struct pci_dn *);
+
+/** 
+ * eeh_restore_bars - Restore device configuration info.
+ *
+ * A reset of a PCI device will clear out its config space.
+ * This routine will restore the config space for this
+ * device, and its children, to values previously obtained
+ * from the firmware.
+ */
+void eeh_restore_bars(struct pci_dn *);
+
+/**
+ * rtas_configure_bridge -- firmware initialization of pci bridge
+ *
+ * Ask the firmware to configure all PCI bridge devices
+ * located behind the indicated node. Required after a
+ * pci device reset. Does essentially the same thing as
+ * eeh_restore_bars, but for bridges, and lets firmware
+ * do the work.
+ */
+void rtas_configure_bridge(struct pci_dn *);
+
+int rtas_write_config(struct pci_dn *, int where, int size, u32 val);
+
+/**
+ * mark and clear slots: find "partition endpoint" PE and set or 
+ * clear the flags for each subnode of the PE.
+ */
+void eeh_mark_slot (struct device_node *dn, int mode_flag);
+void eeh_clear_slot (struct device_node *dn, int mode_flag);
+
+#endif
+
 #endif /* _ASM_POWERPC_PPC_PCI_H */
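BUID_HI()/BUID_LO() exist because the RTAS PCI interfaces pass the 64-bit Bus Unit ID as two 32-bit cells. A trivial sketch of the split (debug printout only, purely illustrative):

	#include <linux/kernel.h>
	#include <asm/ppc-pci.h>

	static void example_show_buid(unsigned long buid)
	{
		/* hi gets bits 63..32, lo gets bits 31..0 */
		printk(KERN_DEBUG "buid 0x%lx -> hi 0x%lx lo 0x%lx\n",
		       buid, BUID_HI(buid), BUID_LO(buid));
	}
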
diff --git a/include/asm-powerpc/ppc_asm.h b/include/asm-powerpc/ppc_asm.h
index c534ca4..c27baa0 100644
--- a/include/asm-powerpc/ppc_asm.h
+++ b/include/asm-powerpc/ppc_asm.h
@@ -6,8 +6,13 @@
 
 #include <linux/stringify.h>
 #include <linux/config.h>
+#include <asm/asm-compat.h>
 
-#ifdef __ASSEMBLY__
+#ifndef __ASSEMBLY__
+#error __FILE__ should only be used in assembler files
+#else
+
+#define SZL			(BITS_PER_LONG/8)
 
 /*
  * Macros for storing registers into and loading registers from
@@ -184,12 +189,6 @@
 	oris    reg,reg,(label)@h;                      \
 	ori     reg,reg,(label)@l;
 
-/* operations for longs and pointers */
-#define LDL	ld
-#define STL	std
-#define CMPI	cmpdi
-#define SZL	8
-
 /* offsets for stack frame layout */
 #define LRSAVE	16
 
@@ -203,12 +202,6 @@
 
 #define OFF(name)	name@l
 
-/* operations for longs and pointers */
-#define LDL	lwz
-#define STL	stw
-#define CMPI	cmpwi
-#define SZL	4
-
 /* offsets for stack frame layout */
 #define LRSAVE	4
 
@@ -266,15 +259,6 @@
 #endif
 
 
-#ifdef CONFIG_IBM405_ERR77
-#define PPC405_ERR77(ra,rb)	dcbt	ra, rb;
-#define	PPC405_ERR77_SYNC	sync;
-#else
-#define PPC405_ERR77(ra,rb)
-#define PPC405_ERR77_SYNC
-#endif
-
-
 #ifdef CONFIG_IBM440EP_ERR42
 #define PPC440EP_ERR42 isync
 #else
@@ -502,17 +486,6 @@
 #define N_SLINE	68
 #define N_SO	100
 
-#define ASM_CONST(x) x
-#else
-  #define __ASM_CONST(x) x##UL
-  #define ASM_CONST(x) __ASM_CONST(x)
-
-#ifdef CONFIG_PPC64
-#define DATAL	".llong"
-#else
-#define DATAL	".long"
-#endif
-
 #endif /*  __ASSEMBLY__ */
 
 #endif /* _ASM_POWERPC_PPC_ASM_H */
diff --git a/include/asm-powerpc/processor.h b/include/asm-powerpc/processor.h
index 1dc4bf7..f6f186b 100644
--- a/include/asm-powerpc/processor.h
+++ b/include/asm-powerpc/processor.h
@@ -17,65 +17,71 @@
 #include <linux/compiler.h>
 #include <asm/ptrace.h>
 #include <asm/types.h>
-#ifdef CONFIG_PPC64
-#include <asm/systemcfg.h>
-#endif
 
-#ifdef CONFIG_PPC32
-/* 32-bit platform types */
-/* We only need to define a new _MACH_xxx for machines which are part of
- * a configuration which supports more than one type of different machine.
- * This is currently limited to CONFIG_PPC_MULTIPLATFORM and CHRP/PReP/PMac.
- * -- Tom
+/* We do _not_ want to define new machine types at all, those must die
+ * in favor of using the device-tree
+ * -- BenH.
  */
-#define _MACH_prep	0x00000001
-#define _MACH_Pmac	0x00000002	/* pmac or pmac clone (non-chrp) */
-#define _MACH_chrp	0x00000004	/* chrp machine */
 
-/* see residual.h for these */
+/* Platforms codes (to be obsoleted) */
+#define PLATFORM_PSERIES	0x0100
+#define PLATFORM_PSERIES_LPAR	0x0101
+#define PLATFORM_ISERIES_LPAR	0x0201
+#define PLATFORM_LPAR		0x0001
+#define PLATFORM_POWERMAC	0x0400
+#define PLATFORM_MAPLE		0x0500
+#define PLATFORM_PREP		0x0600
+#define PLATFORM_CHRP		0x0700
+#define PLATFORM_CELL		0x1000
+
+/* Compat platform codes for 32 bits */
+#define _MACH_prep	PLATFORM_PREP
+#define _MACH_Pmac	PLATFORM_POWERMAC
+#define _MACH_chrp	PLATFORM_CHRP
+
+/* PREP sub-platform types see residual.h for these */
 #define _PREP_Motorola	0x01	/* motorola prep */
 #define _PREP_Firm	0x02	/* firmworks prep */
 #define _PREP_IBM	0x00	/* ibm prep */
 #define _PREP_Bull	0x03	/* bull prep */
 
-/* these are arbitrary */
+/* CHRP sub-platform types. These are arbitrary */
 #define _CHRP_Motorola	0x04	/* motorola chrp, the cobra */
 #define _CHRP_IBM	0x05	/* IBM chrp, the longtrail and longtrail 2 */
 #define _CHRP_Pegasos	0x06	/* Genesi/bplan's Pegasos and Pegasos2 */
 
-#ifdef CONFIG_PPC_MULTIPLATFORM
+#define platform_is_pseries()	(_machine == PLATFORM_PSERIES || \
+				 _machine == PLATFORM_PSERIES_LPAR)
+#define platform_is_lpar()	(!!(_machine & PLATFORM_LPAR))
+
+#if defined(CONFIG_PPC_MULTIPLATFORM)
 extern int _machine;
 
+#ifdef CONFIG_PPC32
+
 /* what kind of prep workstation we are */
 extern int _prep_type;
 extern int _chrp_type;
 
 /*
  * This is used to identify the board type from a given PReP board
- * vendor. Board revision is also made available.
+ * vendor. Board revision is also made available. This will be moved
+ * elsewhere soon
  */
 extern unsigned char ucSystemType;
 extern unsigned char ucBoardRev;
 extern unsigned char ucBoardRevMaj, ucBoardRevMin;
+
+#endif /* CONFIG_PPC32 */
+
+#elif defined(CONFIG_PPC_ISERIES)
+/*
+ * iSeries is soon to become MULTIPLATFORM hopefully ...
+ */
+#define _machine PLATFORM_ISERIES_LPAR
 #else
 #define _machine 0
 #endif /* CONFIG_PPC_MULTIPLATFORM */
-#endif /* CONFIG_PPC32 */
-
-#ifdef CONFIG_PPC64
-/* Platforms supported by PPC64 */
-#define PLATFORM_PSERIES      0x0100
-#define PLATFORM_PSERIES_LPAR 0x0101
-#define PLATFORM_ISERIES_LPAR 0x0201
-#define PLATFORM_LPAR         0x0001
-#define PLATFORM_POWERMAC     0x0400
-#define PLATFORM_MAPLE        0x0500
-#define PLATFORM_CELL         0x1000
-
-/* Compatibility with drivers coming from PPC32 world */
-#define _machine	(systemcfg->platform)
-#define _MACH_Pmac	PLATFORM_POWERMAC
-#endif
 
 /*
  * Default implementation of macro that returns current
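
The platform codes above deliberately encode the LPAR property in bit 0 of the value, which is why platform_is_lpar() is a simple mask test while platform_is_pseries() has to compare against both pSeries codes. A minimal stand-alone sketch of that encoding (a hypothetical test program that mirrors the macros rather than including the header):

	/* Sketch: "running in an LPAR" is carried in the low bit of the
	 * platform code.  Values copied from the hunk above. */
	#include <stdio.h>

	#define PLATFORM_LPAR		0x0001
	#define PLATFORM_PSERIES	0x0100
	#define PLATFORM_PSERIES_LPAR	0x0101
	#define PLATFORM_ISERIES_LPAR	0x0201

	#define platform_is_lpar(m)	(!!((m) & PLATFORM_LPAR))

	int main(void)
	{
		int codes[] = { PLATFORM_PSERIES, PLATFORM_PSERIES_LPAR,
				PLATFORM_ISERIES_LPAR };
		int i;

		for (i = 0; i < 3; i++)		/* prints 0, 1, 1 */
			printf("0x%04x -> lpar=%d\n", codes[i],
			       platform_is_lpar(codes[i]));
		return 0;
	}
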
diff --git a/include/asm-powerpc/reg.h b/include/asm-powerpc/reg.h
index 489cf4c..eb392d0 100644
--- a/include/asm-powerpc/reg.h
+++ b/include/asm-powerpc/reg.h
@@ -16,7 +16,11 @@
 /* Pickup Book E specific registers. */
 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
 #include <asm/reg_booke.h>
-#endif
+#endif /* CONFIG_BOOKE || CONFIG_40x */
+
+#ifdef CONFIG_8xx
+#include <asm/reg_8xx.h>
+#endif /* CONFIG_8xx */
 
 #define MSR_SF_LG	63              /* Enable 64 bit mode */
 #define MSR_ISF_LG	61              /* Interrupt 64b mode valid on 630 */
@@ -359,6 +363,7 @@
 #define SPRN_RPA	0x3D6	/* Required Physical Address Register */
 #define SPRN_SDA	0x3BF	/* Sampled Data Address Register */
 #define SPRN_SDR1	0x019	/* MMU Hash Base Register */
+#define SPRN_ASR	0x118   /* Address Space Register */
 #define SPRN_SIA	0x3BB	/* Sampled Instruction Address Register */
 #define SPRN_SPRG0	0x110	/* Special Purpose Register General 0 */
 #define SPRN_SPRG1	0x111	/* Special Purpose Register General 1 */
diff --git a/include/asm-powerpc/reg_8xx.h b/include/asm-powerpc/reg_8xx.h
new file mode 100644
index 0000000..e8ea346
--- /dev/null
+++ b/include/asm-powerpc/reg_8xx.h
@@ -0,0 +1,42 @@
+/*
+ * Contains register definitions common to PowerPC 8xx CPUs.  Notice
+ */
+#ifndef _ASM_POWERPC_REG_8xx_H
+#define _ASM_POWERPC_REG_8xx_H
+
+/* Cache control on the MPC8xx is provided through some additional
+ * special purpose registers.
+ */
+#define SPRN_IC_CST	560	/* Instruction cache control/status */
+#define SPRN_IC_ADR	561	/* Address needed for some commands */
+#define SPRN_IC_DAT	562	/* Read-only data register */
+#define SPRN_DC_CST	568	/* Data cache control/status */
+#define SPRN_DC_ADR	569	/* Address needed for some commands */
+#define SPRN_DC_DAT	570	/* Read-only data register */
+
+/* Commands.  Only the first few are available to the instruction cache.
+*/
+#define	IDC_ENABLE	0x02000000	/* Cache enable */
+#define IDC_DISABLE	0x04000000	/* Cache disable */
+#define IDC_LDLCK	0x06000000	/* Load and lock */
+#define IDC_UNLINE	0x08000000	/* Unlock line */
+#define IDC_UNALL	0x0a000000	/* Unlock all */
+#define IDC_INVALL	0x0c000000	/* Invalidate all */
+
+#define DC_FLINE	0x0e000000	/* Flush data cache line */
+#define DC_SFWT		0x01000000	/* Set forced writethrough mode */
+#define DC_CFWT		0x03000000	/* Clear forced writethrough mode */
+#define DC_SLES		0x05000000	/* Set little endian swap mode */
+#define DC_CLES		0x07000000	/* Clear little endian swap mode */
+
+/* Status.
+*/
+#define IDC_ENABLED	0x80000000	/* Cache is enabled */
+#define IDC_CERR1	0x00200000	/* Cache error 1 */
+#define IDC_CERR2	0x00100000	/* Cache error 2 */
+#define IDC_CERR3	0x00080000	/* Cache error 3 */
+
+#define DC_DFWT		0x40000000	/* Data cache is forced write through */
+#define DC_LES		0x20000000	/* Caches are little endian mode */
+
+#endif /* _ASM_POWERPC_REG_8xx_H */
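
The command and status words above are written to and read from the cache control SPRs as whole values. As a hedged illustration only (assuming the generic mtspr()/mfspr() accessors from <asm/reg.h>; the in-tree 8xx cache bring-up is actually done in early assembly), enabling the instruction cache might look like:

	/* Illustrative sketch: drive the MPC8xx i-cache through its
	 * control/status SPR using the words defined above. */
	static int sketch_8xx_icache_on(void)
	{
		mtspr(SPRN_IC_CST, IDC_INVALL);	/* invalidate all lines  */
		mtspr(SPRN_IC_CST, IDC_ENABLE);	/* then enable the cache */

		/* IDC_ENABLED in the status register confirms it took */
		return (mfspr(SPRN_IC_CST) & IDC_ENABLED) ? 0 : -1;
	}
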
diff --git a/include/asm-ppc/signal.h b/include/asm-powerpc/signal.h
similarity index 82%
rename from include/asm-ppc/signal.h
rename to include/asm-powerpc/signal.h
index caf6ede..694c8d2 100644
--- a/include/asm-ppc/signal.h
+++ b/include/asm-powerpc/signal.h
@@ -1,18 +1,11 @@
-#ifndef _ASMPPC_SIGNAL_H
-#define _ASMPPC_SIGNAL_H
+#ifndef _ASM_POWERPC_SIGNAL_H
+#define _ASM_POWERPC_SIGNAL_H
 
-#ifdef __KERNEL__
 #include <linux/types.h>
-#endif /* __KERNEL__ */
-
-/* Avoid too many header ordering problems.  */
-struct siginfo;
-
-/* Most things should be clean enough to redefine this at will, if care
-   is taken to make libc match.  */
+#include <linux/config.h>
 
 #define _NSIG		64
-#define _NSIG_BPW	32
+#define _NSIG_BPW	BITS_PER_LONG
 #define _NSIG_WORDS	(_NSIG / _NSIG_BPW)
 
 typedef unsigned long old_sigset_t;		/* at least 32 bits */
@@ -77,19 +70,19 @@
  * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
  * Unix names RESETHAND and NODEFER respectively.
  */
-#define SA_NOCLDSTOP	0x00000001
-#define SA_NOCLDWAIT	0x00000002
-#define SA_SIGINFO	0x00000004
-#define SA_ONSTACK	0x08000000
-#define SA_RESTART	0x10000000
-#define SA_NODEFER	0x40000000
-#define SA_RESETHAND	0x80000000
+#define SA_NOCLDSTOP	0x00000001U
+#define SA_NOCLDWAIT	0x00000002U
+#define SA_SIGINFO	0x00000004U
+#define SA_ONSTACK	0x08000000U
+#define SA_RESTART	0x10000000U
+#define SA_NODEFER	0x40000000U
+#define SA_RESETHAND	0x80000000U
 
 #define SA_NOMASK	SA_NODEFER
 #define SA_ONESHOT	SA_RESETHAND
-#define SA_INTERRUPT	0x20000000 /* dummy -- ignored */
+#define SA_INTERRUPT	0x20000000u /* dummy -- ignored */
 
-#define SA_RESTORER	0x04000000
+#define SA_RESTORER	0x04000000U
 
 /*
  * sigaltstack controls
@@ -127,10 +120,13 @@
 } stack_t;
 
 #ifdef __KERNEL__
-#include <asm/sigcontext.h>
+struct pt_regs;
+extern int do_signal(sigset_t *oldset, struct pt_regs *regs);
+extern int do_signal32(sigset_t *oldset, struct pt_regs *regs);
 #define ptrace_signal_deliver(regs, cookie) do { } while (0)
 #endif /* __KERNEL__ */
 
+#ifndef __powerpc64__
 /*
  * These are parameters to dbg_sigreturn syscall.  They enable or
  * disable certain debugging things that can be done from signal
@@ -149,5 +145,6 @@
 
 /* Enable or disable branch tracing.  The value sets the state. */
 #define SIG_DBG_BRANCH_TRACING		2
+#endif /* ! __powerpc64__ */
 
-#endif
+#endif /* _ASM_POWERPC_SIGNAL_H */
diff --git a/include/asm-powerpc/sparsemem.h b/include/asm-powerpc/sparsemem.h
index 1c95ab9..58d2aab 100644
--- a/include/asm-powerpc/sparsemem.h
+++ b/include/asm-powerpc/sparsemem.h
@@ -11,6 +11,10 @@
 #define MAX_PHYSADDR_BITS       38
 #define MAX_PHYSMEM_BITS        36
 
+#ifdef CONFIG_MEMORY_HOTPLUG
+extern void create_section_mapping(unsigned long start, unsigned long end);
+#endif /* CONFIG_MEMORY_HOTPLUG */
+
 #endif /* CONFIG_SPARSEMEM */
 
 #endif /* _ASM_POWERPC_SPARSEMEM_H */
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index 3536a5c..5341b75 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -8,7 +8,6 @@
 #include <linux/kernel.h>
 
 #include <asm/hw_irq.h>
-#include <asm/ppc_asm.h>
 #include <asm/atomic.h>
 
 /*
@@ -180,6 +179,7 @@
 extern unsigned int rtas_data;
 extern int mem_init_done;	/* set on boot once kmalloc can be called */
 extern unsigned long memory_limit;
+extern unsigned long klimit;
 
 extern int powersave_nap;	/* set if nap mode can be used in idle loop */
 
diff --git a/include/asm-ppc64/systemcfg.h b/include/asm-powerpc/systemcfg.h
similarity index 95%
rename from include/asm-ppc64/systemcfg.h
rename to include/asm-powerpc/systemcfg.h
index 9b86b53..36b5cbe 100644
--- a/include/asm-ppc64/systemcfg.h
+++ b/include/asm-powerpc/systemcfg.h
@@ -1,7 +1,7 @@
 #ifndef _SYSTEMCFG_H
 #define _SYSTEMCFG_H
 
-/* 
+/*
  * Copyright (C) 2002 Peter Bergner <bergner@vnet.ibm.com>, IBM
  *
  * This program is free software; you can redistribute it and/or
@@ -12,7 +12,7 @@
 
 /* Change Activity:
  * 2002/09/30 : bergner  : Created
- * End Change Activity 
+ * End Change Activity
  */
 
 /*
@@ -56,7 +56,7 @@
 };
 
 #ifdef __KERNEL__
-extern struct systemcfg *systemcfg;
+extern struct systemcfg *_systemcfg; /* to be renamed */
 #endif
 
 #endif /* __ASSEMBLY__ */
diff --git a/include/asm-ppc64/tce.h b/include/asm-powerpc/tce.h
similarity index 95%
rename from include/asm-ppc64/tce.h
rename to include/asm-powerpc/tce.h
index d40b6b4..d099d52 100644
--- a/include/asm-ppc64/tce.h
+++ b/include/asm-powerpc/tce.h
@@ -18,8 +18,8 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
  */
 
-#ifndef _ASM_TCE_H
-#define _ASM_TCE_H
+#ifndef _ASM_POWERPC_TCE_H
+#define _ASM_POWERPC_TCE_H
 
 /*
  * Tces come in two formats, one for the virtual bus and a different
@@ -61,4 +61,4 @@
 };
 
 
-#endif
+#endif /* _ASM_POWERPC_TCE_H */
diff --git a/include/asm-powerpc/uaccess.h b/include/asm-powerpc/uaccess.h
index 33af730..3872e92 100644
--- a/include/asm-powerpc/uaccess.h
+++ b/include/asm-powerpc/uaccess.h
@@ -120,14 +120,6 @@
 
 extern long __put_user_bad(void);
 
-#ifdef __powerpc64__
-#define __EX_TABLE_ALIGN	"3"
-#define __EX_TABLE_TYPE		"llong"
-#else
-#define __EX_TABLE_ALIGN	"2"
-#define __EX_TABLE_TYPE		"long"
-#endif
-
 /*
  * We don't tell gcc that we are accessing memory, but this is OK
  * because we do not write to any memory gcc knows about, so there
@@ -142,11 +134,12 @@
 		"	b 2b\n"					\
 		".previous\n"					\
 		".section __ex_table,\"a\"\n"			\
-		"	.align " __EX_TABLE_ALIGN "\n"		\
-		"	."__EX_TABLE_TYPE" 1b,3b\n"		\
+		"	.balign %5\n"				\
+			PPC_LONG "1b,3b\n"			\
 		".previous"					\
 		: "=r" (err)					\
-		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
+		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err),\
+		  "i"(sizeof(unsigned long)))
 
 #ifdef __powerpc64__
 #define __put_user_asm2(x, ptr, retval)				\
@@ -162,12 +155,13 @@
 		"	b 3b\n"					\
 		".previous\n"					\
 		".section __ex_table,\"a\"\n"			\
-		"	.align " __EX_TABLE_ALIGN "\n"		\
-		"	." __EX_TABLE_TYPE " 1b,4b\n"		\
-		"	." __EX_TABLE_TYPE " 2b,4b\n"		\
+		"	.balign %5\n"				\
+			PPC_LONG "1b,4b\n"			\
+			PPC_LONG "2b,4b\n"			\
 		".previous"					\
 		: "=r" (err)					\
-		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
+		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err),\
+		  "i"(sizeof(unsigned long)))
 #endif /* __powerpc64__ */
 
 #define __put_user_size(x, ptr, size, retval)			\
@@ -213,11 +207,12 @@
 		"	b 2b\n"				\
 		".previous\n"				\
 		".section __ex_table,\"a\"\n"		\
-		"	.align "__EX_TABLE_ALIGN "\n"	\
-		"	." __EX_TABLE_TYPE " 1b,3b\n"	\
+		"	.balign %5\n"			\
+			PPC_LONG "1b,3b\n"		\
 		".previous"				\
 		: "=r" (err), "=r" (x)			\
-		: "b" (addr), "i" (-EFAULT), "0" (err))
+		: "b" (addr), "i" (-EFAULT), "0" (err),	\
+		  "i"(sizeof(unsigned long)))
 
 #ifdef __powerpc64__
 #define __get_user_asm2(x, addr, err)			\
@@ -235,12 +230,13 @@
 		"	b 3b\n"				\
 		".previous\n"				\
 		".section __ex_table,\"a\"\n"		\
-		"	.align " __EX_TABLE_ALIGN "\n"	\
-		"	." __EX_TABLE_TYPE " 1b,4b\n"	\
-		"	." __EX_TABLE_TYPE " 2b,4b\n"	\
+		"	.balign %5\n"			\
+			PPC_LONG "1b,4b\n"		\
+			PPC_LONG "2b,4b\n"		\
 		".previous"				\
 		: "=r" (err), "=&r" (x)			\
-		: "b" (addr), "i" (-EFAULT), "0" (err))
+		: "b" (addr), "i" (-EFAULT), "0" (err),	\
+		  "i"(sizeof(unsigned long)))
 #endif /* __powerpc64__ */
 
 #define __get_user_size(x, ptr, size, retval)			\
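
The uaccess.h rewrite above removes the last per-word-size string pasting: PPC_LONG (from asm-compat.h) expands to ".long" or ".llong", and the exception-table alignment now arrives as asm operand %5, the new "i"(sizeof(unsigned long)) input, so the same asm body serves ppc32 (4) and ppc64 (8). A reduced, hedged illustration of that one trick in isolation (not the real macro, which also emits the fixup branch and fault handling):

	/* Sketch: the alignment is a compile-time immediate operand
	 * instead of being pasted into the asm string. */
	static inline void ex_table_align_demo(void)
	{
		asm volatile(".section __ex_table,\"a\"\n"
			     "	.balign %0\n"
			     ".previous"
			     : : "i" (sizeof(unsigned long)));
	}
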
diff --git a/include/asm-powerpc/xmon.h b/include/asm-powerpc/xmon.h
index ace2072..43f7129 100644
--- a/include/asm-powerpc/xmon.h
+++ b/include/asm-powerpc/xmon.h
@@ -7,7 +7,6 @@
 extern int xmon(struct pt_regs *excp);
 extern void xmon_printf(const char *fmt, ...);
 extern void xmon_init(int);
-extern void xmon_map_scc(void);
 
 #endif
 #endif
diff --git a/include/asm-ppc/cache.h b/include/asm-ppc/cache.h
deleted file mode 100644
index 7a157d0..0000000
--- a/include/asm-ppc/cache.h
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * include/asm-ppc/cache.h
- */
-#ifdef __KERNEL__
-#ifndef __ARCH_PPC_CACHE_H
-#define __ARCH_PPC_CACHE_H
-
-#include <linux/config.h>
-
-/* bytes per L1 cache line */
-#if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
-#define L1_CACHE_SHIFT	4
-#define MAX_COPY_PREFETCH	1
-#elif defined(CONFIG_PPC64BRIDGE)
-#define L1_CACHE_SHIFT	7
-#define MAX_COPY_PREFETCH	1
-#else
-#define L1_CACHE_SHIFT	5
-#define MAX_COPY_PREFETCH	4
-#endif
-
-#define	L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
-
-#define	SMP_CACHE_BYTES L1_CACHE_BYTES
-#define L1_CACHE_SHIFT_MAX 7	/* largest L1 which this arch supports */
-
-#define	L1_CACHE_ALIGN(x)       (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
-#define	L1_CACHE_PAGES		8
-
-#ifndef __ASSEMBLY__
-extern void clean_dcache_range(unsigned long start, unsigned long stop);
-extern void flush_dcache_range(unsigned long start, unsigned long stop);
-extern void invalidate_dcache_range(unsigned long start, unsigned long stop);
-extern void flush_dcache_all(void);
-#endif /* __ASSEMBLY__ */
-
-/* prep registers for L2 */
-#define CACHECRBA       0x80000823      /* Cache configuration register address */
-#define L2CACHE_MASK	0x03	/* Mask for 2 L2 Cache bits */
-#define L2CACHE_512KB	0x00	/* 512KB */
-#define L2CACHE_256KB	0x01	/* 256KB */
-#define L2CACHE_1MB	0x02	/* 1MB */
-#define L2CACHE_NONE	0x03	/* NONE */
-#define L2CACHE_PARITY  0x08    /* Mask for L2 Cache Parity Protected bit */
-
-#ifdef CONFIG_8xx
-/* Cache control on the MPC8xx is provided through some additional
- * special purpose registers.
- */
-#define SPRN_IC_CST	560	/* Instruction cache control/status */
-#define SPRN_IC_ADR	561	/* Address needed for some commands */
-#define SPRN_IC_DAT	562	/* Read-only data register */
-#define SPRN_DC_CST	568	/* Data cache control/status */
-#define SPRN_DC_ADR	569	/* Address needed for some commands */
-#define SPRN_DC_DAT	570	/* Read-only data register */
-
-/* Commands.  Only the first few are available to the instruction cache.
-*/
-#define	IDC_ENABLE	0x02000000	/* Cache enable */
-#define IDC_DISABLE	0x04000000	/* Cache disable */
-#define IDC_LDLCK	0x06000000	/* Load and lock */
-#define IDC_UNLINE	0x08000000	/* Unlock line */
-#define IDC_UNALL	0x0a000000	/* Unlock all */
-#define IDC_INVALL	0x0c000000	/* Invalidate all */
-
-#define DC_FLINE	0x0e000000	/* Flush data cache line */
-#define DC_SFWT		0x01000000	/* Set forced writethrough mode */
-#define DC_CFWT		0x03000000	/* Clear forced writethrough mode */
-#define DC_SLES		0x05000000	/* Set little endian swap mode */
-#define DC_CLES		0x07000000	/* Clear little endian swap mode */
-
-/* Status.
-*/
-#define IDC_ENABLED	0x80000000	/* Cache is enabled */
-#define IDC_CERR1	0x00200000	/* Cache error 1 */
-#define IDC_CERR2	0x00100000	/* Cache error 2 */
-#define IDC_CERR3	0x00080000	/* Cache error 3 */
-
-#define DC_DFWT		0x40000000	/* Data cache is forced write through */
-#define DC_LES		0x20000000	/* Caches are little endian mode */
-#endif /* CONFIG_8xx */
-
-#endif
-#endif /* __KERNEL__ */
diff --git a/include/asm-ppc/cacheflush.h b/include/asm-ppc/cacheflush.h
deleted file mode 100644
index 6a243ef..0000000
--- a/include/asm-ppc/cacheflush.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * include/asm-ppc/cacheflush.h
- *
- *  This program is free software; you can redistribute it and/or
- *  modify it under the terms of the GNU General Public License
- *  as published by the Free Software Foundation; either version
- *  2 of the License, or (at your option) any later version.
- */
-#ifdef __KERNEL__
-#ifndef _PPC_CACHEFLUSH_H
-#define _PPC_CACHEFLUSH_H
-
-#include <linux/mm.h>
-
-/*
- * No cache flushing is required when address mappings are
- * changed, because the caches on PowerPCs are physically
- * addressed.  -- paulus
- * Also, when SMP we use the coherency (M) bit of the
- * BATs and PTEs.  -- Cort
- */
-#define flush_cache_all()		do { } while (0)
-#define flush_cache_mm(mm)		do { } while (0)
-#define flush_cache_range(vma, a, b)	do { } while (0)
-#define flush_cache_page(vma, p, pfn)	do { } while (0)
-#define flush_icache_page(vma, page)	do { } while (0)
-#define flush_cache_vmap(start, end)	do { } while (0)
-#define flush_cache_vunmap(start, end)	do { } while (0)
-
-extern void flush_dcache_page(struct page *page);
-#define flush_dcache_mmap_lock(mapping)		do { } while (0)
-#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
-
-extern void flush_icache_range(unsigned long, unsigned long);
-extern void flush_icache_user_range(struct vm_area_struct *vma,
-		struct page *page, unsigned long addr, int len);
-
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-do { memcpy(dst, src, len); \
-     flush_icache_user_range(vma, page, vaddr, len); \
-} while (0)
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
-	memcpy(dst, src, len)
-
-extern void __flush_dcache_icache(void *page_va);
-extern void __flush_dcache_icache_phys(unsigned long physaddr);
-extern void flush_dcache_icache_page(struct page *page);
-#endif /* _PPC_CACHEFLUSH_H */
-#endif /* __KERNEL__ */
diff --git a/include/asm-ppc/current.h b/include/asm-ppc/current.h
deleted file mode 100644
index 8d41501..0000000
--- a/include/asm-ppc/current.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifdef __KERNEL__
-#ifndef _PPC_CURRENT_H
-#define _PPC_CURRENT_H
-
-/*
- * We keep `current' in r2 for speed.
- */
-register struct task_struct *current asm ("r2");
-
-#endif /* !(_PPC_CURRENT_H) */
-#endif /* __KERNEL__ */
diff --git a/include/asm-ppc64/cache.h b/include/asm-ppc64/cache.h
deleted file mode 100644
index 92140a7..0000000
--- a/include/asm-ppc64/cache.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#ifndef __ARCH_PPC64_CACHE_H
-#define __ARCH_PPC64_CACHE_H
-
-#include <asm/types.h>
-
-/* bytes per L1 cache line */
-#define L1_CACHE_SHIFT	7
-#define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
-
-#define SMP_CACHE_BYTES L1_CACHE_BYTES
-#define L1_CACHE_SHIFT_MAX 7	/* largest L1 which this arch supports */
-
-#ifndef __ASSEMBLY__
-
-struct ppc64_caches {
-	u32	dsize;			/* L1 d-cache size */
-	u32	dline_size;		/* L1 d-cache line size	*/
-	u32	log_dline_size;
-	u32	dlines_per_page;
-	u32	isize;			/* L1 i-cache size */
-	u32	iline_size;		/* L1 i-cache line size	*/
-	u32	log_iline_size;
-	u32	ilines_per_page;
-};
-
-extern struct ppc64_caches ppc64_caches;
-
-#endif
-
-#endif
diff --git a/include/asm-ppc64/cacheflush.h b/include/asm-ppc64/cacheflush.h
deleted file mode 100644
index ffbc08b..0000000
--- a/include/asm-ppc64/cacheflush.h
+++ /dev/null
@@ -1,48 +0,0 @@
-#ifndef _PPC64_CACHEFLUSH_H
-#define _PPC64_CACHEFLUSH_H
-
-#include <linux/mm.h>
-#include <asm/cputable.h>
-
-/*
- * No cache flushing is required when address mappings are
- * changed, because the caches on PowerPCs are physically
- * addressed.
- */
-#define flush_cache_all()			do { } while (0)
-#define flush_cache_mm(mm)			do { } while (0)
-#define flush_cache_range(vma, start, end)	do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
-#define flush_icache_page(vma, page)		do { } while (0)
-#define flush_cache_vmap(start, end)		do { } while (0)
-#define flush_cache_vunmap(start, end)		do { } while (0)
-
-extern void flush_dcache_page(struct page *page);
-#define flush_dcache_mmap_lock(mapping)		do { } while (0)
-#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
-
-extern void __flush_icache_range(unsigned long, unsigned long);
-extern void flush_icache_user_range(struct vm_area_struct *vma,
-				    struct page *page, unsigned long addr,
-				    int len);
-
-extern void flush_dcache_range(unsigned long start, unsigned long stop);
-extern void flush_dcache_phys_range(unsigned long start, unsigned long stop);
-extern void flush_inval_dcache_range(unsigned long start, unsigned long stop);
-
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-do { memcpy(dst, src, len); \
-     flush_icache_user_range(vma, page, vaddr, len); \
-} while (0)
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
-	memcpy(dst, src, len)
-
-extern void __flush_dcache_icache(void *page_va);
-
-static inline void flush_icache_range(unsigned long start, unsigned long stop)
-{
-	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
-		__flush_icache_range(start, stop);
-}
-
-#endif /* _PPC64_CACHEFLUSH_H */
diff --git a/include/asm-ppc64/current.h b/include/asm-ppc64/current.h
deleted file mode 100644
index 52ddc60..0000000
--- a/include/asm-ppc64/current.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef _PPC64_CURRENT_H
-#define _PPC64_CURRENT_H
-
-#include <asm/paca.h>
-
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#define get_current()   (get_paca()->__current)
-#define current         get_current()
-
-#endif /* !(_PPC64_CURRENT_H) */
diff --git a/include/asm-ppc64/eeh.h b/include/asm-ppc64/eeh.h
index 40c8eb5..89f26ab 100644
--- a/include/asm-ppc64/eeh.h
+++ b/include/asm-ppc64/eeh.h
@@ -1,4 +1,4 @@
-/* 
+/*
  * eeh.h
  * Copyright (C) 2001  Dave Engebretsen & Todd Inglett IBM Corporation.
  *
@@ -6,12 +6,12 @@
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
  * (at your option) any later version.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
@@ -27,8 +27,6 @@
 
 struct pci_dev;
 struct device_node;
-struct device_node;
-struct notifier_block;
 
 #ifdef CONFIG_EEH
 
@@ -37,6 +35,10 @@
 #define EEH_MODE_NOCHECK	(1<<1)
 #define EEH_MODE_ISOLATED	(1<<2)
 
+/* Max number of EEH freezes allowed before we consider the device
+ * to be permanently disabled. */
+#define EEH_MAX_ALLOWED_FREEZES 5
+
 void __init eeh_init(void);
 unsigned long eeh_check_failure(const volatile void __iomem *token,
 				unsigned long val);
@@ -59,36 +61,14 @@
  * eeh_remove_device - undo EEH setup for the indicated pci device
  * @dev: pci device to be removed
  *
- * This routine should be when a device is removed from a running
- * system (e.g. by hotplug or dlpar).
+ * This routine should be called when a device is removed from
+ * a running system (e.g. by hotplug or dlpar).  It unregisters
+ * the PCI device from the EEH subsystem.  I/O errors affecting
+ * this device will no longer be detected after this call; thus,
+ * I/O errors affecting this slot may leave this device unusable.
  */
 void eeh_remove_device(struct pci_dev *);
 
-#define EEH_DISABLE		0
-#define EEH_ENABLE		1
-#define EEH_RELEASE_LOADSTORE	2
-#define EEH_RELEASE_DMA		3
-
-/**
- * Notifier event flags.
- */
-#define EEH_NOTIFY_FREEZE  1
-
-/** EEH event -- structure holding pci slot data that describes
- *  a change in the isolation status of a PCI slot.  A pointer
- *  to this struct is passed as the data pointer in a notify callback.
- */
-struct eeh_event {
-	struct list_head     list;
-	struct pci_dev       *dev;
-	struct device_node   *dn;
-	int                  reset_state;
-};
-
-/** Register to find out about EEH events. */
-int eeh_register_notifier(struct notifier_block *nb);
-int eeh_unregister_notifier(struct notifier_block *nb);
-
 /**
  * EEH_POSSIBLE_ERROR() -- test for possible MMIO failure.
  *
@@ -129,7 +109,7 @@
 #define EEH_IO_ERROR_VALUE(size) (-1UL)
 #endif /* CONFIG_EEH */
 
-/* 
+/*
  * MMIO read/write operations with EEH support.
  */
 static inline u8 eeh_readb(const volatile void __iomem *addr)
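
With the notifier hooks and struct eeh_event gone from the public header, the interesting addition here is the policy constant: EEH_MAX_ALLOWED_FREEZES bounds how many freezes a slot may accumulate before it is written off. A hedged sketch of that policy, assuming the pci_dn structure whose eeh_freeze_count field appears in the pci-bridge.h hunk further down (the real check lives in the EEH event handling code, not in this header):

	/* Illustrative policy check only, not the in-tree handler. */
	static int sketch_eeh_slot_is_dead(struct pci_dn *pdn)
	{
		pdn->eeh_freeze_count++;
		/* past the limit, treat the device as permanently disabled */
		return pdn->eeh_freeze_count > EEH_MAX_ALLOWED_FREEZES;
	}
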
diff --git a/include/asm-ppc64/mmu.h b/include/asm-ppc64/mmu.h
index 4c18a5c..1a7e0af 100644
--- a/include/asm-ppc64/mmu.h
+++ b/include/asm-ppc64/mmu.h
@@ -14,7 +14,7 @@
 #define _PPC64_MMU_H_
 
 #include <linux/config.h>
-#include <asm/ppc_asm.h> /* for ASM_CONST */
+#include <asm/asm-compat.h>
 #include <asm/page.h>
 
 /*
@@ -224,9 +224,12 @@
 			     unsigned long pstart, unsigned long mode,
 			     int psize);
 
+extern void htab_initialize(void);
+extern void htab_initialize_secondary(void);
 extern void hpte_init_native(void);
 extern void hpte_init_lpar(void);
 extern void hpte_init_iSeries(void);
+extern void mm_init_ppc64(void);
 
 extern long pSeries_lpar_hpte_insert(unsigned long hpte_group,
 				     unsigned long va, unsigned long prpn,
@@ -245,6 +248,7 @@
 
 extern void stabs_alloc(void);
 extern void slb_initialize(void);
+extern void stab_initialize(unsigned long stab);
 
 #endif /* __ASSEMBLY__ */
 
diff --git a/include/asm-ppc64/mmzone.h b/include/asm-ppc64/mmzone.h
index 80a708e..15e777c 100644
--- a/include/asm-ppc64/mmzone.h
+++ b/include/asm-ppc64/mmzone.h
@@ -33,6 +33,9 @@
 extern char *numa_memory_lookup_table;
 extern cpumask_t numa_cpumask_lookup_table[];
 extern int nr_cpus_in_node[];
+#ifdef CONFIG_MEMORY_HOTPLUG
+extern unsigned long max_pfn;
+#endif
 
 /* 16MB regions */
 #define MEMORY_INCREMENT_SHIFT 24
@@ -45,6 +48,11 @@
 {
 	int nid;
 
+#ifdef CONFIG_MEMORY_HOTPLUG
+	/* kludge hot added sections default to node 0 */
+	if (pa >= (max_pfn << PAGE_SHIFT))
+		return 0;
+#endif
 	nid = numa_memory_lookup_table[pa >> MEMORY_INCREMENT_SHIFT];
 
 #ifdef DEBUG_NUMA
diff --git a/include/asm-ppc64/page.h b/include/asm-ppc64/page.h
index 82ce187..e32f118 100644
--- a/include/asm-ppc64/page.h
+++ b/include/asm-ppc64/page.h
@@ -11,7 +11,7 @@
  */
 
 #include <linux/config.h>
-#include <asm/ppc_asm.h> /* for ASM_CONST */
+#include <asm/asm-compat.h>
 
 /*
  * We support either 4k or 64k software page size. When using 64k pages
diff --git a/include/asm-ppc64/pci-bridge.h b/include/asm-ppc64/pci-bridge.h
index 60cf8c8..efbdaec 100644
--- a/include/asm-ppc64/pci-bridge.h
+++ b/include/asm-ppc64/pci-bridge.h
@@ -63,7 +63,6 @@
 	int	devfn;			/* for pci devices */
 	int	eeh_mode;		/* See eeh.h for possible EEH_MODEs */
 	int	eeh_config_addr;
-	int	eeh_capable;		/* from firmware */
 	int 	eeh_check_count;	/* # times driver ignored error */
 	int 	eeh_freeze_count;	/* # times this device froze up. */
 	int	eeh_is_bridge;		/* device is pci-to-pci bridge */
diff --git a/include/asm-ppc64/pgalloc.h b/include/asm-ppc64/pgalloc.h
index 98da0e4..dcf3622 100644
--- a/include/asm-ppc64/pgalloc.h
+++ b/include/asm-ppc64/pgalloc.h
@@ -10,8 +10,8 @@
 
 #ifdef CONFIG_PPC_64K_PAGES
 #define PTE_CACHE_NUM	0
-#define PMD_CACHE_NUM	0
-#define PGD_CACHE_NUM	1
+#define PMD_CACHE_NUM	1
+#define PGD_CACHE_NUM	2
 #else
 #define PTE_CACHE_NUM	0
 #define PMD_CACHE_NUM	1
diff --git a/include/asm-ppc64/prom.h b/include/asm-ppc64/prom.h
index 76bb026..ddfe186 100644
--- a/include/asm-ppc64/prom.h
+++ b/include/asm-ppc64/prom.h
@@ -204,6 +204,8 @@
 extern unsigned long prom_init(unsigned long, unsigned long, unsigned long,
 	unsigned long, unsigned long);
 extern void finish_device_tree(void);
+extern void unflatten_device_tree(void);
+extern void early_init_devtree(void *);
 extern int device_is_compatible(struct device_node *device, const char *);
 extern int machine_is_compatible(const char *compat);
 extern unsigned char *get_property(struct device_node *node, const char *name,
diff --git a/include/asm-ppc64/signal.h b/include/asm-ppc64/signal.h
deleted file mode 100644
index 432df7dd..0000000
--- a/include/asm-ppc64/signal.h
+++ /dev/null
@@ -1,132 +0,0 @@
-#ifndef _ASMPPC64_SIGNAL_H
-#define _ASMPPC64_SIGNAL_H
-
-#include <linux/types.h>
-#include <linux/compiler.h>
-#include <asm/siginfo.h>
-
-/* Avoid too many header ordering problems.  */
-struct siginfo;
-
-#define _NSIG		64
-#define _NSIG_BPW	64
-#define _NSIG_WORDS	(_NSIG / _NSIG_BPW)
-
-typedef unsigned long old_sigset_t;		/* at least 32 bits */
-
-typedef struct {
-	unsigned long sig[_NSIG_WORDS];
-} sigset_t;
-
-#define SIGHUP		 1
-#define SIGINT		 2
-#define SIGQUIT		 3
-#define SIGILL		 4
-#define SIGTRAP		 5
-#define SIGABRT		 6
-#define SIGIOT		 6
-#define SIGBUS		 7
-#define SIGFPE		 8
-#define SIGKILL		 9
-#define SIGUSR1		10
-#define SIGSEGV		11
-#define SIGUSR2		12
-#define SIGPIPE		13
-#define SIGALRM		14
-#define SIGTERM		15
-#define SIGSTKFLT	16
-#define SIGCHLD		17
-#define SIGCONT		18
-#define SIGSTOP		19
-#define SIGTSTP		20
-#define SIGTTIN		21
-#define SIGTTOU		22
-#define SIGURG		23
-#define SIGXCPU		24
-#define SIGXFSZ		25
-#define SIGVTALRM	26
-#define SIGPROF		27
-#define SIGWINCH	28
-#define SIGIO		29
-#define SIGPOLL		SIGIO
-/*
-#define SIGLOST		29
-*/
-#define SIGPWR		30
-#define SIGSYS		31
-#define	SIGUNUSED	31
-
-/* These should not be considered constants from userland.  */
-#define SIGRTMIN	32
-#define SIGRTMAX	_NSIG
-
-/*
- * SA_FLAGS values:
- *
- * SA_ONSTACK is not currently supported, but will allow sigaltstack(2).
- * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the
- * SA_RESTART flag to get restarting signals (which were the default long ago)
- * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
- * SA_RESETHAND clears the handler when the signal is delivered.
- * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
- * SA_NODEFER prevents the current signal from being masked in the handler.
- *
- * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
- * Unix names RESETHAND and NODEFER respectively.
- */
-#define SA_NOCLDSTOP	0x00000001u
-#define SA_NOCLDWAIT	0x00000002u
-#define SA_SIGINFO	0x00000004u
-#define SA_ONSTACK	0x08000000u
-#define SA_RESTART	0x10000000u
-#define SA_NODEFER	0x40000000u
-#define SA_RESETHAND	0x80000000u
-
-#define SA_NOMASK	SA_NODEFER
-#define SA_ONESHOT	SA_RESETHAND
-#define SA_INTERRUPT	0x20000000u /* dummy -- ignored */
-
-#define SA_RESTORER	0x04000000u
-
-/* 
- * sigaltstack controls
- */
-#define SS_ONSTACK	1
-#define SS_DISABLE	2
-
-#define MINSIGSTKSZ	2048
-#define SIGSTKSZ	8192
-
-#include <asm-generic/signal.h>
-
-struct old_sigaction {
-	__sighandler_t sa_handler;
-	old_sigset_t sa_mask;
-	unsigned long sa_flags;
-	__sigrestore_t sa_restorer;
-};
-
-struct sigaction {
-	__sighandler_t sa_handler;
-	unsigned long sa_flags;
-	__sigrestore_t sa_restorer;
-	sigset_t sa_mask;		/* mask last for extensibility */
-};
-
-struct k_sigaction {
-	struct sigaction sa;
-};
-
-typedef struct sigaltstack {
-	void __user *ss_sp;
-	int ss_flags;
-	size_t ss_size;
-} stack_t;
-
-struct pt_regs;
-struct timespec;
-extern int do_signal(sigset_t *oldset, struct pt_regs *regs);
-extern int do_signal32(sigset_t *oldset, struct pt_regs *regs);
-#define ptrace_signal_deliver(regs, cookie) do { } while (0)
-
-#endif /* _ASMPPC64_SIGNAL_H */
diff --git a/include/asm-ppc64/system.h b/include/asm-ppc64/system.h
index 0cdd66c..bf9a6ab 100644
--- a/include/asm-ppc64/system.h
+++ b/include/asm-ppc64/system.h
@@ -149,6 +149,8 @@
 extern struct task_struct * _switch(struct thread_struct *prev,
 				    struct thread_struct *next);
 
+extern unsigned long klimit;
+
 extern int powersave_nap;	/* set if nap mode can be used in idle loop */
 
 /*
diff --git a/include/asm-sh/ide.h b/include/asm-sh/ide.h
index f42cf39..711dad4 100644
--- a/include/asm-sh/ide.h
+++ b/include/asm-sh/ide.h
@@ -16,10 +16,6 @@
 
 #include <linux/config.h>
 
-#ifndef MAX_HWIFS
-#define MAX_HWIFS	CONFIG_IDE_MAX_HWIFS
-#endif
-
 #define ide_default_io_ctl(base)	(0)
 
 #include <asm-generic/ide_iops.h>
diff --git a/include/asm-sh64/ide.h b/include/asm-sh64/ide.h
index 6fd514da..852f50a 100644
--- a/include/asm-sh64/ide.h
+++ b/include/asm-sh64/ide.h
@@ -17,10 +17,6 @@
 
 #include <linux/config.h>
 
-#ifndef MAX_HWIFS
-#define MAX_HWIFS	CONFIG_IDE_MAX_HWIFS
-#endif
-
 /* Without this, the initialisation of PCI IDE cards end up calling
  * ide_init_hwif_ports, which won't work. */
 #ifdef CONFIG_BLK_DEV_IDEPCI
diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h
new file mode 100644
index 0000000..84f12a4
--- /dev/null
+++ b/include/linux/genetlink.h
@@ -0,0 +1,51 @@
+#ifndef __LINUX_GENERIC_NETLINK_H
+#define __LINUX_GENERIC_NETLINK_H
+
+#include <linux/netlink.h>
+
+#define GENL_NAMSIZ	16	/* length of family name */
+
+#define GENL_MIN_ID	NLMSG_MIN_TYPE
+#define GENL_MAX_ID	1023
+
+struct genlmsghdr {
+	__u8	cmd;
+	__u8	version;
+	__u16	reserved;
+};
+
+#define GENL_HDRLEN	NLMSG_ALIGN(sizeof(struct genlmsghdr))
+
+/*
+ * List of reserved static generic netlink identifiers:
+ */
+#define GENL_ID_GENERATE	0
+#define GENL_ID_CTRL		NLMSG_MIN_TYPE
+
+/**************************************************************************
+ * Controller
+ **************************************************************************/
+
+enum {
+	CTRL_CMD_UNSPEC,
+	CTRL_CMD_NEWFAMILY,
+	CTRL_CMD_DELFAMILY,
+	CTRL_CMD_GETFAMILY,
+	CTRL_CMD_NEWOPS,
+	CTRL_CMD_DELOPS,
+	CTRL_CMD_GETOPS,
+	__CTRL_CMD_MAX,
+};
+
+#define CTRL_CMD_MAX (__CTRL_CMD_MAX - 1)
+
+enum {
+	CTRL_ATTR_UNSPEC,
+	CTRL_ATTR_FAMILY_ID,
+	CTRL_ATTR_FAMILY_NAME,
+	__CTRL_ATTR_MAX,
+};
+
+#define CTRL_ATTR_MAX (__CTRL_ATTR_MAX - 1)
+
+#endif	/* __LINUX_GENERIC_NETLINK_H */
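
A generic netlink message is an ordinary netlink message whose payload starts with the fixed genlmsghdr above, followed by netlink attributes; GENL_HDRLEN is the aligned size of that header. A hedged sketch of the size arithmetic for a request carrying one NUL-terminated string attribute (the NLMSG_*/NLA_* helpers come from linux/netlink.h as extended later in this patch):

	/* Sketch: bytes claimed by nlmsg_len for a generic netlink
	 * request with a single string attribute of 'namelen' chars. */
	static int sketch_genl_msg_len(int namelen)
	{
		int payload = GENL_HDRLEN +		/* cmd/version */
			      NLA_HDRLEN +		/* attr header */
			      NLA_ALIGN(namelen + 1);	/* padded text */

		return NLMSG_LENGTH(payload);
	}
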
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 3461abc..77ae55d 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -230,6 +230,7 @@
 	int		dma;			/* our dma entry */
 	ide_ack_intr_t	*ack_intr;		/* acknowledge interrupt */
 	hwif_chipset_t  chipset;
+	struct device	*dev;
 } hw_regs_t;
 
 /*
@@ -266,6 +267,10 @@
 
 #include <asm/ide.h>
 
+#ifndef MAX_HWIFS
+#define MAX_HWIFS	CONFIG_IDE_MAX_HWIFS
+#endif
+
 /* needed on alpha, x86/x86_64, ia64, mips, ppc32 and sh */
 #ifndef IDE_ARCH_OBSOLETE_DEFAULTS
 # define ide_default_io_base(index)	(0)
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index d21c305..fe26d43 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -21,6 +21,8 @@
 #ifndef _LINUX_IF_ETHER_H
 #define _LINUX_IF_ETHER_H
 
+#include <linux/types.h>
+
 /*
  *	IEEE 802.3 Ethernet magic constants.  The frame sizes omit the preamble
  *	and FCS/CRC (frame check sequence). 
@@ -100,7 +102,7 @@
 struct ethhdr {
 	unsigned char	h_dest[ETH_ALEN];	/* destination eth addr	*/
 	unsigned char	h_source[ETH_ALEN];	/* source ether addr	*/
-	unsigned short	h_proto;		/* packet type ID field	*/
+	__be16		h_proto;		/* packet type ID field	*/
 } __attribute__((packed));
 
 #ifdef __KERNEL__
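
Retyping h_proto as __be16 does not change the layout; it documents that the EtherType is stored in network byte order and lets sparse flag any use that skips the conversion. A minimal hedged sketch of a compliant reader (assuming the usual ntohs() from the byte-order headers):

	/* Sketch: swap h_proto before comparing with the host-order
	 * ETH_P_* constants. */
	static int sketch_is_ipv4_frame(const struct ethhdr *eth)
	{
		return ntohs(eth->h_proto) == ETH_P_IP;	/* 0x0800 */
	}
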
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index c6efce4..936f8b7 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -927,6 +927,13 @@
 extern int		weight_p;
 extern int		netdev_set_master(struct net_device *dev, struct net_device *master);
 extern int skb_checksum_help(struct sk_buff *skb, int inward);
+#ifdef CONFIG_BUG
+extern void netdev_rx_csum_fault(struct net_device *dev);
+#else
+static inline void netdev_rx_csum_fault(struct net_device *dev)
+{
+}
+#endif
 /* rx skb timestamps */
 extern void		net_enable_timestamp(void);
 extern void		net_disable_timestamp(void);
diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h
new file mode 100644
index 0000000..6d39b51
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_common.h
@@ -0,0 +1,159 @@
+#ifndef _NF_CONNTRACK_COMMON_H
+#define _NF_CONNTRACK_COMMON_H
+/* Connection state tracking for netfilter.  This is separated from,
+   but required by, the NAT layer; it can also be used by an iptables
+   extension. */
+enum ip_conntrack_info
+{
+	/* Part of an established connection (either direction). */
+	IP_CT_ESTABLISHED,
+
+	/* Like NEW, but related to an existing connection, or ICMP error
+	   (in either direction). */
+	IP_CT_RELATED,
+
+	/* Started a new connection to track (only
+           IP_CT_DIR_ORIGINAL); may be a retransmission. */
+	IP_CT_NEW,
+
+	/* >= this indicates reply direction */
+	IP_CT_IS_REPLY,
+
+	/* Number of distinct IP_CT types (no NEW in reply dirn). */
+	IP_CT_NUMBER = IP_CT_IS_REPLY * 2 - 1
+};
+
+/* Bitset representing status of connection. */
+enum ip_conntrack_status {
+	/* It's an expected connection: bit 0 set.  This bit never changed */
+	IPS_EXPECTED_BIT = 0,
+	IPS_EXPECTED = (1 << IPS_EXPECTED_BIT),
+
+	/* We've seen packets both ways: bit 1 set.  Can be set, not unset. */
+	IPS_SEEN_REPLY_BIT = 1,
+	IPS_SEEN_REPLY = (1 << IPS_SEEN_REPLY_BIT),
+
+	/* Conntrack should never be early-expired. */
+	IPS_ASSURED_BIT = 2,
+	IPS_ASSURED = (1 << IPS_ASSURED_BIT),
+
+	/* Connection is confirmed: originating packet has left box */
+	IPS_CONFIRMED_BIT = 3,
+	IPS_CONFIRMED = (1 << IPS_CONFIRMED_BIT),
+
+	/* Connection needs src nat in orig dir.  This bit never changed. */
+	IPS_SRC_NAT_BIT = 4,
+	IPS_SRC_NAT = (1 << IPS_SRC_NAT_BIT),
+
+	/* Connection needs dst nat in orig dir.  This bit never changed. */
+	IPS_DST_NAT_BIT = 5,
+	IPS_DST_NAT = (1 << IPS_DST_NAT_BIT),
+
+	/* Both together. */
+	IPS_NAT_MASK = (IPS_DST_NAT | IPS_SRC_NAT),
+
+	/* Connection needs TCP sequence adjusted. */
+	IPS_SEQ_ADJUST_BIT = 6,
+	IPS_SEQ_ADJUST = (1 << IPS_SEQ_ADJUST_BIT),
+
+	/* NAT initialization bits. */
+	IPS_SRC_NAT_DONE_BIT = 7,
+	IPS_SRC_NAT_DONE = (1 << IPS_SRC_NAT_DONE_BIT),
+
+	IPS_DST_NAT_DONE_BIT = 8,
+	IPS_DST_NAT_DONE = (1 << IPS_DST_NAT_DONE_BIT),
+
+	/* Both together */
+	IPS_NAT_DONE_MASK = (IPS_DST_NAT_DONE | IPS_SRC_NAT_DONE),
+
+	/* Connection is dying (removed from lists), can not be unset. */
+	IPS_DYING_BIT = 9,
+	IPS_DYING = (1 << IPS_DYING_BIT),
+};
+
+/* Connection tracking event bits */
+enum ip_conntrack_events
+{
+	/* New conntrack */
+	IPCT_NEW_BIT = 0,
+	IPCT_NEW = (1 << IPCT_NEW_BIT),
+
+	/* Expected connection */
+	IPCT_RELATED_BIT = 1,
+	IPCT_RELATED = (1 << IPCT_RELATED_BIT),
+
+	/* Destroyed conntrack */
+	IPCT_DESTROY_BIT = 2,
+	IPCT_DESTROY = (1 << IPCT_DESTROY_BIT),
+
+	/* Timer has been refreshed */
+	IPCT_REFRESH_BIT = 3,
+	IPCT_REFRESH = (1 << IPCT_REFRESH_BIT),
+
+	/* Status has changed */
+	IPCT_STATUS_BIT = 4,
+	IPCT_STATUS = (1 << IPCT_STATUS_BIT),
+
+	/* Update of protocol info */
+	IPCT_PROTOINFO_BIT = 5,
+	IPCT_PROTOINFO = (1 << IPCT_PROTOINFO_BIT),
+
+	/* Volatile protocol info */
+	IPCT_PROTOINFO_VOLATILE_BIT = 6,
+	IPCT_PROTOINFO_VOLATILE = (1 << IPCT_PROTOINFO_VOLATILE_BIT),
+
+	/* New helper for conntrack */
+	IPCT_HELPER_BIT = 7,
+	IPCT_HELPER = (1 << IPCT_HELPER_BIT),
+
+	/* Update of helper info */
+	IPCT_HELPINFO_BIT = 8,
+	IPCT_HELPINFO = (1 << IPCT_HELPINFO_BIT),
+
+	/* Volatile helper info */
+	IPCT_HELPINFO_VOLATILE_BIT = 9,
+	IPCT_HELPINFO_VOLATILE = (1 << IPCT_HELPINFO_VOLATILE_BIT),
+
+	/* NAT info */
+	IPCT_NATINFO_BIT = 10,
+	IPCT_NATINFO = (1 << IPCT_NATINFO_BIT),
+
+	/* Counter highest bit has been set */
+	IPCT_COUNTER_FILLING_BIT = 11,
+	IPCT_COUNTER_FILLING = (1 << IPCT_COUNTER_FILLING_BIT),
+};
+
+enum ip_conntrack_expect_events {
+	IPEXP_NEW_BIT = 0,
+	IPEXP_NEW = (1 << IPEXP_NEW_BIT),
+};
+
+#ifdef __KERNEL__
+struct ip_conntrack_counter
+{
+	u_int32_t packets;
+	u_int32_t bytes;
+};
+
+struct ip_conntrack_stat
+{
+	unsigned int searched;
+	unsigned int found;
+	unsigned int new;
+	unsigned int invalid;
+	unsigned int ignore;
+	unsigned int delete;
+	unsigned int delete_list;
+	unsigned int insert;
+	unsigned int insert_failed;
+	unsigned int drop;
+	unsigned int early_drop;
+	unsigned int error;
+	unsigned int expect_new;
+	unsigned int expect_create;
+	unsigned int expect_delete;
+};
+
+#endif /* __KERNEL__ */
+
+#endif /* _NF_CONNTRACK_COMMON_H */
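
The status definitions above form a bitset: each IPS_*_BIT names a bit position and the matching IPS_* constant is its mask, so tests use the masks while updates go through the atomic bit operations on the bit numbers. A hedged sketch, with a plain unsigned long standing in for the connection's status word and the kernel's set_bit() assumed:

	/* Sketch: intended use of the shared status bits. */
	static int sketch_conn_needs_nat(unsigned long status)
	{
		return (status & IPS_NAT_MASK) != 0;	/* SNAT or DNAT */
	}

	static void sketch_conn_mark_assured(unsigned long *status)
	{
		set_bit(IPS_ASSURED_BIT, status);	/* never early-expire */
	}
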
diff --git a/include/linux/netfilter/nf_conntrack_ftp.h b/include/linux/netfilter/nf_conntrack_ftp.h
new file mode 100644
index 0000000..ad4a41c
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_ftp.h
@@ -0,0 +1,44 @@
+#ifndef _NF_CONNTRACK_FTP_H
+#define _NF_CONNTRACK_FTP_H
+/* FTP tracking. */
+
+/* This enum is exposed to userspace */
+enum ip_ct_ftp_type
+{
+	/* PORT command from client */
+	IP_CT_FTP_PORT,
+	/* PASV response from server */
+	IP_CT_FTP_PASV,
+	/* EPRT command from client */
+	IP_CT_FTP_EPRT,
+	/* EPSV response from server */
+	IP_CT_FTP_EPSV,
+};
+
+#ifdef __KERNEL__
+
+#define FTP_PORT	21
+
+#define NUM_SEQ_TO_REMEMBER 2
+/* This structure exists only once per master */
+struct ip_ct_ftp_master {
+	/* Valid seq positions for cmd matching after newline */
+	u_int32_t seq_aft_nl[IP_CT_DIR_MAX][NUM_SEQ_TO_REMEMBER];
+	/* 0 means seq_match_aft_nl not set */
+	int seq_aft_nl_num[IP_CT_DIR_MAX];
+};
+
+struct ip_conntrack_expect;
+
+/* For NAT to hook in when we find a packet which describes what other
+ * connection we should expect. */
+extern unsigned int (*ip_nat_ftp_hook)(struct sk_buff **pskb,
+				       enum ip_conntrack_info ctinfo,
+				       enum ip_ct_ftp_type type,
+				       unsigned int matchoff,
+				       unsigned int matchlen,
+				       struct ip_conntrack_expect *exp,
+				       u32 *seq);
+#endif /* __KERNEL__ */
+
+#endif /* _NF_CONNTRACK_FTP_H */
diff --git a/include/linux/netfilter/nf_conntrack_sctp.h b/include/linux/netfilter/nf_conntrack_sctp.h
new file mode 100644
index 0000000..b8994d9
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_sctp.h
@@ -0,0 +1,27 @@
+#ifndef _NF_CONNTRACK_SCTP_H
+#define _NF_CONNTRACK_SCTP_H
+/* SCTP tracking. */
+
+#include <linux/netfilter/nf_conntrack_tuple_common.h>
+
+enum sctp_conntrack {
+	SCTP_CONNTRACK_NONE,
+	SCTP_CONNTRACK_CLOSED,
+	SCTP_CONNTRACK_COOKIE_WAIT,
+	SCTP_CONNTRACK_COOKIE_ECHOED,
+	SCTP_CONNTRACK_ESTABLISHED,
+	SCTP_CONNTRACK_SHUTDOWN_SENT,
+	SCTP_CONNTRACK_SHUTDOWN_RECD,
+	SCTP_CONNTRACK_SHUTDOWN_ACK_SENT,
+	SCTP_CONNTRACK_MAX
+};
+
+struct ip_ct_sctp
+{
+	enum sctp_conntrack state;
+
+	u_int32_t vtag[IP_CT_DIR_MAX];
+	u_int32_t ttag[IP_CT_DIR_MAX];
+};
+
+#endif /* _NF_CONNTRACK_SCTP_H */
diff --git a/include/linux/netfilter/nf_conntrack_tcp.h b/include/linux/netfilter/nf_conntrack_tcp.h
new file mode 100644
index 0000000..b2feeff
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_tcp.h
@@ -0,0 +1,56 @@
+#ifndef _NF_CONNTRACK_TCP_H
+#define _NF_CONNTRACK_TCP_H
+/* TCP tracking. */
+
+/* This is exposed to userspace (ctnetlink) */
+enum tcp_conntrack {
+	TCP_CONNTRACK_NONE,
+	TCP_CONNTRACK_SYN_SENT,
+	TCP_CONNTRACK_SYN_RECV,
+	TCP_CONNTRACK_ESTABLISHED,
+	TCP_CONNTRACK_FIN_WAIT,
+	TCP_CONNTRACK_CLOSE_WAIT,
+	TCP_CONNTRACK_LAST_ACK,
+	TCP_CONNTRACK_TIME_WAIT,
+	TCP_CONNTRACK_CLOSE,
+	TCP_CONNTRACK_LISTEN,
+	TCP_CONNTRACK_MAX,
+	TCP_CONNTRACK_IGNORE
+};
+
+/* Window scaling is advertised by the sender */
+#define IP_CT_TCP_FLAG_WINDOW_SCALE		0x01
+
+/* SACK is permitted by the sender */
+#define IP_CT_TCP_FLAG_SACK_PERM		0x02
+
+/* This sender sent FIN first */
+#define IP_CT_TCP_FLAG_CLOSE_INIT		0x03
+
+#ifdef __KERNEL__
+
+struct ip_ct_tcp_state {
+	u_int32_t	td_end;		/* max of seq + len */
+	u_int32_t	td_maxend;	/* max of ack + max(win, 1) */
+	u_int32_t	td_maxwin;	/* max(win) */
+	u_int8_t	td_scale;	/* window scale factor */
+	u_int8_t	loose;		/* used when connection picked up from the middle */
+	u_int8_t	flags;		/* per direction options */
+};
+
+struct ip_ct_tcp
+{
+	struct ip_ct_tcp_state seen[2];	/* connection parameters per direction */
+	u_int8_t	state;		/* state of the connection (enum tcp_conntrack) */
+	/* For detecting stale connections */
+	u_int8_t	last_dir;	/* Direction of the last packet (enum ip_conntrack_dir) */
+	u_int8_t	retrans;	/* Number of retransmitted packets */
+	u_int8_t	last_index;	/* Index of the last packet */
+	u_int32_t	last_seq;	/* Last sequence number seen in dir */
+	u_int32_t	last_ack;	/* Last sequence number seen in opposite dir */
+	u_int32_t	last_end;	/* Last seq + len */
+};
+
+#endif /* __KERNEL__ */
+
+#endif /* _NF_CONNTRACK_TCP_H */
diff --git a/include/linux/netfilter/nf_conntrack_tuple_common.h b/include/linux/netfilter/nf_conntrack_tuple_common.h
new file mode 100644
index 0000000..8e145f0
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_tuple_common.h
@@ -0,0 +1,13 @@
+#ifndef _NF_CONNTRACK_TUPLE_COMMON_H
+#define _NF_CONNTRACK_TUPLE_COMMON_H
+
+enum ip_conntrack_dir
+{
+	IP_CT_DIR_ORIGINAL,
+	IP_CT_DIR_REPLY,
+	IP_CT_DIR_MAX
+};
+
+#define CTINFO2DIR(ctinfo) ((ctinfo) >= IP_CT_IS_REPLY ? IP_CT_DIR_REPLY : IP_CT_DIR_ORIGINAL)
+
+#endif /* _NF_CONNTRACK_TUPLE_COMMON_H */
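
CTINFO2DIR works because enum ip_conntrack_info orders every original-direction state below IP_CT_IS_REPLY, with the reply-direction states being the same values offset by IP_CT_IS_REPLY. A one-function hedged sketch:

	/* Sketch: IP_CT_NEW maps to IP_CT_DIR_ORIGINAL, while
	 * IP_CT_ESTABLISHED + IP_CT_IS_REPLY maps to IP_CT_DIR_REPLY. */
	static enum ip_conntrack_dir sketch_dir(enum ip_conntrack_info ctinfo)
	{
		return CTINFO2DIR(ctinfo);
	}

In the tracker itself the result is typically used to index per-direction state such as the tuple hash array.
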
diff --git a/include/linux/netfilter_ipv4/ip_conntrack.h b/include/linux/netfilter_ipv4/ip_conntrack.h
index d078bb9..b3432ab 100644
--- a/include/linux/netfilter_ipv4/ip_conntrack.h
+++ b/include/linux/netfilter_ipv4/ip_conntrack.h
@@ -1,132 +1,7 @@
 #ifndef _IP_CONNTRACK_H
 #define _IP_CONNTRACK_H
-/* Connection state tracking for netfilter.  This is separated from,
-   but required by, the NAT layer; it can also be used by an iptables
-   extension. */
-enum ip_conntrack_info
-{
-	/* Part of an established connection (either direction). */
-	IP_CT_ESTABLISHED,
 
-	/* Like NEW, but related to an existing connection, or ICMP error
-	   (in either direction). */
-	IP_CT_RELATED,
-
-	/* Started a new connection to track (only
-           IP_CT_DIR_ORIGINAL); may be a retransmission. */
-	IP_CT_NEW,
-
-	/* >= this indicates reply direction */
-	IP_CT_IS_REPLY,
-
-	/* Number of distinct IP_CT types (no NEW in reply dirn). */
-	IP_CT_NUMBER = IP_CT_IS_REPLY * 2 - 1
-};
-
-/* Bitset representing status of connection. */
-enum ip_conntrack_status {
-	/* It's an expected connection: bit 0 set.  This bit never changed */
-	IPS_EXPECTED_BIT = 0,
-	IPS_EXPECTED = (1 << IPS_EXPECTED_BIT),
-
-	/* We've seen packets both ways: bit 1 set.  Can be set, not unset. */
-	IPS_SEEN_REPLY_BIT = 1,
-	IPS_SEEN_REPLY = (1 << IPS_SEEN_REPLY_BIT),
-
-	/* Conntrack should never be early-expired. */
-	IPS_ASSURED_BIT = 2,
-	IPS_ASSURED = (1 << IPS_ASSURED_BIT),
-
-	/* Connection is confirmed: originating packet has left box */
-	IPS_CONFIRMED_BIT = 3,
-	IPS_CONFIRMED = (1 << IPS_CONFIRMED_BIT),
-
-	/* Connection needs src nat in orig dir.  This bit never changed. */
-	IPS_SRC_NAT_BIT = 4,
-	IPS_SRC_NAT = (1 << IPS_SRC_NAT_BIT),
-
-	/* Connection needs dst nat in orig dir.  This bit never changed. */
-	IPS_DST_NAT_BIT = 5,
-	IPS_DST_NAT = (1 << IPS_DST_NAT_BIT),
-
-	/* Both together. */
-	IPS_NAT_MASK = (IPS_DST_NAT | IPS_SRC_NAT),
-
-	/* Connection needs TCP sequence adjusted. */
-	IPS_SEQ_ADJUST_BIT = 6,
-	IPS_SEQ_ADJUST = (1 << IPS_SEQ_ADJUST_BIT),
-
-	/* NAT initialization bits. */
-	IPS_SRC_NAT_DONE_BIT = 7,
-	IPS_SRC_NAT_DONE = (1 << IPS_SRC_NAT_DONE_BIT),
-
-	IPS_DST_NAT_DONE_BIT = 8,
-	IPS_DST_NAT_DONE = (1 << IPS_DST_NAT_DONE_BIT),
-
-	/* Both together */
-	IPS_NAT_DONE_MASK = (IPS_DST_NAT_DONE | IPS_SRC_NAT_DONE),
-
-	/* Connection is dying (removed from lists), can not be unset. */
-	IPS_DYING_BIT = 9,
-	IPS_DYING = (1 << IPS_DYING_BIT),
-};
-
-/* Connection tracking event bits */
-enum ip_conntrack_events
-{
-	/* New conntrack */
-	IPCT_NEW_BIT = 0,
-	IPCT_NEW = (1 << IPCT_NEW_BIT),
-
-	/* Expected connection */
-	IPCT_RELATED_BIT = 1,
-	IPCT_RELATED = (1 << IPCT_RELATED_BIT),
-
-	/* Destroyed conntrack */
-	IPCT_DESTROY_BIT = 2,
-	IPCT_DESTROY = (1 << IPCT_DESTROY_BIT),
-
-	/* Timer has been refreshed */
-	IPCT_REFRESH_BIT = 3,
-	IPCT_REFRESH = (1 << IPCT_REFRESH_BIT),
-
-	/* Status has changed */
-	IPCT_STATUS_BIT = 4,
-	IPCT_STATUS = (1 << IPCT_STATUS_BIT),
-
-	/* Update of protocol info */
-	IPCT_PROTOINFO_BIT = 5,
-	IPCT_PROTOINFO = (1 << IPCT_PROTOINFO_BIT),
-
-	/* Volatile protocol info */
-	IPCT_PROTOINFO_VOLATILE_BIT = 6,
-	IPCT_PROTOINFO_VOLATILE = (1 << IPCT_PROTOINFO_VOLATILE_BIT),
-
-	/* New helper for conntrack */
-	IPCT_HELPER_BIT = 7,
-	IPCT_HELPER = (1 << IPCT_HELPER_BIT),
-
-	/* Update of helper info */
-	IPCT_HELPINFO_BIT = 8,
-	IPCT_HELPINFO = (1 << IPCT_HELPINFO_BIT),
-
-	/* Volatile helper info */
-	IPCT_HELPINFO_VOLATILE_BIT = 9,
-	IPCT_HELPINFO_VOLATILE = (1 << IPCT_HELPINFO_VOLATILE_BIT),
-
-	/* NAT info */
-	IPCT_NATINFO_BIT = 10,
-	IPCT_NATINFO = (1 << IPCT_NATINFO_BIT),
-
-	/* Counter highest bit has been set */
-	IPCT_COUNTER_FILLING_BIT = 11,
-	IPCT_COUNTER_FILLING = (1 << IPCT_COUNTER_FILLING_BIT),
-};
-
-enum ip_conntrack_expect_events {
-	IPEXP_NEW_BIT = 0,
-	IPEXP_NEW = (1 << IPEXP_NEW_BIT),
-};
+#include <linux/netfilter/nf_conntrack_common.h>
 
 #ifdef __KERNEL__
 #include <linux/config.h>
@@ -194,12 +69,6 @@
 #define IP_NF_ASSERT(x)
 #endif
 
-struct ip_conntrack_counter
-{
-	u_int32_t packets;
-	u_int32_t bytes;
-};
-
 struct ip_conntrack_helper;
 
 struct ip_conntrack
@@ -426,25 +295,6 @@
 
 extern unsigned int ip_conntrack_htable_size;
  
-struct ip_conntrack_stat
-{
-	unsigned int searched;
-	unsigned int found;
-	unsigned int new;
-	unsigned int invalid;
-	unsigned int ignore;
-	unsigned int delete;
-	unsigned int delete_list;
-	unsigned int insert;
-	unsigned int insert_failed;
-	unsigned int drop;
-	unsigned int early_drop;
-	unsigned int error;
-	unsigned int expect_new;
-	unsigned int expect_create;
-	unsigned int expect_delete;
-};
-
 #define CONNTRACK_STAT_INC(count) (__get_cpu_var(ip_conntrack_stat).count++)
 
 #ifdef CONFIG_IP_NF_CONNTRACK_EVENTS
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_ftp.h b/include/linux/netfilter_ipv4/ip_conntrack_ftp.h
index 5f06429..6381193 100644
--- a/include/linux/netfilter_ipv4/ip_conntrack_ftp.h
+++ b/include/linux/netfilter_ipv4/ip_conntrack_ftp.h
@@ -1,43 +1,6 @@
 #ifndef _IP_CONNTRACK_FTP_H
 #define _IP_CONNTRACK_FTP_H
-/* FTP tracking. */
 
-#ifdef __KERNEL__
+#include <linux/netfilter/nf_conntrack_ftp.h>
 
-#define FTP_PORT	21
-
-#endif /* __KERNEL__ */
-
-enum ip_ct_ftp_type
-{
-	/* PORT command from client */
-	IP_CT_FTP_PORT,
-	/* PASV response from server */
-	IP_CT_FTP_PASV,
-	/* EPRT command from client */
-	IP_CT_FTP_EPRT,
-	/* EPSV response from server */
-	IP_CT_FTP_EPSV,
-};
-
-#define NUM_SEQ_TO_REMEMBER 2
-/* This structure exists only once per master */
-struct ip_ct_ftp_master {
-	/* Valid seq positions for cmd matching after newline */
-	u_int32_t seq_aft_nl[IP_CT_DIR_MAX][NUM_SEQ_TO_REMEMBER];
-	/* 0 means seq_match_aft_nl not set */
-	int seq_aft_nl_num[IP_CT_DIR_MAX];
-};
-
-struct ip_conntrack_expect;
-
-/* For NAT to hook in when we find a packet which describes what other
- * connection we should expect. */
-extern unsigned int (*ip_nat_ftp_hook)(struct sk_buff **pskb,
-				       enum ip_conntrack_info ctinfo,
-				       enum ip_ct_ftp_type type,
-				       unsigned int matchoff,
-				       unsigned int matchlen,
-				       struct ip_conntrack_expect *exp,
-				       u32 *seq);
 #endif /* _IP_CONNTRACK_FTP_H */
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_icmp.h b/include/linux/netfilter_ipv4/ip_conntrack_icmp.h
index f1664ab..eed5ee3 100644
--- a/include/linux/netfilter_ipv4/ip_conntrack_icmp.h
+++ b/include/linux/netfilter_ipv4/ip_conntrack_icmp.h
@@ -1,11 +1,6 @@
 #ifndef _IP_CONNTRACK_ICMP_H
 #define _IP_CONNTRACK_ICMP_H
-/* ICMP tracking. */
-#include <asm/atomic.h>
 
-struct ip_ct_icmp
-{
-	/* Optimization: when number in == number out, forget immediately. */
-	atomic_t count;
-};
+#include <net/netfilter/ipv4/nf_conntrack_icmp.h>
+
 #endif /* _IP_CONNTRACK_ICMP_H */
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_sctp.h b/include/linux/netfilter_ipv4/ip_conntrack_sctp.h
index 7a8d869..4099a04 100644
--- a/include/linux/netfilter_ipv4/ip_conntrack_sctp.h
+++ b/include/linux/netfilter_ipv4/ip_conntrack_sctp.h
@@ -1,25 +1,6 @@
 #ifndef _IP_CONNTRACK_SCTP_H
 #define _IP_CONNTRACK_SCTP_H
-/* SCTP tracking. */
 
-enum sctp_conntrack {
-	SCTP_CONNTRACK_NONE,
-	SCTP_CONNTRACK_CLOSED,
-	SCTP_CONNTRACK_COOKIE_WAIT,
-	SCTP_CONNTRACK_COOKIE_ECHOED,
-	SCTP_CONNTRACK_ESTABLISHED,
-	SCTP_CONNTRACK_SHUTDOWN_SENT,
-	SCTP_CONNTRACK_SHUTDOWN_RECD,
-	SCTP_CONNTRACK_SHUTDOWN_ACK_SENT,
-	SCTP_CONNTRACK_MAX
-};
-
-struct ip_ct_sctp
-{
-	enum sctp_conntrack state;
-
-	u_int32_t vtag[IP_CT_DIR_MAX];
-	u_int32_t ttag[IP_CT_DIR_MAX];
-};
+#include <linux/netfilter/nf_conntrack_sctp.h>
 
 #endif /* _IP_CONNTRACK_SCTP_H */
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_tcp.h b/include/linux/netfilter_ipv4/ip_conntrack_tcp.h
index 16da044..876b8fb 100644
--- a/include/linux/netfilter_ipv4/ip_conntrack_tcp.h
+++ b/include/linux/netfilter_ipv4/ip_conntrack_tcp.h
@@ -1,51 +1,6 @@
 #ifndef _IP_CONNTRACK_TCP_H
 #define _IP_CONNTRACK_TCP_H
-/* TCP tracking. */
 
-enum tcp_conntrack {
-	TCP_CONNTRACK_NONE,
-	TCP_CONNTRACK_SYN_SENT,
-	TCP_CONNTRACK_SYN_RECV,
-	TCP_CONNTRACK_ESTABLISHED,
-	TCP_CONNTRACK_FIN_WAIT,
-	TCP_CONNTRACK_CLOSE_WAIT,
-	TCP_CONNTRACK_LAST_ACK,
-	TCP_CONNTRACK_TIME_WAIT,
-	TCP_CONNTRACK_CLOSE,
-	TCP_CONNTRACK_LISTEN,
-	TCP_CONNTRACK_MAX,
-	TCP_CONNTRACK_IGNORE
-};
-
-/* Window scaling is advertised by the sender */
-#define IP_CT_TCP_FLAG_WINDOW_SCALE		0x01
-
-/* SACK is permitted by the sender */
-#define IP_CT_TCP_FLAG_SACK_PERM		0x02
-
-/* This sender sent FIN first */
-#define IP_CT_TCP_FLAG_CLOSE_INIT		0x03
-
-struct ip_ct_tcp_state {
-	u_int32_t	td_end;		/* max of seq + len */
-	u_int32_t	td_maxend;	/* max of ack + max(win, 1) */
-	u_int32_t	td_maxwin;	/* max(win) */
-	u_int8_t	td_scale;	/* window scale factor */
-	u_int8_t	loose;		/* used when connection picked up from the middle */
-	u_int8_t	flags;		/* per direction options */
-};
-
-struct ip_ct_tcp
-{
-	struct ip_ct_tcp_state seen[2];	/* connection parameters per direction */
-	u_int8_t	state;		/* state of the connection (enum tcp_conntrack) */
-	/* For detecting stale connections */
-	u_int8_t	last_dir;	/* Direction of the last packet (enum ip_conntrack_dir) */
-	u_int8_t	retrans;	/* Number of retransmitted packets */
-	u_int8_t	last_index;	/* Index of the last packet */
-	u_int32_t	last_seq;	/* Last sequence number seen in dir */
-	u_int32_t	last_ack;	/* Last sequence number seen in opposite dir */
-	u_int32_t	last_end;	/* Last seq + len */
-};
+#include <linux/netfilter/nf_conntrack_tcp.h>
 
 #endif /* _IP_CONNTRACK_TCP_H */
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_tuple.h b/include/linux/netfilter_ipv4/ip_conntrack_tuple.h
index 3232db1..2fdabdb 100644
--- a/include/linux/netfilter_ipv4/ip_conntrack_tuple.h
+++ b/include/linux/netfilter_ipv4/ip_conntrack_tuple.h
@@ -2,6 +2,7 @@
 #define _IP_CONNTRACK_TUPLE_H
 
 #include <linux/types.h>
+#include <linux/netfilter/nf_conntrack_tuple_common.h>
 
 /* A `tuple' is a structure containing the information to uniquely
   identify a connection.  ie. if two packets have the same tuple, they
@@ -88,13 +89,6 @@
 		(tuple)->dst.u.all = 0;				\
 	} while (0)
 
-enum ip_conntrack_dir
-{
-	IP_CT_DIR_ORIGINAL,
-	IP_CT_DIR_REPLY,
-	IP_CT_DIR_MAX
-};
-
 #ifdef __KERNEL__
 
 #define DUMP_TUPLE(tp)						\
@@ -103,8 +97,6 @@
        NIPQUAD((tp)->src.ip), ntohs((tp)->src.u.all),		\
        NIPQUAD((tp)->dst.ip), ntohs((tp)->dst.u.all))
 
-#define CTINFO2DIR(ctinfo) ((ctinfo) >= IP_CT_IS_REPLY ? IP_CT_DIR_REPLY : IP_CT_DIR_ORIGINAL)
-
 /* If we're the first tuple, it's the original dir. */
 #define DIRECTION(h) ((enum ip_conntrack_dir)(h)->tuple.dst.dir)
 
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index edcc2c6..53b2983 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -59,6 +59,7 @@
 
 enum nf_ip6_hook_priorities {
 	NF_IP6_PRI_FIRST = INT_MIN,
+	NF_IP6_PRI_CONNTRACK_DEFRAG = -400,
 	NF_IP6_PRI_SELINUX_FIRST = -225,
 	NF_IP6_PRI_CONNTRACK = -200,
 	NF_IP6_PRI_BRIDGE_SABOTAGE_FORWARD = -175,
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index ba25ca8..6a2ccf7 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -71,7 +71,8 @@
 
 #define NLMSG_ALIGNTO	4
 #define NLMSG_ALIGN(len) ( ((len)+NLMSG_ALIGNTO-1) & ~(NLMSG_ALIGNTO-1) )
-#define NLMSG_LENGTH(len) ((len)+NLMSG_ALIGN(sizeof(struct nlmsghdr)))
+#define NLMSG_HDRLEN	 ((int) NLMSG_ALIGN(sizeof(struct nlmsghdr)))
+#define NLMSG_LENGTH(len) ((len)+NLMSG_ALIGN(NLMSG_HDRLEN))
 #define NLMSG_SPACE(len) NLMSG_ALIGN(NLMSG_LENGTH(len))
 #define NLMSG_DATA(nlh)  ((void*)(((char*)nlh) + NLMSG_LENGTH(0)))
 #define NLMSG_NEXT(nlh,len)	 ((len) -= NLMSG_ALIGN((nlh)->nlmsg_len), \
@@ -86,6 +87,8 @@
 #define NLMSG_DONE		0x3	/* End of a dump	*/
 #define NLMSG_OVERRUN		0x4	/* Data lost		*/
 
+#define NLMSG_MIN_TYPE		0x10	/* < 0x10: reserved control messages */
+
 struct nlmsgerr
 {
 	int		error;
@@ -108,6 +111,25 @@
 	NETLINK_CONNECTED,
 };
 
+/*
+ *  <------- NLA_HDRLEN ------> <-- NLA_ALIGN(payload)-->
+ * +---------------------+- - -+- - - - - - - - - -+- - -+
+ * |        Header       | Pad |     Payload       | Pad |
+ * |   (struct nlattr)   | ing |                   | ing |
+ * +---------------------+- - -+- - - - - - - - - -+- - -+
+ *  <-------------- nlattr->nla_len -------------->
+ */
+
+struct nlattr
+{
+	__u16           nla_len;
+	__u16           nla_type;
+};
+
+#define NLA_ALIGNTO		4
+#define NLA_ALIGN(len)		(((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
+#define NLA_HDRLEN		((int) NLA_ALIGN(sizeof(struct nlattr)))
+
 #ifdef __KERNEL__
 
 #include <linux/capability.h>
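
The diagram above fixes the attribute wire format: a 4-byte-aligned struct nlattr header, the payload, then padding up to the next NLA_ALIGNTO boundary, with nla_len covering header plus payload but not the trailing pad. The arithmetic, as a hedged sketch:

	/* Sketch: space accounting for one attribute carrying
	 * 'payload' bytes, per the layout diagram above. */
	static int sketch_nla_sizes(int payload, int *len, int *space)
	{
		*len   = NLA_HDRLEN + payload;	/* stored in nla_len    */
		*space = NLA_ALIGN(*len);	/* consumed in message  */
		return *space - *len;		/* trailing pad, 0..3   */
	}

For example a 5-byte payload gives nla_len = 9 and occupies 12 bytes in the message.
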
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 9a96f05..4e06eb0 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -387,6 +387,7 @@
 #define PCI_DEVICE_ID_NS_SC1100_SMI	0x0511
 #define PCI_DEVICE_ID_NS_SC1100_XBUS	0x0515
 #define PCI_DEVICE_ID_NS_87410		0xd001
+#define PCI_DEVICE_ID_NS_CS5535_IDE	0x002d
 
 #define PCI_VENDOR_ID_TSENG		0x100c
 #define PCI_DEVICE_ID_TSENG_W32P_2	0x3202
@@ -487,6 +488,8 @@
 #define PCI_DEVICE_ID_AMD_8151_0	0x7454
 #define PCI_DEVICE_ID_AMD_8131_APIC     0x7450
 
+#define PCI_DEVICE_ID_AMD_CS5536_IDE	0x209A
+
 #define PCI_VENDOR_ID_TRIDENT		0x1023
 #define PCI_DEVICE_ID_TRIDENT_4DWAVE_DX	0x2000
 #define PCI_DEVICE_ID_TRIDENT_4DWAVE_NX	0x2001
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index fdfb8fe..0a8ea8b 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -274,6 +274,9 @@
 #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
 	__u8			ipvs_property:1;
 #endif
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+	struct sk_buff		*nfct_reasm;
+#endif
 #ifdef CONFIG_BRIDGE_NETFILTER
 	struct nf_bridge_info	*nf_bridge;
 #endif
@@ -1233,8 +1236,7 @@
 extern int	       skb_copy_datagram_iovec(const struct sk_buff *from,
 					       int offset, struct iovec *to,
 					       int size);
-extern int	       skb_copy_and_csum_datagram_iovec(const
-							struct sk_buff *skb,
+extern int	       skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
 							int hlen,
 							struct iovec *iov);
 extern void	       skb_free_datagram(struct sock *sk, struct sk_buff *skb);
@@ -1302,6 +1304,30 @@
 
 extern void __net_timestamp(struct sk_buff *skb);
 
+extern unsigned int __skb_checksum_complete(struct sk_buff *skb);
+
+/**
+ *	skb_checksum_complete - Calculate checksum of an entire packet
+ *	@skb: packet to process
+ *
+ *	This function calculates the checksum over the entire packet plus
+ *	the value of skb->csum.  The latter can be used to supply the
+ *	checksum of a pseudo header as used by TCP/UDP.  It returns the
+ *	checksum.
+ *
+ *	For protocols that contain complete checksums such as ICMP/TCP/UDP,
+ *	this function can be used to verify the checksum on received
+ *	packets.  In that case the function should return zero if the
+ *	checksum is correct.  In particular, this function will return zero
+ *	if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the
+ *	hardware has already verified the correctness of the checksum.
+ */
+static inline unsigned int skb_checksum_complete(struct sk_buff *skb)
+{
+	return skb->ip_summed != CHECKSUM_UNNECESSARY &&
+		__skb_checksum_complete(skb);
+}
+
 #ifdef CONFIG_NETFILTER
 static inline void nf_conntrack_put(struct nf_conntrack *nfct)
 {
@@ -1313,10 +1339,26 @@
 	if (nfct)
 		atomic_inc(&nfct->use);
 }
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
+{
+	if (skb)
+		atomic_inc(&skb->users);
+}
+static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
+{
+	if (skb)
+		kfree_skb(skb);
+}
+#endif
 static inline void nf_reset(struct sk_buff *skb)
 {
 	nf_conntrack_put(skb->nfct);
 	skb->nfct = NULL;
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+	nf_conntrack_put_reasm(skb->nfct_reasm);
+	skb->nfct_reasm = NULL;
+#endif
 }
 
 #ifdef CONFIG_BRIDGE_NETFILTER
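As a usage illustration for the new helper, a minimal receive-path sketch; the function name and the drop policy are assumptions, and skb->csum is assumed to already carry whatever pseudo-header checksum the protocol needs:

	static int example_rcv(struct sk_buff *skb)
	{
		/* Non-zero means the checksum is wrong and the hardware
		 * did not already verify it (CHECKSUM_UNNECESSARY). */
		if (skb_checksum_complete(skb)) {
			kfree_skb(skb);
			return 0;
		}
		/* ... continue with normal protocol processing ... */
		return 0;
	}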
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index fc131d6..22cf5e1 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -205,6 +205,7 @@
 	NET_ECONET=16,
 	NET_SCTP=17,
 	NET_LLC=18,
+	NET_NETFILTER=19,
 };
 
 /* /proc/sys/kernel/random */
@@ -270,6 +271,42 @@
 	NET_UNIX_MAX_DGRAM_QLEN=3,
 };
 
+/* /proc/sys/net/netfilter */
+enum
+{
+	NET_NF_CONNTRACK_MAX=1,
+	NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT=2,
+	NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV=3,
+	NET_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED=4,
+	NET_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT=5,
+	NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT=6,
+	NET_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK=7,
+	NET_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT=8,
+	NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE=9,
+	NET_NF_CONNTRACK_UDP_TIMEOUT=10,
+	NET_NF_CONNTRACK_UDP_TIMEOUT_STREAM=11,
+	NET_NF_CONNTRACK_ICMP_TIMEOUT=12,
+	NET_NF_CONNTRACK_GENERIC_TIMEOUT=13,
+	NET_NF_CONNTRACK_BUCKETS=14,
+	NET_NF_CONNTRACK_LOG_INVALID=15,
+	NET_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS=16,
+	NET_NF_CONNTRACK_TCP_LOOSE=17,
+	NET_NF_CONNTRACK_TCP_BE_LIBERAL=18,
+	NET_NF_CONNTRACK_TCP_MAX_RETRANS=19,
+	NET_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED=20,
+	NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT=21,
+	NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED=22,
+	NET_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED=23,
+	NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT=24,
+	NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD=25,
+	NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT=26,
+	NET_NF_CONNTRACK_COUNT=27,
+	NET_NF_CONNTRACK_ICMPV6_TIMEOUT=28,
+	NET_NF_CONNTRACK_FRAG6_TIMEOUT=29,
+	NET_NF_CONNTRACK_FRAG6_LOW_THRESH=30,
+	NET_NF_CONNTRACK_FRAG6_HIGH_THRESH=31,
+};
+
 /* /proc/sys/net/ipv4 */
 enum
 {
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
new file mode 100644
index 0000000..52d8b1a
--- /dev/null
+++ b/include/net/genetlink.h
@@ -0,0 +1,154 @@
+#ifndef __NET_GENERIC_NETLINK_H
+#define __NET_GENERIC_NETLINK_H
+
+#include <linux/genetlink.h>
+#include <net/netlink.h>
+
+/**
+ * struct genl_family - generic netlink family
+ * @id: protocol family identifier
+ * @hdrsize: length of user specific header in bytes
+ * @name: name of family
+ * @version: protocol version
+ * @maxattr: maximum number of attributes supported
+ * @attrbuf: buffer to store parsed attributes
+ * @ops_list: list of all assigned operations
+ * @family_list: family list
+ */
+struct genl_family
+{
+	unsigned int		id;
+	unsigned int		hdrsize;
+	char			name[GENL_NAMSIZ];
+	unsigned int		version;
+	unsigned int		maxattr;
+	struct module *		owner;
+	struct nlattr **	attrbuf;	/* private */
+	struct list_head	ops_list;	/* private */
+	struct list_head	family_list;	/* private */
+};
+
+#define GENL_ADMIN_PERM		0x01
+
+/**
+ * struct genl_info - receiving information
+ * @snd_seq: sending sequence number
+ * @snd_pid: netlink pid of sender
+ * @nlhdr: netlink message header
+ * @genlhdr: generic netlink message header
+ * @userhdr: user specific header
+ * @attrs: netlink attributes
+ */
+struct genl_info
+{
+	u32			snd_seq;
+	u32			snd_pid;
+	struct nlmsghdr *	nlhdr;
+	struct genlmsghdr *	genlhdr;
+	void *			userhdr;
+	struct nlattr **	attrs;
+};
+
+/**
+ * struct genl_ops - generic netlink operations
+ * @cmd: command identifier
+ * @flags: flags
+ * @policy: attribute validation policy
+ * @doit: standard command callback
+ * @dumpit: callback for dumpers
+ * @ops_list: operations list
+ */
+struct genl_ops
+{
+	unsigned int		cmd;
+	unsigned int		flags;
+	struct nla_policy	*policy;
+	int		       (*doit)(struct sk_buff *skb,
+				       struct genl_info *info);
+	int		       (*dumpit)(struct sk_buff *skb,
+					 struct netlink_callback *cb);
+	struct list_head	ops_list;
+};
+
+extern int genl_register_family(struct genl_family *family);
+extern int genl_unregister_family(struct genl_family *family);
+extern int genl_register_ops(struct genl_family *, struct genl_ops *ops);
+extern int genl_unregister_ops(struct genl_family *, struct genl_ops *ops);
+
+extern struct sock *genl_sock;
+
+/**
+ * genlmsg_put - Add generic netlink header to netlink message
+ * @skb: socket buffer holding the message
+ * @pid: netlink pid the message is addressed to
+ * @seq: sequence number (usually the one of the sender)
+ * @type: netlink message type
+ * @hdrlen: length of the user specific header
+ * @flags: netlink message flags
+ * @cmd: generic netlink command
+ * @version: version
+ *
+ * Returns pointer to user specific header
+ */
+static inline void *genlmsg_put(struct sk_buff *skb, u32 pid, u32 seq,
+				int type, int hdrlen, int flags,
+				u8 cmd, u8 version)
+{
+	struct nlmsghdr *nlh;
+	struct genlmsghdr *hdr;
+
+	nlh = nlmsg_put(skb, pid, seq, type, GENL_HDRLEN + hdrlen, flags);
+	if (nlh == NULL)
+		return NULL;
+
+	hdr = nlmsg_data(nlh);
+	hdr->cmd = cmd;
+	hdr->version = version;
+	hdr->reserved = 0;
+
+	return (char *) hdr + GENL_HDRLEN;
+}
+
+/**
+ * genlmsg_end - Finalize a generic netlink message
+ * @skb: socket buffer the message is stored in
+ * @hdr: user specific header
+ */
+static inline int genlmsg_end(struct sk_buff *skb, void *hdr)
+{
+	return nlmsg_end(skb, hdr - GENL_HDRLEN - NLMSG_HDRLEN);
+}
+
+/**
+ * genlmsg_cancel - Cancel construction of a generic netlink message
+ * @skb: socket buffer the message is stored in
+ * @hdr: generic netlink message header
+ */
+static inline int genlmsg_cancel(struct sk_buff *skb, void *hdr)
+{
+	return nlmsg_cancel(skb, hdr - GENL_HDRLEN - NLMSG_HDRLEN);
+}
+
+/**
+ * genlmsg_multicast - multicast a netlink message
+ * @skb: netlink message as socket buffer
+ * @pid: own netlink pid to avoid sending to yourself
+ * @group: multicast group id
+ */
+static inline int genlmsg_multicast(struct sk_buff *skb, u32 pid,
+				    unsigned int group)
+{
+	return nlmsg_multicast(genl_sock, skb, pid, group);
+}
+
+/**
+ * genlmsg_unicast - unicast a netlink message
+ * @skb: netlink message as socket buffer
+ * @pid: netlink pid of the destination socket
+ */
+static inline int genlmsg_unicast(struct sk_buff *skb, u32 pid)
+{
+	return nlmsg_unicast(genl_sock, skb, pid);
+}
+
+#endif	/* __NET_GENERIC_NETLINK_H */
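A hedged sketch of how the pieces of this new API are expected to fit together; the family name, the command number and the GENL_ID_GENERATE constant (taken to mean 'allocate an id for me', from linux/genetlink.h) are assumptions for illustration:

	static struct genl_family example_family = {
		.id		= GENL_ID_GENERATE,	/* assumed allocator constant */
		.name		= "EXAMPLE",
		.version	= 1,
		.hdrsize	= 0,
		.maxattr	= 0,
	};

	static int example_doit(struct sk_buff *skb, struct genl_info *info)
	{
		/* info->attrs[] holds pre-parsed attributes,
		 * info->snd_pid / info->snd_seq identify the requester. */
		return 0;
	}

	static struct genl_ops example_ops = {
		.cmd	= 1,			/* hypothetical command */
		.doit	= example_doit,
	};

	static int __init example_init(void)
	{
		int err = genl_register_family(&example_family);

		if (err)
			return err;
		err = genl_register_ops(&example_family, &example_ops);
		if (err)
			genl_unregister_family(&example_family);
		return err;
	}

Replies would then be built with genlmsg_put()/genlmsg_end() and delivered with genlmsg_unicast() or genlmsg_multicast().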
diff --git a/include/net/netfilter/ipv4/nf_conntrack_icmp.h b/include/net/netfilter/ipv4/nf_conntrack_icmp.h
new file mode 100644
index 0000000..3dd22cf
--- /dev/null
+++ b/include/net/netfilter/ipv4/nf_conntrack_icmp.h
@@ -0,0 +1,11 @@
+#ifndef _NF_CONNTRACK_ICMP_H
+#define _NF_CONNTRACK_ICMP_H
+/* ICMP tracking. */
+#include <asm/atomic.h>
+
+struct ip_ct_icmp
+{
+	/* Optimization: when number in == number out, forget immediately. */
+	atomic_t count;
+};
+#endif /* _NF_CONNTRACK_ICMP_H */
diff --git a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
new file mode 100644
index 0000000..25b081a
--- /dev/null
+++ b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
@@ -0,0 +1,43 @@
+/*
+ * IPv4 support for nf_conntrack.
+ *
+ * 23 Mar 2004: Yasuyuki Kozakai @ USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ *	- move L3 protocol dependent part from include/linux/netfilter_ipv4/
+ *	  ip_conntrack.h
+ */
+
+#ifndef _NF_CONNTRACK_IPV4_H
+#define _NF_CONNTRACK_IPV4_H
+
+#ifdef CONFIG_IP_NF_NAT_NEEDED
+#include <linux/netfilter_ipv4/ip_nat.h>
+
+/* per conntrack: nat application helper private data */
+union ip_conntrack_nat_help {
+        /* insert nat helper private data here */
+};
+
+struct nf_conntrack_ipv4_nat {
+	struct ip_nat_info info;
+	union ip_conntrack_nat_help help;
+#if defined(CONFIG_IP_NF_TARGET_MASQUERADE) || \
+	defined(CONFIG_IP_NF_TARGET_MASQUERADE_MODULE)
+	int masq_index;
+#endif
+};
+#endif /* CONFIG_IP_NF_NAT_NEEDED */
+
+struct nf_conntrack_ipv4 {
+#ifdef CONFIG_IP_NF_NAT_NEEDED
+	struct nf_conntrack_ipv4_nat *nat;
+#endif
+};
+
+/* Returns new sk_buff, or NULL */
+struct sk_buff *
+nf_ct_ipv4_ct_gather_frags(struct sk_buff *skb);
+
+/* call to create an explicit dependency on nf_conntrack_l3proto_ipv4. */
+extern void need_ip_conntrack(void);
+
+#endif /*_NF_CONNTRACK_IPV4_H*/
diff --git a/include/net/netfilter/ipv6/nf_conntrack_icmpv6.h b/include/net/netfilter/ipv6/nf_conntrack_icmpv6.h
new file mode 100644
index 0000000..86591af
--- /dev/null
+++ b/include/net/netfilter/ipv6/nf_conntrack_icmpv6.h
@@ -0,0 +1,27 @@
+/*
+ * ICMPv6 tracking.
+ *
+ * 21 Apr 2004: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ *	- separated from nf_conntrack_icmp.h
+ *
+ * Derived from include/linux/netfilter_ipv4/ip_conntrack_icmp.h
+ */
+
+#ifndef _NF_CONNTRACK_ICMPV6_H
+#define _NF_CONNTRACK_ICMPV6_H
+#include <asm/atomic.h>
+
+#ifndef ICMPV6_NI_QUERY
+#define ICMPV6_NI_QUERY 139
+#endif
+#ifndef ICMPV6_NI_REPLY
+#define ICMPV6_NI_REPLY 140
+#endif
+
+struct nf_ct_icmpv6
+{
+	/* Optimization: when number in == number out, forget immediately. */
+	atomic_t count;
+};
+
+#endif /* _NF_CONNTRACK_ICMPV6_H */
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
new file mode 100644
index 0000000..cc48256
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack.h
@@ -0,0 +1,354 @@
+/*
+ * Connection state tracking for netfilter.  This is separated from,
+ * but required by, the (future) NAT layer; it can also be used by an iptables
+ * extension.
+ *
+ * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ *	- generalize L3 protocol dependent part.
+ *
+ * Derived from include/linux/netfilter_ipv4/ip_conntrack.h
+ */
+
+#ifndef _NF_CONNTRACK_H
+#define _NF_CONNTRACK_H
+
+#include <linux/netfilter/nf_conntrack_common.h>
+
+#ifdef __KERNEL__
+#include <linux/config.h>
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <asm/atomic.h>
+
+#include <linux/netfilter/nf_conntrack_tcp.h>
+#include <linux/netfilter/nf_conntrack_sctp.h>
+#include <net/netfilter/ipv4/nf_conntrack_icmp.h>
+#include <net/netfilter/ipv6/nf_conntrack_icmpv6.h>
+
+#include <net/netfilter/nf_conntrack_tuple.h>
+
+/* per conntrack: protocol private data */
+union nf_conntrack_proto {
+	/* insert conntrack proto private data here */
+	struct ip_ct_sctp sctp;
+	struct ip_ct_tcp tcp;
+	struct ip_ct_icmp icmp;
+	struct nf_ct_icmpv6 icmpv6;
+};
+
+union nf_conntrack_expect_proto {
+	/* insert expect proto private data here */
+};
+
+/* Add protocol helper include file here */
+#include <linux/netfilter/nf_conntrack_ftp.h>
+
+/* per conntrack: application helper private data */
+union nf_conntrack_help {
+	/* insert conntrack helper private data (master) here */
+	struct ip_ct_ftp_master ct_ftp_info;
+};
+
+#include <linux/types.h>
+#include <linux/skbuff.h>
+
+#ifdef CONFIG_NETFILTER_DEBUG
+#define NF_CT_ASSERT(x)							\
+do {									\
+	if (!(x))							\
+		/* Wooah!  I'm tripping my conntrack in a frenzy of	\
+		   netplay... */					\
+		printk("NF_CT_ASSERT: %s:%i(%s)\n",			\
+		       __FILE__, __LINE__, __FUNCTION__);		\
+} while(0)
+#else
+#define NF_CT_ASSERT(x)
+#endif
+
+struct nf_conntrack_helper;
+
+#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
+struct nf_conn
+{
+	/* Usage count in here is 1 for hash table/destruct timer, 1 per skb,
+           plus 1 for any connection(s) we are `master' for */
+	struct nf_conntrack ct_general;
+
+	/* XXX should I move this to the tail ? - Y.K */
+	/* These are my tuples; original and reply */
+	struct nf_conntrack_tuple_hash tuplehash[IP_CT_DIR_MAX];
+
+	/* Have we seen traffic both ways yet? (bitset) */
+	unsigned long status;
+
+	/* Timer function; drops refcnt when it goes off. */
+	struct timer_list timeout;
+
+#ifdef CONFIG_NF_CT_ACCT
+	/* Accounting Information (same cache line as other written members) */
+	struct ip_conntrack_counter counters[IP_CT_DIR_MAX];
+#endif
+	/* If we were expected by an expectation, this will be it */
+	struct nf_conn *master;
+	
+	/* Current number of expected connections */
+	unsigned int expecting;
+
+	/* Helper, if any */
+	struct nf_conntrack_helper *helper;
+
+	/* features - nat, helper, ... used by allocating system */
+	u_int32_t features;
+
+	/* Storage reserved for other modules: */
+
+	union nf_conntrack_proto proto;
+
+#if defined(CONFIG_NF_CONNTRACK_MARK)
+	u_int32_t mark;
+#endif
+
+	/* These members are dynamically allocated. */
+
+	union nf_conntrack_help *help;
+
+	/* Layer 3 dependent members. (ex: NAT) */
+	union {
+		struct nf_conntrack_ipv4 *ipv4;
+	} l3proto;
+	void *data[0];
+};
+
+struct nf_conntrack_expect
+{
+	/* Internal linked list (global expectation list) */
+	struct list_head list;
+
+	/* We expect this tuple, with the following mask */
+	struct nf_conntrack_tuple tuple, mask;
+ 
+	/* Function to call after setup and insertion */
+	void (*expectfn)(struct nf_conn *new,
+			 struct nf_conntrack_expect *this);
+
+	/* The conntrack of the master connection */
+	struct nf_conn *master;
+
+	/* Timer function; deletes the expectation. */
+	struct timer_list timeout;
+
+	/* Usage count. */
+	atomic_t use;
+
+	/* Flags */
+	unsigned int flags;
+
+#ifdef CONFIG_NF_NAT_NEEDED
+	/* This is the original per-proto part, used to map the
+	 * expected connection the way the recipient expects. */
+	union nf_conntrack_manip_proto saved_proto;
+	/* Direction relative to the master connection. */
+	enum ip_conntrack_dir dir;
+#endif
+};
+
+#define NF_CT_EXPECT_PERMANENT 0x1
+
+static inline struct nf_conn *
+nf_ct_tuplehash_to_ctrack(const struct nf_conntrack_tuple_hash *hash)
+{
+	return container_of(hash, struct nf_conn,
+			    tuplehash[hash->tuple.dst.dir]);
+}
+
+/* get master conntrack via master expectation */
+#define master_ct(conntr) (conntr->master)
+
+/* Alter reply tuple (maybe alter helper). */
+extern void
+nf_conntrack_alter_reply(struct nf_conn *conntrack,
+			 const struct nf_conntrack_tuple *newreply);
+
+/* Is this tuple taken? (ignoring any belonging to the given
+   conntrack). */
+extern int
+nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
+			 const struct nf_conn *ignored_conntrack);
+
+/* Return conntrack_info and tuple hash for given skb. */
+static inline struct nf_conn *
+nf_ct_get(const struct sk_buff *skb, enum ip_conntrack_info *ctinfo)
+{
+	*ctinfo = skb->nfctinfo;
+	return (struct nf_conn *)skb->nfct;
+}
+
+/* decrement reference count on a conntrack */
+static inline void nf_ct_put(struct nf_conn *ct)
+{
+	NF_CT_ASSERT(ct);
+	nf_conntrack_put(&ct->ct_general);
+}
+
+/* call to create an explicit dependency on nf_conntrack. */
+extern void need_nf_conntrack(void);
+
+extern int nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
+				const struct nf_conntrack_tuple *orig);
+
+extern void __nf_ct_refresh_acct(struct nf_conn *ct,
+				 enum ip_conntrack_info ctinfo,
+				 const struct sk_buff *skb,
+				 unsigned long extra_jiffies,
+				 int do_acct);
+
+/* Refresh conntrack for this many jiffies and do accounting */
+static inline void nf_ct_refresh_acct(struct nf_conn *ct,
+				      enum ip_conntrack_info ctinfo,
+				      const struct sk_buff *skb,
+				      unsigned long extra_jiffies)
+{
+	__nf_ct_refresh_acct(ct, ctinfo, skb, extra_jiffies, 1);
+}
+
+/* Refresh conntrack for this many jiffies */
+static inline void nf_ct_refresh(struct nf_conn *ct,
+				 const struct sk_buff *skb,
+				 unsigned long extra_jiffies)
+{
+	__nf_ct_refresh_acct(ct, 0, skb, extra_jiffies, 0);
+}
+
+/* These are for NAT.  Icky. */
+/* Update TCP window tracking data when NAT mangles the packet */
+extern void nf_conntrack_tcp_update(struct sk_buff *skb,
+				    unsigned int dataoff,
+				    struct nf_conn *conntrack,
+				    int dir);
+
+/* Call me when a conntrack is destroyed. */
+extern void (*nf_conntrack_destroyed)(struct nf_conn *conntrack);
+
+/* Fake conntrack entry for untracked connections */
+extern struct nf_conn nf_conntrack_untracked;
+
+extern int nf_ct_no_defrag;
+
+/* Iterate over all conntracks: if iter returns true, it's deleted. */
+extern void
+nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data), void *data);
+extern void nf_conntrack_free(struct nf_conn *ct);
+extern struct nf_conn *
+nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
+		   const struct nf_conntrack_tuple *repl);
+
+/* It's confirmed if it is, or has been, in the hash table. */
+static inline int nf_ct_is_confirmed(struct nf_conn *ct)
+{
+	return test_bit(IPS_CONFIRMED_BIT, &ct->status);
+}
+
+static inline int nf_ct_is_dying(struct nf_conn *ct)
+{
+	return test_bit(IPS_DYING_BIT, &ct->status);
+}
+
+extern unsigned int nf_conntrack_htable_size;
+
+#define NF_CT_STAT_INC(count) (__get_cpu_var(nf_conntrack_stat).count++)
+
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+#include <linux/notifier.h>
+#include <linux/interrupt.h>
+
+struct nf_conntrack_ecache {
+	struct nf_conn *ct;
+	unsigned int events;
+};
+DECLARE_PER_CPU(struct nf_conntrack_ecache, nf_conntrack_ecache);
+
+#define CONNTRACK_ECACHE(x)	(__get_cpu_var(nf_conntrack_ecache).x)
+
+extern struct notifier_block *nf_conntrack_chain;
+extern struct notifier_block *nf_conntrack_expect_chain;
+
+static inline int nf_conntrack_register_notifier(struct notifier_block *nb)
+{
+	return notifier_chain_register(&nf_conntrack_chain, nb);
+}
+
+static inline int nf_conntrack_unregister_notifier(struct notifier_block *nb)
+{
+	return notifier_chain_unregister(&nf_conntrack_chain, nb);
+}
+
+static inline int
+nf_conntrack_expect_register_notifier(struct notifier_block *nb)
+{
+	return notifier_chain_register(&nf_conntrack_expect_chain, nb);
+}
+
+static inline int
+nf_conntrack_expect_unregister_notifier(struct notifier_block *nb)
+{
+	return notifier_chain_unregister(&nf_conntrack_expect_chain, nb);
+}
+
+extern void nf_ct_deliver_cached_events(const struct nf_conn *ct);
+extern void __nf_ct_event_cache_init(struct nf_conn *ct);
+
+static inline void
+nf_conntrack_event_cache(enum ip_conntrack_events event,
+			 const struct sk_buff *skb)
+{
+	struct nf_conn *ct = (struct nf_conn *)skb->nfct;
+	struct nf_conntrack_ecache *ecache;
+
+	local_bh_disable();
+	ecache = &__get_cpu_var(nf_conntrack_ecache);
+	if (ct != ecache->ct)
+		__nf_ct_event_cache_init(ct);
+	ecache->events |= event;
+	local_bh_enable();
+}
+
+static inline void nf_conntrack_event(enum ip_conntrack_events event,
+				      struct nf_conn *ct)
+{
+	if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct))
+		notifier_call_chain(&nf_conntrack_chain, event, ct);
+}
+
+static inline void
+nf_conntrack_expect_event(enum ip_conntrack_expect_events event,
+			  struct nf_conntrack_expect *exp)
+{
+	notifier_call_chain(&nf_conntrack_expect_chain, event, exp);
+}
+#else /* CONFIG_NF_CONNTRACK_EVENTS */
+static inline void nf_conntrack_event_cache(enum ip_conntrack_events event,
+					    const struct sk_buff *skb) {}
+static inline void nf_conntrack_event(enum ip_conntrack_events event,
+				      struct nf_conn *ct) {}
+static inline void nf_ct_deliver_cached_events(const struct nf_conn *ct) {}
+static inline void
+nf_conntrack_expect_event(enum ip_conntrack_expect_events event,
+			  struct nf_conntrack_expect *exp) {}
+#endif /* CONFIG_NF_CONNTRACK_EVENTS */
+
+/* no helper, no nat */
+#define	NF_CT_F_BASIC	0
+/* for helper */
+#define	NF_CT_F_HELP	1
+/* for nat. */
+#define	NF_CT_F_NAT	2
+#define NF_CT_F_NUM	4
+
+extern int
+nf_conntrack_register_cache(u_int32_t features, const char *name, size_t size,
+			    int (*init_conntrack)(struct nf_conn *, u_int32_t));
+extern void
+nf_conntrack_unregister_cache(u_int32_t features);
+
+#endif /* __KERNEL__ */
+#endif /* _NF_CONNTRACK_H */
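A small sketch of how the lookup and refresh helpers above combine in a per-packet path; the function name and EXAMPLE_TIMEOUT are invented for illustration:

	#define EXAMPLE_TIMEOUT	(30 * HZ)

	static void example_seen_packet(const struct sk_buff *skb)
	{
		enum ip_conntrack_info ctinfo;
		struct nf_conn *ct = nf_ct_get(skb, &ctinfo);

		if (ct == NULL)
			return;

		/* Push the timeout forward and, with CONFIG_NF_CT_ACCT,
		 * account the packet against this direction. */
		nf_ct_refresh_acct(ct, ctinfo, skb, EXAMPLE_TIMEOUT);
	}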
diff --git a/include/net/netfilter/nf_conntrack_compat.h b/include/net/netfilter/nf_conntrack_compat.h
new file mode 100644
index 0000000..3cac19f
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_compat.h
@@ -0,0 +1,108 @@
+#ifndef _NF_CONNTRACK_COMPAT_H
+#define _NF_CONNTRACK_COMPAT_H
+
+#ifdef __KERNEL__
+
+#if defined(CONFIG_IP_NF_CONNTRACK) || defined(CONFIG_IP_NF_CONNTRACK_MODULE)
+
+#include <linux/netfilter_ipv4/ip_conntrack.h>
+
+#ifdef CONFIG_IP_NF_CONNTRACK_MARK
+static inline u_int32_t *nf_ct_get_mark(const struct sk_buff *skb,
+					u_int32_t *ctinfo)
+{
+	struct ip_conntrack *ct = ip_conntrack_get(skb, ctinfo);
+
+	if (ct)
+		return &ct->mark;
+	else
+		return NULL;
+}
+#endif /* CONFIG_IP_NF_CONNTRACK_MARK */
+
+#ifdef CONFIG_IP_NF_CT_ACCT
+static inline struct ip_conntrack_counter *
+nf_ct_get_counters(const struct sk_buff *skb)
+{
+	enum ip_conntrack_info ctinfo;
+	struct ip_conntrack *ct = ip_conntrack_get(skb, &ctinfo);
+
+	if (ct)
+		return ct->counters;
+	else
+		return NULL;
+}
+#endif /* CONFIG_IP_NF_CT_ACCT */
+
+static inline int nf_ct_is_untracked(const struct sk_buff *skb)
+{
+	return (skb->nfct == &ip_conntrack_untracked.ct_general);
+}
+
+static inline void nf_ct_untrack(struct sk_buff *skb)
+{
+	skb->nfct = &ip_conntrack_untracked.ct_general;
+}
+
+static inline int nf_ct_get_ctinfo(const struct sk_buff *skb,
+				   enum ip_conntrack_info *ctinfo)
+{
+	struct ip_conntrack *ct = ip_conntrack_get(skb, ctinfo);
+	return (ct != NULL);
+}
+
+#else /* CONFIG_IP_NF_CONNTRACK */
+
+#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
+#include <net/netfilter/nf_conntrack.h>
+
+#ifdef CONFIG_NF_CONNTRACK_MARK
+
+static inline u_int32_t *nf_ct_get_mark(const struct sk_buff *skb,
+					u_int32_t *ctinfo)
+{
+	struct nf_conn *ct = nf_ct_get(skb, ctinfo);
+
+	if (ct)
+		return &ct->mark;
+	else
+		return NULL;
+}
+#endif /* CONFIG_NF_CONNTRACK_MARK */
+
+#ifdef CONFIG_NF_CT_ACCT
+static inline struct ip_conntrack_counter *
+nf_ct_get_counters(const struct sk_buff *skb)
+{
+	enum ip_conntrack_info ctinfo;
+	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+
+	if (ct)
+		return ct->counters;
+	else
+		return NULL;
+}
+#endif /* CONFIG_NF_CT_ACCT */
+
+static inline int nf_ct_is_untracked(const struct sk_buff *skb)
+{
+	return (skb->nfct == &nf_conntrack_untracked.ct_general);
+}
+
+static inline void nf_ct_untrack(struct sk_buff *skb)
+{
+	skb->nfct = &nf_conntrack_untracked.ct_general;
+}
+
+static inline int nf_ct_get_ctinfo(const struct sk_buff *skb,
+				   enum ip_conntrack_info *ctinfo)
+{
+	struct nf_conn *ct = nf_ct_get(skb, ctinfo);
+	return (ct != NULL);
+}
+
+#endif /* CONFIG_IP_NF_CONNTRACK */
+
+#endif /* __KERNEL__ */
+
+#endif /* _NF_CONNTRACK_COMPAT_H */
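The point of this header is that code written against the wrappers builds unchanged whether the old ip_conntrack or the new nf_conntrack is configured; a brief sketch with an invented function name:

	static int example_inspect(const struct sk_buff *skb)
	{
		enum ip_conntrack_info ctinfo;

		/* Skip traffic the ruleset explicitly left untracked. */
		if (nf_ct_is_untracked(skb))
			return 0;

		/* Non-zero if a conntrack entry is attached, regardless of
		 * which conntrack implementation is compiled in. */
		return nf_ct_get_ctinfo(skb, &ctinfo);
	}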
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
new file mode 100644
index 0000000..da25452
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -0,0 +1,76 @@
+/*
+ * This header is used to share core functionality between the
+ * standalone connection tracking module, and the compatibility layer's use
+ * of connection tracking.
+ *
+ * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ *	- generalize L3 protocol dependent part.
+ *
+ * Derived from include/linux/netfilter_ipv4/ip_conntrack_core.h
+ */
+
+#ifndef _NF_CONNTRACK_CORE_H
+#define _NF_CONNTRACK_CORE_H
+
+#include <linux/netfilter.h>
+
+extern unsigned int nf_conntrack_in(int pf,
+				    unsigned int hooknum,
+				    struct sk_buff **pskb);
+
+extern int nf_conntrack_init(void);
+extern void nf_conntrack_cleanup(void);
+
+struct nf_conntrack_l3proto;
+extern struct nf_conntrack_l3proto *nf_ct_find_l3proto(u_int16_t pf);
+/* Like above, but you already have conntrack read lock. */
+extern struct nf_conntrack_l3proto *__nf_ct_find_l3proto(u_int16_t l3proto);
+
+struct nf_conntrack_protocol;
+
+extern int
+nf_ct_get_tuple(const struct sk_buff *skb,
+		unsigned int nhoff,
+		unsigned int dataoff,
+		u_int16_t l3num,
+		u_int8_t protonum,
+		struct nf_conntrack_tuple *tuple,
+		const struct nf_conntrack_l3proto *l3proto,
+		const struct nf_conntrack_protocol *protocol);
+
+extern int
+nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
+		   const struct nf_conntrack_tuple *orig,
+		   const struct nf_conntrack_l3proto *l3proto,
+		   const struct nf_conntrack_protocol *protocol);
+
+/* Find a connection corresponding to a tuple. */
+extern struct nf_conntrack_tuple_hash *
+nf_conntrack_find_get(const struct nf_conntrack_tuple *tuple,
+		      const struct nf_conn *ignored_conntrack);
+
+extern int __nf_conntrack_confirm(struct sk_buff **pskb);
+
+/* Confirm a connection: returns NF_DROP if packet must be dropped. */
+static inline int nf_conntrack_confirm(struct sk_buff **pskb)
+{
+	struct nf_conn *ct = (struct nf_conn *)(*pskb)->nfct;
+	int ret = NF_ACCEPT;
+
+	if (ct) {
+		if (!nf_ct_is_confirmed(ct))
+			ret = __nf_conntrack_confirm(pskb);
+		nf_ct_deliver_cached_events(ct);
+	}
+	return ret;
+}
+
+extern void __nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb);
+
+extern struct list_head *nf_conntrack_hash;
+extern struct list_head nf_conntrack_expect_list;
+extern rwlock_t nf_conntrack_lock;
+#endif /* _NF_CONNTRACK_CORE_H */
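For orientation, a hedged sketch of the kind of hook, near the end of the hook chain, from which nf_conntrack_confirm() is meant to be called; the hook function itself is invented:

	static unsigned int example_confirm(unsigned int hooknum,
					    struct sk_buff **pskb,
					    const struct net_device *in,
					    const struct net_device *out,
					    int (*okfn)(struct sk_buff *))
	{
		/* Inserts the entry into the hash table the first time a
		 * packet of the connection makes it this far, and delivers
		 * any cached events; may return NF_DROP. */
		return nf_conntrack_confirm(pskb);
	}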
diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h
new file mode 100644
index 0000000..5a66b2a
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_helper.h
@@ -0,0 +1,51 @@
+/*
+ * connection tracking helpers.
+ *
+ * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ *	- generalize L3 protocol dependent part.
+ *
+ * Derived from include/linux/netfilter_ipv4/ip_conntrack_helper.h
+ */
+
+#ifndef _NF_CONNTRACK_HELPER_H
+#define _NF_CONNTRACK_HELPER_H
+#include <net/netfilter/nf_conntrack.h>
+
+struct module;
+
+struct nf_conntrack_helper
+{	
+	struct list_head list; 		/* Internal use. */
+
+	const char *name;		/* name of the module */
+	struct module *me;		/* pointer to self */
+	unsigned int max_expected;	/* Maximum number of concurrent 
+					 * expected connections */
+	unsigned int timeout;		/* timeout for expecteds */
+
+	/* Mask of things we will help (compared against server response) */
+	struct nf_conntrack_tuple tuple;
+	struct nf_conntrack_tuple mask;
+	
+	/* Function to call when data passes; return verdict, or -1 to
+           invalidate. */
+	int (*help)(struct sk_buff **pskb,
+		    unsigned int protoff,
+		    struct nf_conn *ct,
+		    enum ip_conntrack_info conntrackinfo);
+};
+
+extern int nf_conntrack_helper_register(struct nf_conntrack_helper *);
+extern void nf_conntrack_helper_unregister(struct nf_conntrack_helper *);
+
+/* Allocate space for an expectation: this is mandatory before calling
+   nf_conntrack_expect_related.  You will have to call put afterwards. */
+extern struct nf_conntrack_expect *
+nf_conntrack_expect_alloc(struct nf_conn *master);
+extern void nf_conntrack_expect_put(struct nf_conntrack_expect *exp);
+
+/* Add an expected connection: can have more than one per connection */
+extern int nf_conntrack_expect_related(struct nf_conntrack_expect *exp);
+extern void nf_conntrack_unexpect_related(struct nf_conntrack_expect *exp);
+
+#endif /*_NF_CONNTRACK_HELPER_H*/
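To make the structure concrete, a hypothetical registration sketch; the port, timeout and names are assumptions, loosely patterned on how a protocol helper such as FTP would use this API:

	static int example_help(struct sk_buff **pskb, unsigned int protoff,
				struct nf_conn *ct,
				enum ip_conntrack_info ctinfo)
	{
		return NF_ACCEPT;	/* nothing to adjust in this sketch */
	}

	static struct nf_conntrack_helper example_helper = {
		.name			= "example",
		.max_expected		= 1,
		.timeout		= 5 * 60,
		.tuple.src.l3num	= AF_INET,
		.tuple.src.u.tcp.port	= __constant_htons(12345),
		.tuple.dst.protonum	= IPPROTO_TCP,
		.mask.src.l3num		= 0xFFFF,
		.mask.src.u.tcp.port	= __constant_htons(0xFFFF),
		.mask.dst.protonum	= 0xFF,
		.help			= example_help,
	};

	/* module init: nf_conntrack_helper_register(&example_helper);
	 * module exit: nf_conntrack_helper_unregister(&example_helper); */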
diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h
new file mode 100644
index 0000000..01663e5
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_l3proto.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C)2003,2004 USAGI/WIDE Project
+ *
+ * Header for use in defining a given L3 protocol for connection tracking.
+ *
+ * Author:
+ *	Yasuyuki Kozakai @USAGI	<yasuyuki.kozakai@toshiba.co.jp>
+ *
+ * Derived from include/linux/netfilter_ipv4/ip_conntrack_protocol.h
+ */
+
+#ifndef _NF_CONNTRACK_L3PROTO_H
+#define _NF_CONNTRACK_L3PROTO_H
+#include <linux/seq_file.h>
+#include <net/netfilter/nf_conntrack.h>
+
+struct nf_conntrack_l3proto
+{
+	/* Next pointer. */
+	struct list_head list;
+
+	/* L3 Protocol Family number. ex) PF_INET */
+	u_int16_t l3proto;
+
+	/* Protocol name */
+	const char *name;
+
+	/*
+	 * Try to fill in the third arg: nhoff is offset of l3 proto
+         * hdr.  Return true if possible.
+	 */
+	int (*pkt_to_tuple)(const struct sk_buff *skb, unsigned int nhoff,
+			    struct nf_conntrack_tuple *tuple);
+
+	/*
+	 * Invert the per-proto part of the tuple: ie. turn xmit into reply.
+	 * Some packets can't be inverted: return 0 in that case.
+	 */
+	int (*invert_tuple)(struct nf_conntrack_tuple *inverse,
+			    const struct nf_conntrack_tuple *orig);
+
+	/* Print out the per-protocol part of the tuple. */
+	int (*print_tuple)(struct seq_file *s,
+			   const struct nf_conntrack_tuple *);
+
+	/* Print out the private part of the conntrack. */
+	int (*print_conntrack)(struct seq_file *s, const struct nf_conn *);
+
+	/* Returns verdict for packet, or -1 for invalid. */
+	int (*packet)(struct nf_conn *conntrack,
+		      const struct sk_buff *skb,
+		      enum ip_conntrack_info ctinfo);
+
+	/*
+	 * Called when a new connection for this protocol found;
+	 * returns TRUE if it's OK.  If so, packet() called next.
+	 */
+	int (*new)(struct nf_conn *conntrack, const struct sk_buff *skb);
+
+	/* Called when a conntrack entry is destroyed */
+	void (*destroy)(struct nf_conn *conntrack);
+
+	/*
+	 * Called before tracking. 
+	 *	*dataoff: offset of protocol header (TCP, UDP,...) in *pskb
+	 *	*protonum: protocol number
+	 */
+	int (*prepare)(struct sk_buff **pskb, unsigned int hooknum,
+		       unsigned int *dataoff, u_int8_t *protonum);
+
+	u_int32_t (*get_features)(const struct nf_conntrack_tuple *tuple);
+
+	/* Module (if any) which this is connected to. */
+	struct module *me;
+};
+
+extern struct nf_conntrack_l3proto *nf_ct_l3protos[AF_MAX];
+
+/* Protocol registration. */
+extern int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto);
+extern void nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto);
+
+static inline struct nf_conntrack_l3proto *
+nf_ct_find_l3proto(u_int16_t l3proto)
+{
+	return nf_ct_l3protos[l3proto];
+}
+
+/* Existing built-in protocols */
+extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4;
+extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6;
+extern struct nf_conntrack_l3proto nf_conntrack_generic_l3proto;
+#endif /*_NF_CONNTRACK_L3PROTO_H*/
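A short sketch of the dispatch pattern the l3proto table enables; the function is illustrative, and it assumes unknown families fall back to the generic entry:

	static int example_print_tuple(struct seq_file *s,
				       const struct nf_conntrack_tuple *t)
	{
		/* Pick the handler registered for the tuple's protocol
		 * family and delegate the per-family formatting to it. */
		struct nf_conntrack_l3proto *l3proto =
			nf_ct_find_l3proto(t->src.l3num);

		return l3proto->print_tuple(s, t);
	}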
diff --git a/include/net/netfilter/nf_conntrack_protocol.h b/include/net/netfilter/nf_conntrack_protocol.h
new file mode 100644
index 0000000..b3afda3
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_protocol.h
@@ -0,0 +1,105 @@
+/*
+ * Header for use in defining a given protocol for connection tracking.
+ *
+ * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ *	- generalized L3 protocol dependent part.
+ *
+ * Derived from include/linux/netfilter_ipv4/ip_conntrack_protocol.h
+ */
+
+#ifndef _NF_CONNTRACK_PROTOCOL_H
+#define _NF_CONNTRACK_PROTOCOL_H
+#include <net/netfilter/nf_conntrack.h>
+
+struct seq_file;
+
+struct nf_conntrack_protocol
+{
+	/* Next pointer. */
+	struct list_head list;
+
+	/* L3 Protocol number. */
+	u_int16_t l3proto;
+
+	/* Protocol number. */
+	u_int8_t proto;
+
+	/* Protocol name */
+	const char *name;
+
+	/* Try to fill in the third arg: dataoff is offset past network protocol
+           hdr.  Return true if possible. */
+	int (*pkt_to_tuple)(const struct sk_buff *skb,
+			    unsigned int dataoff,
+			    struct nf_conntrack_tuple *tuple);
+
+	/* Invert the per-proto part of the tuple: ie. turn xmit into reply.
+	 * Some packets can't be inverted: return 0 in that case.
+	 */
+	int (*invert_tuple)(struct nf_conntrack_tuple *inverse,
+			    const struct nf_conntrack_tuple *orig);
+
+	/* Print out the per-protocol part of the tuple. Return like seq_* */
+	int (*print_tuple)(struct seq_file *s,
+			   const struct nf_conntrack_tuple *);
+
+	/* Print out the private part of the conntrack. */
+	int (*print_conntrack)(struct seq_file *s, const struct nf_conn *);
+
+	/* Returns verdict for packet, or -1 for invalid. */
+	int (*packet)(struct nf_conn *conntrack,
+		      const struct sk_buff *skb,
+		      unsigned int dataoff,
+		      enum ip_conntrack_info ctinfo,
+		      int pf,
+		      unsigned int hooknum);
+
+	/* Called when a new connection for this protocol found;
+	 * returns TRUE if it's OK.  If so, packet() called next. */
+	int (*new)(struct nf_conn *conntrack, const struct sk_buff *skb,
+		   unsigned int dataoff);
+
+	/* Called when a conntrack entry is destroyed */
+	void (*destroy)(struct nf_conn *conntrack);
+
+	int (*error)(struct sk_buff *skb, unsigned int dataoff,
+		     enum ip_conntrack_info *ctinfo,
+		     int pf, unsigned int hooknum);
+
+	/* Module (if any) which this is connected to. */
+	struct module *me;
+};
+
+/* Existing built-in protocols */
+extern struct nf_conntrack_protocol nf_conntrack_protocol_tcp6;
+extern struct nf_conntrack_protocol nf_conntrack_protocol_udp4;
+extern struct nf_conntrack_protocol nf_conntrack_protocol_udp6;
+extern struct nf_conntrack_protocol nf_conntrack_generic_protocol;
+
+#define MAX_NF_CT_PROTO 256
+extern struct nf_conntrack_protocol **nf_ct_protos[PF_MAX];
+
+extern struct nf_conntrack_protocol *
+nf_ct_find_proto(u_int16_t l3proto, u_int8_t protocol);
+
+/* Protocol registration. */
+extern int nf_conntrack_protocol_register(struct nf_conntrack_protocol *proto);
+extern void nf_conntrack_protocol_unregister(struct nf_conntrack_protocol *proto);
+
+/* Log invalid packets */
+extern unsigned int nf_ct_log_invalid;
+
+#ifdef CONFIG_SYSCTL
+#ifdef DEBUG_INVALID_PACKETS
+#define LOG_INVALID(proto) \
+	(nf_ct_log_invalid == (proto) || nf_ct_log_invalid == IPPROTO_RAW)
+#else
+#define LOG_INVALID(proto) \
+	((nf_ct_log_invalid == (proto) || nf_ct_log_invalid == IPPROTO_RAW) \
+	 && net_ratelimit())
+#endif
+#else
+#define LOG_INVALID(proto) 0
+#endif /* CONFIG_SYSCTL */
+
+#endif /*_NF_CONNTRACK_PROTOCOL_H*/
diff --git a/include/net/netfilter/nf_conntrack_tuple.h b/include/net/netfilter/nf_conntrack_tuple.h
new file mode 100644
index 0000000..14ce790
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_tuple.h
@@ -0,0 +1,190 @@
+/*
+ * Definitions and Declarations for tuple.
+ *
+ * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ *	- generalize L3 protocol dependent part.
+ *
+ * Derived from include/linux/netfilter_ipv4/ip_conntrack_tuple.h
+ */
+
+#ifndef _NF_CONNTRACK_TUPLE_H
+#define _NF_CONNTRACK_TUPLE_H
+
+#include <linux/netfilter/nf_conntrack_tuple_common.h>
+
+/* A `tuple' is a structure containing the information to uniquely
+  identify a connection.  ie. if two packets have the same tuple, they
+  are in the same connection; if not, they are not.
+
+  We divide the structure along "manipulatable" and
+  "non-manipulatable" lines, for the benefit of the NAT code.
+*/
+
+#define NF_CT_TUPLE_L3SIZE	4
+
+/* The l3 protocol-specific manipulable parts of the tuple: always in
+   network order! */
+union nf_conntrack_man_l3proto {
+	u_int32_t all[NF_CT_TUPLE_L3SIZE];
+	u_int32_t ip;
+	u_int32_t ip6[4];
+};
+
+/* The protocol-specific manipulable parts of the tuple: always in
+   network order! */
+union nf_conntrack_man_proto
+{
+	/* Add other protocols here. */
+	u_int16_t all;
+
+	struct {
+		u_int16_t port;
+	} tcp;
+	struct {
+		u_int16_t port;
+	} udp;
+	struct {
+		u_int16_t id;
+	} icmp;
+	struct {
+		u_int16_t port;
+	} sctp;
+};
+
+/* The manipulable part of the tuple. */
+struct nf_conntrack_man
+{
+	union nf_conntrack_man_l3proto u3;
+	union nf_conntrack_man_proto u;
+	/* Layer 3 protocol */
+	u_int16_t l3num;
+};
+
+/* This contains the information to distinguish a connection. */
+struct nf_conntrack_tuple
+{
+	struct nf_conntrack_man src;
+
+	/* These are the parts of the tuple which are fixed. */
+	struct {
+		union {
+			u_int32_t all[NF_CT_TUPLE_L3SIZE];
+			u_int32_t ip;
+			u_int32_t ip6[4];
+		} u3;
+		union {
+			/* Add other protocols here. */
+			u_int16_t all;
+
+			struct {
+				u_int16_t port;
+			} tcp;
+			struct {
+				u_int16_t port;
+			} udp;
+			struct {
+				u_int8_t type, code;
+			} icmp;
+			struct {
+				u_int16_t port;
+			} sctp;
+		} u;
+
+		/* The protocol. */
+		u_int8_t protonum;
+
+		/* The direction (for tuplehash) */
+		u_int8_t dir;
+	} dst;
+};
+
+/* This is optimized compared to a memset of the whole structure.  All we
+ * really care about are the source/destination unions. */
+#define NF_CT_TUPLE_U_BLANK(tuple)                              	\
+        do {                                                    	\
+                (tuple)->src.u.all = 0;                         	\
+                (tuple)->dst.u.all = 0;                         	\
+		memset(&(tuple)->src.u3, 0, sizeof((tuple)->src.u3));	\
+		memset(&(tuple)->dst.u3, 0, sizeof((tuple)->dst.u3));	\
+        } while (0)
+
+#ifdef __KERNEL__
+
+#define NF_CT_DUMP_TUPLE(tp)						    \
+DEBUGP("tuple %p: %u %u %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x %hu -> %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x %hu\n",					    \
+	(tp), (tp)->src.l3num, (tp)->dst.protonum,			    \
+	NIP6(*(struct in6_addr *)(tp)->src.u3.all), ntohs((tp)->src.u.all), \
+	NIP6(*(struct in6_addr *)(tp)->dst.u3.all), ntohs((tp)->dst.u.all))
+
+/* If we're the first tuple, it's the original dir. */
+#define NF_CT_DIRECTION(h)						\
+	((enum ip_conntrack_dir)(h)->tuple.dst.dir)
+
+/* Connections have two entries in the hash table: one for each way */
+struct nf_conntrack_tuple_hash
+{
+	struct list_head list;
+
+	struct nf_conntrack_tuple tuple;
+};
+
+#endif /* __KERNEL__ */
+
+static inline int nf_ct_tuple_src_equal(const struct nf_conntrack_tuple *t1,
+				        const struct nf_conntrack_tuple *t2)
+{ 
+	return (t1->src.u3.all[0] == t2->src.u3.all[0] &&
+		t1->src.u3.all[1] == t2->src.u3.all[1] &&
+		t1->src.u3.all[2] == t2->src.u3.all[2] &&
+		t1->src.u3.all[3] == t2->src.u3.all[3] &&
+		t1->src.u.all == t2->src.u.all &&
+		t1->src.l3num == t2->src.l3num &&
+		t1->dst.protonum == t2->dst.protonum);
+}
+
+static inline int nf_ct_tuple_dst_equal(const struct nf_conntrack_tuple *t1,
+				        const struct nf_conntrack_tuple *t2)
+{
+	return (t1->dst.u3.all[0] == t2->dst.u3.all[0] &&
+		t1->dst.u3.all[1] == t2->dst.u3.all[1] &&
+		t1->dst.u3.all[2] == t2->dst.u3.all[2] &&
+		t1->dst.u3.all[3] == t2->dst.u3.all[3] &&
+		t1->dst.u.all == t2->dst.u.all &&
+		t1->src.l3num == t2->src.l3num &&
+		t1->dst.protonum == t2->dst.protonum);
+}
+
+static inline int nf_ct_tuple_equal(const struct nf_conntrack_tuple *t1,
+				    const struct nf_conntrack_tuple *t2)
+{
+	return nf_ct_tuple_src_equal(t1, t2) && nf_ct_tuple_dst_equal(t1, t2);
+}
+
+static inline int nf_ct_tuple_mask_cmp(const struct nf_conntrack_tuple *t,
+				       const struct nf_conntrack_tuple *tuple,
+				       const struct nf_conntrack_tuple *mask)
+{
+	int count = 0;
+
+        for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++){
+                if ((t->src.u3.all[count] ^ tuple->src.u3.all[count]) &
+                    mask->src.u3.all[count])
+                        return 0;
+        }
+
+        for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++){
+                if ((t->dst.u3.all[count] ^ tuple->dst.u3.all[count]) &
+                    mask->dst.u3.all[count])
+                        return 0;
+        }
+
+        if ((t->src.u.all ^ tuple->src.u.all) & mask->src.u.all ||
+            (t->dst.u.all ^ tuple->dst.u.all) & mask->dst.u.all ||
+            (t->src.l3num ^ tuple->src.l3num) & mask->src.l3num ||
+            (t->dst.protonum ^ tuple->dst.protonum) & mask->dst.protonum)
+                return 0;
+
+        return 1;
+}
+
+#endif /* _NF_CONNTRACK_TUPLE_H */
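The mask comparison above reduces to a per-word test in which bits may differ only where the mask is zero; an illustrative helper, not part of the header:

	static int example_masked_equal(u_int32_t a, u_int32_t b, u_int32_t mask)
	{
		/* (a ^ b) has a bit set wherever a and b differ; masking
		 * keeps only the differences we are required to match on. */
		return ((a ^ b) & mask) == 0;
	}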
diff --git a/include/net/netlink.h b/include/net/netlink.h
new file mode 100644
index 0000000..640c26a
--- /dev/null
+++ b/include/net/netlink.h
@@ -0,0 +1,883 @@
+#ifndef __NET_NETLINK_H
+#define __NET_NETLINK_H
+
+#include <linux/types.h>
+#include <linux/netlink.h>
+
+/* ========================================================================
+ *         Netlink Messages and Attributes Interface (As Seen On TV)
+ * ------------------------------------------------------------------------
+ *                          Messages Interface
+ * ------------------------------------------------------------------------
+ *
+ * Message Format:
+ *    <--- nlmsg_total_size(payload)  --->
+ *    <-- nlmsg_msg_size(payload) ->
+ *   +----------+- - -+-------------+- - -+-------- - -
+ *   | nlmsghdr | Pad |   Payload   | Pad | nlmsghdr
+ *   +----------+- - -+-------------+- - -+-------- - -
+ *   nlmsg_data(nlh)---^                   ^
+ *   nlmsg_next(nlh)-----------------------+
+ *
+ * Payload Format:
+ *    <---------------------- nlmsg_len(nlh) --------------------->
+ *    <------ hdrlen ------>       <- nlmsg_attrlen(nlh, hdrlen) ->
+ *   +----------------------+- - -+--------------------------------+
+ *   |     Family Header    | Pad |           Attributes           |
+ *   +----------------------+- - -+--------------------------------+
+ *   nlmsg_attrdata(nlh, hdrlen)---^
+ *
+ * Data Structures:
+ *   struct nlmsghdr			netlink message header
+ *
+ * Message Construction:
+ *   nlmsg_new()			create a new netlink message
+ *   nlmsg_put()			add a netlink message to an skb
+ *   nlmsg_put_answer()			callback based nlmsg_put()
+ *   nlmsg_end()			finalize netlink message
+ *   nlmsg_cancel()			cancel message construction
+ *   nlmsg_free()			free a netlink message
+ *
+ * Message Sending:
+ *   nlmsg_multicast()			multicast message to several groups
+ *   nlmsg_unicast()			unicast a message to a single socket
+ *
+ * Message Length Calculations:
+ *   nlmsg_msg_size(payload)		length of message w/o padding
+ *   nlmsg_total_size(payload)		length of message w/ padding
+ *   nlmsg_padlen(payload)		length of padding at tail
+ *
+ * Message Payload Access:
+ *   nlmsg_data(nlh)			head of message payload
+ *   nlmsg_len(nlh)			length of message payload
+ *   nlmsg_attrdata(nlh, hdrlen)	head of attributes data
+ *   nlmsg_attrlen(nlh, hdrlen)		length of attributes data
+ *
+ * Message Parsing:
+ *   nlmsg_ok(nlh, remaining)		does nlh fit into remaining bytes?
+ *   nlmsg_next(nlh, remaining)		get next netlink message
+ *   nlmsg_parse()			parse attributes of a message
+ *   nlmsg_find_attr()			find an attribute in a message
+ *   nlmsg_for_each_msg()		loop over all messages
+ *   nlmsg_validate()			validate netlink message incl. attrs
+ *   nlmsg_for_each_attr()		loop over all attributes
+ *
+ * ------------------------------------------------------------------------
+ *                          Attributes Interface
+ * ------------------------------------------------------------------------
+ *
+ * Attribute Format:
+ *    <------- nla_total_size(payload) ------->
+ *    <---- nla_attr_size(payload) ----->
+ *   +----------+- - -+- - - - - - - - - +- - -+-------- - -
+ *   |  Header  | Pad |     Payload      | Pad |  Header
+ *   +----------+- - -+- - - - - - - - - +- - -+-------- - -
+ *                     <- nla_len(nla) ->      ^
+ *   nla_data(nla)----^                        |
+ *   nla_next(nla)-----------------------------'
+ *
+ * Data Structures:
+ *   struct nlattr			netlink attribute header
+ *
+ * Attribute Construction:
+ *   nla_reserve(skb, type, len)	reserve skb tailroom for an attribute
+ *   nla_put(skb, type, len, data)	add attribute to skb
+ *
+ * Attribute Construction for Basic Types:
+ *   nla_put_u8(skb, type, value)	add u8 attribute to skb
+ *   nla_put_u16(skb, type, value)	add u16 attribute to skb
+ *   nla_put_u32(skb, type, value)	add u32 attribute to skb
+ *   nla_put_u64(skb, type, value)	add u64 attribute to skb
+ *   nla_put_string(skb, type, str)	add string attribute to skb
+ *   nla_put_flag(skb, type)		add flag attribute to skb
+ *   nla_put_msecs(skb, type, jiffies)	add msecs attribute to skb
+ *
+ * Exceptions Based Attribute Construction:
+ *   NLA_PUT(skb, type, len, data)	add attribute to skb
+ *   NLA_PUT_U8(skb, type, value)	add u8 attribute to skb
+ *   NLA_PUT_U16(skb, type, value)	add u16 attribute to skb
+ *   NLA_PUT_U32(skb, type, value)	add u32 attribute to skb
+ *   NLA_PUT_U64(skb, type, value)	add u64 attribute to skb
+ *   NLA_PUT_STRING(skb, type, str)	add string attribute to skb
+ *   NLA_PUT_FLAG(skb, type)		add flag attribute to skb
+ *   NLA_PUT_MSECS(skb, type, jiffies)	add msecs attribute to skb
+ *
+ *   The meaning of these functions is equal to their lower case
+ *   variants but they jump to the label nla_put_failure in case
+ *   of a failure.
+ *
+ * Nested Attributes Construction:
+ *   nla_nest_start(skb, type)		start a nested attribute
+ *   nla_nest_end(skb, nla)		finalize a nested attribute
+ *   nla_nest_cancel(skb, nla)		cancel nested attribute construction
+ *
+ * Attribute Length Calculations:
+ *   nla_attr_size(payload)		length of attribute w/o padding
+ *   nla_total_size(payload)		length of attribute w/ padding
+ *   nla_padlen(payload)		length of padding
+ *
+ * Attribute Payload Access:
+ *   nla_data(nla)			head of attribute payload
+ *   nla_len(nla)			length of attribute payload
+ *
+ * Attribute Payload Access for Basic Types:
+ *   nla_get_u8(nla)			get payload for a u8 attribute
+ *   nla_get_u16(nla)			get payload for a u16 attribute
+ *   nla_get_u32(nla)			get payload for a u32 attribute
+ *   nla_get_u64(nla)			get payload for a u64 attribute
+ *   nla_get_flag(nla)			return 1 if flag is true
+ *   nla_get_msecs(nla)			get payload for a msecs attribute
+ *
+ * Attribute Misc:
+ *   nla_memcpy(dest, nla, count)	copy attribute into memory
+ *   nla_memcmp(nla, data, size)	compare attribute with memory area
+ *   nla_strlcpy(dst, nla, size)	copy attribute to a sized string
+ *   nla_strcmp(nla, str)		compare attribute with string
+ *
+ * Attribute Parsing:
+ *   nla_ok(nla, remaining)		does nla fit into remaining bytes?
+ *   nla_next(nla, remaining)		get next netlink attribute
+ *   nla_validate()			validate a stream of attributes
+ *   nla_find()				find attribute in stream of attributes
+ *   nla_parse()			parse and validate stream of attrs
+ *   nla_parse_nested()		parse nested attributes
+ *   nla_for_each_attr()		loop over all attributes
+ *=========================================================================
+ */
+
+ /**
+  * Standard attribute types to specify validation policy
+  */
+enum {
+	NLA_UNSPEC,
+	NLA_U8,
+	NLA_U16,
+	NLA_U32,
+	NLA_U64,
+	NLA_STRING,
+	NLA_FLAG,
+	NLA_MSECS,
+	NLA_NESTED,
+	__NLA_TYPE_MAX,
+};
+
+#define NLA_TYPE_MAX (__NLA_TYPE_MAX - 1)
+
+/**
+ * struct nla_policy - attribute validation policy
+ * @type: Type of attribute or NLA_UNSPEC
+ * @minlen: Minimal length of payload required to be available
+ *
+ * Policies are defined as arrays of this struct, the array must be
+ * accessible by attribute type up to the highest identifier to be expected.
+ *
+ * Example:
+ * static struct nla_policy my_policy[ATTR_MAX+1] __read_mostly = {
+ * 	[ATTR_FOO] = { .type = NLA_U16 },
+ *	[ATTR_BAR] = { .type = NLA_STRING },
+ *	[ATTR_BAZ] = { .minlen = sizeof(struct mystruct) },
+ * };
+ */
+struct nla_policy {
+	u16		type;
+	u16		minlen;
+};
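+
+/*
+ * Illustrative sketch of using such a policy when parsing an incoming
+ * message; ATTR_MAX, ATTR_FOO, my_hdrlen and foo are hypothetical names
+ * belonging to the family using this API:
+ *
+ *	struct nlattr *attrs[ATTR_MAX + 1];
+ *
+ *	if (nlmsg_parse(nlh, my_hdrlen, attrs, ATTR_MAX, my_policy) < 0)
+ *		return -EINVAL;
+ *	if (attrs[ATTR_FOO])
+ *		foo = nla_get_u16(attrs[ATTR_FOO]);
+ */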
+
+extern void		netlink_run_queue(struct sock *sk, unsigned int *qlen,
+					  int (*cb)(struct sk_buff *,
+						    struct nlmsghdr *, int *));
+extern void		netlink_queue_skip(struct nlmsghdr *nlh,
+					   struct sk_buff *skb);
+
+extern int		nla_validate(struct nlattr *head, int len, int maxtype,
+				     struct nla_policy *policy);
+extern int		nla_parse(struct nlattr *tb[], int maxtype,
+				  struct nlattr *head, int len,
+				  struct nla_policy *policy);
+extern struct nlattr *	nla_find(struct nlattr *head, int len, int attrtype);
+extern size_t		nla_strlcpy(char *dst, const struct nlattr *nla,
+				    size_t dstsize);
+extern int		nla_memcpy(void *dest, struct nlattr *src, int count);
+extern int		nla_memcmp(const struct nlattr *nla, const void *data,
+				   size_t size);
+extern int		nla_strcmp(const struct nlattr *nla, const char *str);
+extern struct nlattr *	__nla_reserve(struct sk_buff *skb, int attrtype,
+				      int attrlen);
+extern struct nlattr *	nla_reserve(struct sk_buff *skb, int attrtype,
+				    int attrlen);
+extern void		__nla_put(struct sk_buff *skb, int attrtype,
+				  int attrlen, const void *data);
+extern int		nla_put(struct sk_buff *skb, int attrtype,
+				int attrlen, const void *data);
+
+/**************************************************************************
+ * Netlink Messages
+ **************************************************************************/
+
+/**
+ * nlmsg_msg_size - length of netlink message not including padding
+ * @payload: length of message payload
+ */
+static inline int nlmsg_msg_size(int payload)
+{
+	return NLMSG_HDRLEN + payload;
+}
+
+/**
+ * nlmsg_total_size - length of netlink message including padding
+ * @payload: length of message payload
+ */
+static inline int nlmsg_total_size(int payload)
+{
+	return NLMSG_ALIGN(nlmsg_msg_size(payload));
+}
+
+/**
+ * nlmsg_padlen - length of padding at the message's tail
+ * @payload: length of message payload
+ */
+static inline int nlmsg_padlen(int payload)
+{
+	return nlmsg_total_size(payload) - nlmsg_msg_size(payload);
+}
+
+/**
+ * nlmsg_data - head of message payload
+ * @nlh: netlink message header
+ */
+static inline void *nlmsg_data(const struct nlmsghdr *nlh)
+{
+	return (unsigned char *) nlh + NLMSG_HDRLEN;
+}
+
+/**
+ * nlmsg_len - length of message payload
+ * @nlh: netlink message header
+ */
+static inline int nlmsg_len(const struct nlmsghdr *nlh)
+{
+	return nlh->nlmsg_len - NLMSG_HDRLEN;
+}
+
+/**
+ * nlmsg_attrdata - head of attributes data
+ * @nlh: netlink message header
+ * @hdrlen: length of family specific header
+ */
+static inline struct nlattr *nlmsg_attrdata(const struct nlmsghdr *nlh,
+					    int hdrlen)
+{
+	unsigned char *data = nlmsg_data(nlh);
+	return (struct nlattr *) (data + NLMSG_ALIGN(hdrlen));
+}
+
+/**
+ * nlmsg_attrlen - length of attributes data
+ * @nlh: netlink message header
+ * @hdrlen: length of family specific header
+ */
+static inline int nlmsg_attrlen(const struct nlmsghdr *nlh, int hdrlen)
+{
+	return nlmsg_len(nlh) - NLMSG_ALIGN(hdrlen);
+}
+
+/**
+ * nlmsg_ok - check if the netlink message fits into the remaining bytes
+ * @nlh: netlink message header
+ * @remaining: number of bytes remaining in message stream
+ */
+static inline int nlmsg_ok(const struct nlmsghdr *nlh, int remaining)
+{
+	return (remaining >= sizeof(struct nlmsghdr) &&
+		nlh->nlmsg_len >= sizeof(struct nlmsghdr) &&
+		nlh->nlmsg_len <= remaining);
+}
+
+/**
+ * nlmsg_next - next netlink message in message stream
+ * @nlh: netlink message header
+ * @remaining: number of bytes remaining in message stream
+ *
+ * Returns the next netlink message in the message stream and
+ * decrements remaining by the size of the current message.
+ */
+static inline struct nlmsghdr *nlmsg_next(struct nlmsghdr *nlh, int *remaining)
+{
+	int totlen = NLMSG_ALIGN(nlh->nlmsg_len);
+
+	*remaining -= totlen;
+
+	return (struct nlmsghdr *) ((unsigned char *) nlh + totlen);
+}
+
+/**
+ * nlmsg_parse - parse attributes of a netlink message
+ * @nlh: netlink message header
+ * @hdrlen: length of family specific header
+ * @tb: destination array with maxtype+1 elements
+ * @maxtype: maximum attribute type to be expected
+ * @policy: validation policy
+ *
+ * See nla_parse()
+ */
+static inline int nlmsg_parse(struct nlmsghdr *nlh, int hdrlen,
+			      struct nlattr *tb[], int maxtype,
+			      struct nla_policy *policy)
+{
+	if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
+		return -EINVAL;
+
+	return nla_parse(tb, maxtype, nlmsg_attrdata(nlh, hdrlen),
+			 nlmsg_attrlen(nlh, hdrlen), policy);
+}
+
+/**
+ * nlmsg_find_attr - find a specific attribute in a netlink message
+ * @nlh: netlink message header
+ * @hdrlen: length of family specific header
+ * @attrtype: type of attribute to look for
+ *
+ * Returns the first attribute which matches the specified type.
+ */
+static inline struct nlattr *nlmsg_find_attr(struct nlmsghdr *nlh,
+					     int hdrlen, int attrtype)
+{
+	return nla_find(nlmsg_attrdata(nlh, hdrlen),
+			nlmsg_attrlen(nlh, hdrlen), attrtype);
+}
+
+/**
+ * nlmsg_validate - validate a netlink message including attributes
+ * @nlh: netlink message header
+ * @hdrlen: length of family specific header
+ * @maxtype: maximum attribute type to be expected
+ * @policy: validation policy
+ */
+static inline int nlmsg_validate(struct nlmsghdr *nlh, int hdrlen, int maxtype,
+				 struct nla_policy *policy)
+{
+	if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
+		return -EINVAL;
+
+	return nla_validate(nlmsg_attrdata(nlh, hdrlen),
+			    nlmsg_attrlen(nlh, hdrlen), maxtype, policy);
+}
+
+/**
+ * nlmsg_for_each_attr - iterate over a stream of attributes
+ * @pos: loop counter, set to current attribute
+ * @nlh: netlink message header
+ * @hdrlen: length of family specific header
+ * @rem: initialized to len, holds bytes currently remaining in stream
+ */
+#define nlmsg_for_each_attr(pos, nlh, hdrlen, rem) \
+	nla_for_each_attr(pos, nlmsg_attrdata(nlh, hdrlen), \
+			  nlmsg_attrlen(nlh, hdrlen), rem)
+
+#if 0
+/* FIXME: Enable once all users have been converted */
+
+/**
+ * __nlmsg_put - Add a new netlink message to an skb
+ * @skb: socket buffer to store message in
+ * @pid: netlink process id
+ * @seq: sequence number of message
+ * @type: message type
+ * @payload: length of message payload
+ * @flags: message flags
+ *
+ * The caller is responsible for ensuring that the skb provides enough
+ * tailroom for both the netlink header and payload.
+ */
+static inline struct nlmsghdr *__nlmsg_put(struct sk_buff *skb, u32 pid,
+					   u32 seq, int type, int payload,
+					   int flags)
+{
+	struct nlmsghdr *nlh;
+
+	nlh = (struct nlmsghdr *) skb_put(skb, nlmsg_total_size(payload));
+	nlh->nlmsg_type = type;
+	nlh->nlmsg_len = nlmsg_msg_size(payload);
+	nlh->nlmsg_flags = flags;
+	nlh->nlmsg_pid = pid;
+	nlh->nlmsg_seq = seq;
+
+	memset((unsigned char *) nlmsg_data(nlh) + payload, 0,
+	       nlmsg_padlen(payload));
+
+	return nlh;
+}
+#endif
+
+/**
+ * nlmsg_put - Add a new netlink message to an skb
+ * @skb: socket buffer to store message in
+ * @pid: netlink process id
+ * @seq: sequence number of message
+ * @type: message type
+ * @payload: length of message payload
+ * @flags: message flags
+ *
+ * Returns NULL if the tailroom of the skb is insufficient to store
+ * the message header and payload.
+ */
+static inline struct nlmsghdr *nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq,
+					 int type, int payload, int flags)
+{
+	if (unlikely(skb_tailroom(skb) < nlmsg_total_size(payload)))
+		return NULL;
+
+	return __nlmsg_put(skb, pid, seq, type, payload, flags);
+}
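+
+/*
+ * Illustrative sketch of building a message with nlmsg_put()/nlmsg_end()
+ * (MYFAM_CMD_NEW and struct myfamhdr are hypothetical):
+ *
+ *	struct nlmsghdr *nlh;
+ *
+ *	nlh = nlmsg_put(skb, pid, seq, MYFAM_CMD_NEW,
+ *			sizeof(struct myfamhdr), 0);
+ *	if (nlh == NULL)
+ *		return -EMSGSIZE;
+ *	... fill the family header and add attributes ...
+ *	return nlmsg_end(skb, nlh);
+ */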
+
+/**
+ * nlmsg_put_answer - Add a new callback-based netlink message to an skb
+ * @skb: socket buffer to store message in
+ * @cb: netlink callback
+ * @type: message type
+ * @payload: length of message payload
+ * @flags: message flags
+ *
+ * Returns NULL if the tailroom of the skb is insufficient to store
+ * the message header and payload.
+ */
+static inline struct nlmsghdr *nlmsg_put_answer(struct sk_buff *skb,
+						struct netlink_callback *cb,
+						int type, int payload,
+						int flags)
+{
+	return nlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
+			 type, payload, flags);
+}
+
+/**
+ * nlmsg_new - Allocate a new netlink message
+ * @size: maximum size of message
+ *
+ * Use NLMSG_GOODSIZE if size isn't known and you need a good default size.
+ */
+static inline struct sk_buff *nlmsg_new(int size)
+{
+	return alloc_skb(size, GFP_KERNEL);
+}
+
+/**
+ * nlmsg_end - Finalize a netlink message
+ * @skb: socket buffer the message is stored in
+ * @nlh: netlink message header
+ *
+ * Corrects the netlink message header to include the appended
+ * attributes. Only necessary if attributes have been added to
+ * the message.
+ *
+ * Returns the total data length of the skb.
+ */
+static inline int nlmsg_end(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
+	nlh->nlmsg_len = skb->tail - (unsigned char *) nlh;
+
+	return skb->len;
+}
+
+/**
+ * nlmsg_cancel - Cancel construction of a netlink message
+ * @skb: socket buffer the message is stored in
+ * @nlh: netlink message header
+ *
+ * Removes the complete netlink message, including all
+ * attributes, from the socket buffer. Returns -1.
+ */
+static inline int nlmsg_cancel(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
+	skb_trim(skb, (unsigned char *) nlh - skb->data);
+
+	return -1;
+}
+
+/**
+ * nlmsg_free - free a netlink message
+ * @skb: socket buffer of netlink message
+ */
+static inline void nlmsg_free(struct sk_buff *skb)
+{
+	kfree_skb(skb);
+}
+
+/**
+ * nlmsg_multicast - multicast a netlink message
+ * @sk: netlink socket to spread messages to
+ * @skb: netlink message as socket buffer
+ * @pid: own netlink pid to avoid sending to yourself
+ * @group: multicast group id
+ */
+static inline int nlmsg_multicast(struct sock *sk, struct sk_buff *skb,
+				  u32 pid, unsigned int group)
+{
+	int err;
+
+	NETLINK_CB(skb).dst_group = group;
+
+	err = netlink_broadcast(sk, skb, pid, group, GFP_KERNEL);
+	if (err > 0)
+		err = 0;
+
+	return err;
+}
+
+/**
+ * nlmsg_unicast - unicast a netlink message
+ * @sk: netlink socket to send the message on
+ * @skb: netlink message as socket buffer
+ * @pid: netlink pid of the destination socket
+ */
+static inline int nlmsg_unicast(struct sock *sk, struct sk_buff *skb, u32 pid)
+{
+	int err;
+
+	err = netlink_unicast(sk, skb, pid, MSG_DONTWAIT);
+	if (err > 0)
+		err = 0;
+
+	return err;
+}
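+
+/*
+ * Illustrative sketch of answering a request with nlmsg_new()/nlmsg_unicast()
+ * (my_sock and the fill step are hypothetical; skb is the request skb):
+ *
+ *	struct sk_buff *msg = nlmsg_new(NLMSG_GOODSIZE);
+ *
+ *	if (msg == NULL)
+ *		return -ENOMEM;
+ *	... build the reply into msg ...
+ *	return nlmsg_unicast(my_sock, msg, NETLINK_CB(skb).pid);
+ */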
+
+/**
+ * nlmsg_for_each_msg - iterate over a stream of messages
+ * @pos: loop counter, set to current message
+ * @head: head of message stream
+ * @len: length of message stream
+ * @rem: initialized to len, holds bytes currently remaining in stream
+ */
+#define nlmsg_for_each_msg(pos, head, len, rem) \
+	for (pos = head, rem = len; \
+	     nlmsg_ok(pos, rem); \
+	     pos = nlmsg_next(pos, &(rem)))
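+
+/*
+ * Illustrative sketch of walking a stream of messages, assuming head and
+ * len describe the stream:
+ *
+ *	struct nlmsghdr *pos;
+ *	int rem;
+ *
+ *	nlmsg_for_each_msg(pos, head, len, rem)
+ *		printk(KERN_DEBUG "message type %d\n", pos->nlmsg_type);
+ */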
+
+/**************************************************************************
+ * Netlink Attributes
+ **************************************************************************/
+
+/**
+ * nla_attr_size - length of attribute not including padding
+ * @payload: length of payload
+ */
+static inline int nla_attr_size(int payload)
+{
+	return NLA_HDRLEN + payload;
+}
+
+/**
+ * nla_total_size - total length of attribute including padding
+ * @payload: length of payload
+ */
+static inline int nla_total_size(int payload)
+{
+	return NLA_ALIGN(nla_attr_size(payload));
+}
+
+/**
+ * nla_padlen - length of padding at the tail of attribute
+ * @payload: length of payload
+ */
+static inline int nla_padlen(int payload)
+{
+	return nla_total_size(payload) - nla_attr_size(payload);
+}
+
+/**
+ * nla_data - head of payload
+ * @nla: netlink attribute
+ */
+static inline void *nla_data(const struct nlattr *nla)
+{
+	return (char *) nla + NLA_HDRLEN;
+}
+
+/**
+ * nla_len - length of payload
+ * @nla: netlink attribute
+ */
+static inline int nla_len(const struct nlattr *nla)
+{
+	return nla->nla_len - NLA_HDRLEN;
+}
+
+/**
+ * nla_ok - check if the netlink attribute fits into the remaining bytes
+ * @nla: netlink attribute
+ * @remaining: number of bytes remaining in attribute stream
+ */
+static inline int nla_ok(const struct nlattr *nla, int remaining)
+{
+	return remaining >= sizeof(*nla) &&
+	       nla->nla_len >= sizeof(*nla) &&
+	       nla->nla_len <= remaining;
+}
+
+/**
+ * nla_next - next netlink attribute in attribute stream
+ * @nla: netlink attribute
+ * @remaining: number of bytes remaining in attribute stream
+ *
+ * Returns the next netlink attribute in the attribute stream and
+ * decrements remaining by the size of the current attribute.
+ */
+static inline struct nlattr *nla_next(const struct nlattr *nla, int *remaining)
+{
+	int totlen = NLA_ALIGN(nla->nla_len);
+
+	*remaining -= totlen;
+	return (struct nlattr *) ((char *) nla + totlen);
+}
+
+/**
+ * nla_parse_nested - parse nested attributes
+ * @tb: destination array with maxtype+1 elements
+ * @maxtype: maximum attribute type to be expected
+ * @nla: attribute containing the nested attributes
+ * @policy: validation policy
+ *
+ * See nla_parse()
+ */
+static inline int nla_parse_nested(struct nlattr *tb[], int maxtype,
+				   struct nlattr *nla,
+				   struct nla_policy *policy)
+{
+	return nla_parse(tb, maxtype, nla_data(nla), nla_len(nla), policy);
+}
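+
+/*
+ * Illustrative sketch of parsing a nested container attribute
+ * (MYFAM_ATTR_NESTED_MAX, MYFAM_ATTR_CONTAINER and nested_policy are
+ * hypothetical):
+ *
+ *	struct nlattr *nested[MYFAM_ATTR_NESTED_MAX + 1];
+ *
+ *	if (nla_parse_nested(nested, MYFAM_ATTR_NESTED_MAX,
+ *			     tb[MYFAM_ATTR_CONTAINER], nested_policy) < 0)
+ *		return -EINVAL;
+ */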
+
+/**
+ * nla_put_u8 - Add a u8 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_u8(struct sk_buff *skb, int attrtype, u8 value)
+{
+	return nla_put(skb, attrtype, sizeof(u8), &value);
+}
+
+/**
+ * nla_put_u16 - Add a u16 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value)
+{
+	return nla_put(skb, attrtype, sizeof(u16), &value);
+}
+
+/**
+ * nla_put_u32 - Add a u32 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value)
+{
+	return nla_put(skb, attrtype, sizeof(u32), &value);
+}
+
+/**
+ * nla_put_u64 - Add a u64 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_u64(struct sk_buff *skb, int attrtype, u64 value)
+{
+	return nla_put(skb, attrtype, sizeof(u64), &value);
+}
+
+/**
+ * nla_put_string - Add a string netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @str: NUL terminated string
+ */
+static inline int nla_put_string(struct sk_buff *skb, int attrtype,
+				 const char *str)
+{
+	return nla_put(skb, attrtype, strlen(str) + 1, str);
+}
+
+/**
+ * nla_put_flag - Add a flag netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ */
+static inline int nla_put_flag(struct sk_buff *skb, int attrtype)
+{
+	return nla_put(skb, attrtype, 0, NULL);
+}
+
+/**
+ * nla_put_msecs - Add a msecs netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @jiffies: duration in jiffies, transmitted as milliseconds
+ */
+static inline int nla_put_msecs(struct sk_buff *skb, int attrtype,
+				unsigned long jiffies)
+{
+	u64 tmp = jiffies_to_msecs(jiffies);
+	return nla_put(skb, attrtype, sizeof(u64), &tmp);
+}
+
+#define NLA_PUT(skb, attrtype, attrlen, data) \
+	do { \
+		if (nla_put(skb, attrtype, attrlen, data) < 0) \
+			goto nla_put_failure; \
+	} while(0)
+
+#define NLA_PUT_TYPE(skb, type, attrtype, value) \
+	do { \
+		type __tmp = value; \
+		NLA_PUT(skb, attrtype, sizeof(type), &__tmp); \
+	} while(0)
+
+#define NLA_PUT_U8(skb, attrtype, value) \
+	NLA_PUT_TYPE(skb, u8, attrtype, value)
+
+#define NLA_PUT_U16(skb, attrtype, value) \
+	NLA_PUT_TYPE(skb, u16, attrtype, value)
+
+#define NLA_PUT_U32(skb, attrtype, value) \
+	NLA_PUT_TYPE(skb, u32, attrtype, value)
+
+#define NLA_PUT_U64(skb, attrtype, value) \
+	NLA_PUT_TYPE(skb, u64, attrtype, value)
+
+#define NLA_PUT_STRING(skb, attrtype, value) \
+	NLA_PUT(skb, attrtype, strlen(value) + 1, value)
+
+#define NLA_PUT_FLAG(skb, attrtype, value) \
+	NLA_PUT(skb, attrtype, 0, NULL)
+
+#define NLA_PUT_MSECS(skb, attrtype, jiffies) \
+	NLA_PUT_U64(skb, attrtype, jiffies_to_msecs(jiffies))
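+
+/*
+ * Illustrative sketch of the NLA_PUT_*()/nla_put_failure pattern used when
+ * filling a message (the attribute types are hypothetical):
+ *
+ *	NLA_PUT_U32(skb, MYFAM_ATTR_IFINDEX, dev->ifindex);
+ *	NLA_PUT_STRING(skb, MYFAM_ATTR_IFNAME, dev->name);
+ *	return nlmsg_end(skb, nlh);
+ *
+ * nla_put_failure:
+ *	nlmsg_cancel(skb, nlh);
+ *	return -EMSGSIZE;
+ */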
+
+/**
+ * nla_get_u32 - return payload of u32 attribute
+ * @nla: u32 netlink attribute
+ */
+static inline u32 nla_get_u32(struct nlattr *nla)
+{
+	return *(u32 *) nla_data(nla);
+}
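+
+/*
+ * Illustrative sketch of reading typed attributes after nlmsg_parse()
+ * (the attribute index is hypothetical):
+ *
+ *	u32 ifindex = 0;
+ *
+ *	if (tb[MYFAM_ATTR_IFINDEX])
+ *		ifindex = nla_get_u32(tb[MYFAM_ATTR_IFINDEX]);
+ */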
+
+/**
+ * nla_get_u16 - return payload of u16 attribute
+ * @nla: u16 netlink attribute
+ */
+static inline u16 nla_get_u16(struct nlattr *nla)
+{
+	return *(u16 *) nla_data(nla);
+}
+
+/**
+ * nla_get_u8 - return payload of u8 attribute
+ * @nla: u8 netlink attribute
+ */
+static inline u8 nla_get_u8(struct nlattr *nla)
+{
+	return *(u8 *) nla_data(nla);
+}
+
+/**
+ * nla_get_u64 - return payload of u64 attribute
+ * @nla: u64 netlink attribute
+ */
+static inline u64 nla_get_u64(struct nlattr *nla)
+{
+	u64 tmp;
+
+	nla_memcpy(&tmp, nla, sizeof(tmp));
+
+	return tmp;
+}
+
+/**
+ * nla_get_flag - return payload of flag attribute
+ * @nla: flag netlink attribute
+ */
+static inline int nla_get_flag(struct nlattr *nla)
+{
+	return !!nla;
+}
+
+/**
+ * nla_get_msecs - return payload of msecs attribute
+ * @nla: msecs netlink attribute
+ *
+ * Returns the payload, converted from milliseconds to jiffies.
+ */
+static inline unsigned long nla_get_msecs(struct nlattr *nla)
+{
+	u64 msecs = nla_get_u64(nla);
+
+	return msecs_to_jiffies((unsigned long) msecs);
+}
+
+/**
+ * nla_nest_start - Start a new level of nested attributes
+ * @skb: socket buffer to add attributes to
+ * @attrtype: attribute type of container
+ *
+ * Returns the container attribute, or NULL if there is insufficient tailroom.
+ */
+static inline struct nlattr *nla_nest_start(struct sk_buff *skb, int attrtype)
+{
+	struct nlattr *start = (struct nlattr *) skb->tail;
+
+	if (nla_put(skb, attrtype, 0, NULL) < 0)
+		return NULL;
+
+	return start;
+}
+
+/**
+ * nla_nest_end - Finalize nesting of attributes
+ * @skb: socket buffer the attributes are stored in
+ * @start: container attribute
+ *
+ * Corrects the container attribute header to include all
+ * appended attributes.
+ *
+ * Returns the total data length of the skb.
+ */
+static inline int nla_nest_end(struct sk_buff *skb, struct nlattr *start)
+{
+	start->nla_len = skb->tail - (unsigned char *) start;
+	return skb->len;
+}
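+
+/*
+ * Illustrative sketch of emitting a nested attribute block
+ * (MYFAM_ATTR_STATS and the inner attribute are hypothetical):
+ *
+ *	struct nlattr *nest;
+ *
+ *	nest = nla_nest_start(skb, MYFAM_ATTR_STATS);
+ *	if (nest == NULL)
+ *		goto nla_put_failure;
+ *	NLA_PUT_U32(skb, MYFAM_ATTR_STATS_PKTS, pkts);
+ *	nla_nest_end(skb, nest);
+ */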
+
+/**
+ * nla_nest_cancel - Cancel nesting of attributes
+ * @skb: socket buffer the message is stored in
+ * @start: container attribute
+ *
+ * Removes the container attribute, including all nested
+ * attributes. Returns -1.
+ */
+static inline int nla_nest_cancel(struct sk_buff *skb, struct nlattr *start)
+{
+	if (start)
+		skb_trim(skb, (unsigned char *) start - skb->data);
+
+	return -1;
+}
+
+/**
+ * nla_for_each_attr - iterate over a stream of attributes
+ * @pos: loop counter, set to current attribute
+ * @head: head of attribute stream
+ * @len: length of attribute stream
+ * @rem: initialized to len, holds bytes currently remaining in stream
+ */
+#define nla_for_each_attr(pos, head, len, rem) \
+	for (pos = head, rem = len; \
+	     nla_ok(pos, rem); \
+	     pos = nla_next(pos, &(rem)))
+
+#endif
diff --git a/include/net/tcp.h b/include/net/tcp.h
index c24339c..96cc3b4 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -27,6 +27,7 @@
 #include <linux/slab.h>
 #include <linux/cache.h>
 #include <linux/percpu.h>
+#include <linux/skbuff.h>
 
 #include <net/inet_connection_sock.h>
 #include <net/inet_timewait_sock.h>
@@ -852,7 +853,7 @@
 
 static __inline__ int __tcp_checksum_complete(struct sk_buff *skb)
 {
-	return (unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum));
+	return __skb_checksum_complete(skb);
 }
 
 static __inline__ int tcp_checksum_complete(struct sk_buff *skb)
diff --git a/include/rdma/ib_user_verbs.h b/include/rdma/ib_user_verbs.h
index 072f3a2..5ff1490 100644
--- a/include/rdma/ib_user_verbs.h
+++ b/include/rdma/ib_user_verbs.h
@@ -43,7 +43,7 @@
  * Increment this value if any changes that break userspace ABI
  * compatibility are made.
  */
-#define IB_USER_VERBS_ABI_VERSION	3
+#define IB_USER_VERBS_ABI_VERSION	4
 
 enum {
 	IB_USER_VERBS_CMD_GET_CONTEXT,
@@ -333,6 +333,11 @@
 struct ib_uverbs_create_qp_resp {
 	__u32 qp_handle;
 	__u32 qpn;
+	__u32 max_send_wr;
+	__u32 max_recv_wr;
+	__u32 max_send_sge;
+	__u32 max_recv_sge;
+	__u32 max_inline_data;
 };
 
 /*
@@ -552,9 +557,7 @@
 	__u32 srq_handle;
 	__u32 attr_mask;
 	__u32 max_wr;
-	__u32 max_sge;
 	__u32 srq_limit;
-	__u32 reserved;
 	__u64 driver_data[0];
 };
 
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index f72d46d..a7f4c35 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -881,7 +881,7 @@
 						struct ib_ucontext *context,
 						struct ib_udata *udata);
 	int                        (*destroy_cq)(struct ib_cq *cq);
-	int                        (*resize_cq)(struct ib_cq *cq, int *cqe);
+	int                        (*resize_cq)(struct ib_cq *cq, int cqe);
 	int                        (*poll_cq)(struct ib_cq *cq, int num_entries,
 					      struct ib_wc *wc);
 	int                        (*peek_cq)(struct ib_cq *cq, int wc_cnt);
diff --git a/kernel/sched.c b/kernel/sched.c
index ac3f5cc..b650667 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -815,7 +815,8 @@
 	}
 #endif
 
-	p->prio = recalc_task_prio(p, now);
+	if (!rt_task(p))
+		p->prio = recalc_task_prio(p, now);
 
 	/*
 	 * This checks to make sure it's not an uninterruptible task
diff --git a/kernel/signal.c b/kernel/signal.c
index 1bf3c39..80789a5 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1499,7 +1499,7 @@
 
 	psig = tsk->parent->sighand;
 	spin_lock_irqsave(&psig->siglock, flags);
-	if (sig == SIGCHLD &&
+	if (!tsk->ptrace && sig == SIGCHLD &&
 	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
 	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
 		/*
diff --git a/kernel/sys.c b/kernel/sys.c
index c43b3e2..bce933e 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1497,6 +1497,8 @@
 
 DECLARE_RWSEM(uts_sem);
 
+EXPORT_SYMBOL(uts_sem);
+
 asmlinkage long sys_newuname(struct new_utsname __user * name)
 {
 	int errno = 0;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ff81b5c..987225b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1330,7 +1330,7 @@
 		} else
 			printk("\n");
 
-		for_each_cpu(cpu) {
+		for_each_online_cpu(cpu) {
 			struct per_cpu_pageset *pageset;
 
 			pageset = zone_pcp(zone, cpu);
diff --git a/net/core/datagram.c b/net/core/datagram.c
index d219435..1bcfef5 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -350,6 +350,20 @@
 	return -EFAULT;
 }
 
+unsigned int __skb_checksum_complete(struct sk_buff *skb)
+{
+	unsigned int sum;
+
+	sum = (u16)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum));
+	if (likely(!sum)) {
+		if (unlikely(skb->ip_summed == CHECKSUM_HW))
+			netdev_rx_csum_fault(skb->dev);
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	}
+	return sum;
+}
+EXPORT_SYMBOL(__skb_checksum_complete);
+
 /**
  *	skb_copy_and_csum_datagram_iovec - Copy and checkum skb to user iovec.
  *	@skb: skbuff
@@ -363,7 +377,7 @@
  *		 -EFAULT - fault during copy. Beware, in this case iovec
  *			   can be modified!
  */
-int skb_copy_and_csum_datagram_iovec(const struct sk_buff *skb,
+int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
 				     int hlen, struct iovec *iov)
 {
 	unsigned int csum;
@@ -376,8 +390,7 @@
 		iov++;
 
 	if (iov->iov_len < chunk) {
-		if ((unsigned short)csum_fold(skb_checksum(skb, 0, chunk + hlen,
-							   skb->csum)))
+		if (__skb_checksum_complete(skb))
 			goto csum_error;
 		if (skb_copy_datagram_iovec(skb, hlen, iov, chunk))
 			goto fault;
@@ -388,6 +401,8 @@
 			goto fault;
 		if ((unsigned short)csum_fold(csum))
 			goto csum_error;
+		if (unlikely(skb->ip_summed == CHECKSUM_HW))
+			netdev_rx_csum_fault(skb->dev);
 		iov->iov_len -= chunk;
 		iov->iov_base += chunk;
 	}
diff --git a/net/core/dev.c b/net/core/dev.c
index 8d15415..0b48e29 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1108,6 +1108,18 @@
 	return ret;
 }
 
+/* Take action when hardware reception checksum errors are detected. */
+#ifdef CONFIG_BUG
+void netdev_rx_csum_fault(struct net_device *dev)
+{
+	if (net_ratelimit()) {
+		printk(KERN_ERR "%s: hw csum failure.\n", dev->name);
+		dump_stack();
+	}
+}
+EXPORT_SYMBOL(netdev_rx_csum_fault);
+#endif
+
 #ifdef CONFIG_HIGHMEM
 /* Actually, we should eliminate this check as soon as we know, that:
  * 1. IOMMU is present and allows to map all the memory.
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 802fe11..49424a4 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -101,16 +101,20 @@
 static int checksum_udp(struct sk_buff *skb, struct udphdr *uh,
 			     unsigned short ulen, u32 saddr, u32 daddr)
 {
-	if (uh->check == 0)
+	unsigned int psum;
+
+	if (uh->check == 0 || skb->ip_summed == CHECKSUM_UNNECESSARY)
 		return 0;
 
-	if (skb->ip_summed == CHECKSUM_HW)
-		return csum_tcpudp_magic(
-			saddr, daddr, ulen, IPPROTO_UDP, skb->csum);
+	psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);
 
-	skb->csum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);
+	if (skb->ip_summed == CHECKSUM_HW &&
+	    !(u16)csum_fold(csum_add(psum, skb->csum)))
+		return 0;
 
-	return csum_fold(skb_checksum(skb, 0, skb->len, skb->csum));
+	skb->csum = psum;
+
+	return __skb_checksum_complete(skb);
 }
 
 /*
@@ -489,7 +493,7 @@
 
 	if (ulen != len)
 		goto out;
-	if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr) < 0)
+	if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
 		goto out;
 	if (np->local_ip && np->local_ip != ntohl(iph->daddr))
 		goto out;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 9bed756..8700379 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -49,6 +49,7 @@
 #include <net/udp.h>
 #include <net/sock.h>
 #include <net/pkt_sched.h>
+#include <net/netlink.h>
 
 DECLARE_MUTEX(rtnl_sem);
 
@@ -462,11 +463,6 @@
 	netlink_broadcast(rtnl, skb, 0, RTNLGRP_LINK, GFP_KERNEL);
 }
 
-static int rtnetlink_done(struct netlink_callback *cb)
-{
-	return 0;
-}
-
 /* Protected by RTNL sempahore.  */
 static struct rtattr **rta_buf;
 static int rtattr_max;
@@ -524,8 +520,6 @@
 	}
 
 	if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
-		u32 rlen;
-
 		if (link->dumpit == NULL)
 			link = &(rtnetlink_links[PF_UNSPEC][type]);
 
@@ -533,14 +527,11 @@
 			goto err_inval;
 
 		if ((*errp = netlink_dump_start(rtnl, skb, nlh,
-						link->dumpit,
-						rtnetlink_done)) != 0) {
+						link->dumpit, NULL)) != 0) {
 			return -1;
 		}
-		rlen = NLMSG_ALIGN(nlh->nlmsg_len);
-		if (rlen > skb->len)
-			rlen = skb->len;
-		skb_pull(skb, rlen);
+
+		netlink_queue_skip(nlh, skb);
 		return -1;
 	}
 
@@ -579,75 +570,13 @@
 	return -1;
 }
 
-/* 
- * Process one packet of messages.
- * Malformed skbs with wrong lengths of messages are discarded silently.
- */
-
-static inline int rtnetlink_rcv_skb(struct sk_buff *skb)
-{
-	int err;
-	struct nlmsghdr * nlh;
-
-	while (skb->len >= NLMSG_SPACE(0)) {
-		u32 rlen;
-
-		nlh = (struct nlmsghdr *)skb->data;
-		if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len)
-			return 0;
-		rlen = NLMSG_ALIGN(nlh->nlmsg_len);
-		if (rlen > skb->len)
-			rlen = skb->len;
-		if (rtnetlink_rcv_msg(skb, nlh, &err)) {
-			/* Not error, but we must interrupt processing here:
-			 *   Note, that in this case we do not pull message
-			 *   from skb, it will be processed later.
-			 */
-			if (err == 0)
-				return -1;
-			netlink_ack(skb, nlh, err);
-		} else if (nlh->nlmsg_flags&NLM_F_ACK)
-			netlink_ack(skb, nlh, 0);
-		skb_pull(skb, rlen);
-	}
-
-	return 0;
-}
-
-/*
- *  rtnetlink input queue processing routine:
- *	- process as much as there was in the queue upon entry.
- *	- feed skbs to rtnetlink_rcv_skb, until it refuse a message,
- *	  that will occur, when a dump started.
- */
-
 static void rtnetlink_rcv(struct sock *sk, int len)
 {
-	unsigned int qlen = skb_queue_len(&sk->sk_receive_queue);
+	unsigned int qlen = 0;
 
 	do {
-		struct sk_buff *skb;
-
 		rtnl_lock();
-
-		if (qlen > skb_queue_len(&sk->sk_receive_queue))
-			qlen = skb_queue_len(&sk->sk_receive_queue);
-
-		for (; qlen; qlen--) {
-			skb = skb_dequeue(&sk->sk_receive_queue);
-			if (rtnetlink_rcv_skb(skb)) {
-				if (skb->len)
-					skb_queue_head(&sk->sk_receive_queue,
-						       skb);
-				else {
-					kfree_skb(skb);
-					qlen--;
-				}
-				break;
-			}
-			kfree_skb(skb);
-		}
-
+		netlink_run_queue(sk, &qlen, &rtnetlink_rcv_msg);
 		up(&rtnl_sem);
 
 		netdev_run_todo();
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 95501e4..b7d13a4 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -336,6 +336,9 @@
 	}
 #ifdef CONFIG_NETFILTER
 	nf_conntrack_put(skb->nfct);
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+	nf_conntrack_put_reasm(skb->nfct_reasm);
+#endif
 #ifdef CONFIG_BRIDGE_NETFILTER
 	nf_bridge_put(skb->nf_bridge);
 #endif
@@ -414,9 +417,13 @@
 	C(nfct);
 	nf_conntrack_get(skb->nfct);
 	C(nfctinfo);
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+	C(nfct_reasm);
+	nf_conntrack_get_reasm(skb->nfct_reasm);
+#endif
 #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
 	C(ipvs_property);
 #endif
 #ifdef CONFIG_BRIDGE_NETFILTER
 	C(nf_bridge);
 	nf_bridge_get(skb->nf_bridge);
@@ -474,6 +485,10 @@
 	new->nfct	= old->nfct;
 	nf_conntrack_get(old->nfct);
 	new->nfctinfo	= old->nfctinfo;
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+	new->nfct_reasm = old->nfct_reasm;
+	nf_conntrack_get_reasm(old->nfct_reasm);
+#endif
 #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
 	new->ipvs_property = old->ipvs_property;
 #endif
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 175e093..e3eceec 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -934,11 +934,11 @@
 	case CHECKSUM_HW:
 		if (!(u16)csum_fold(skb->csum))
 			break;
-		LIMIT_NETDEBUG(KERN_DEBUG "icmp v4 hw csum failure\n");
+		/* fall through */
 	case CHECKSUM_NONE:
-		if ((u16)csum_fold(skb_checksum(skb, 0, skb->len, 0)))
+		skb->csum = 0;
+		if (__skb_checksum_complete(skb))
 			goto error;
-	default:;
 	}
 
 	if (!pskb_pull(skb, sizeof(struct icmphdr)))
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index c6247fc..c04607b 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -872,11 +872,18 @@
 		return 0;
 	}
 
-	if (!pskb_may_pull(skb, sizeof(struct igmphdr)) || 
-	    (u16)csum_fold(skb_checksum(skb, 0, len, 0))) {
-		in_dev_put(in_dev);
-		kfree_skb(skb);
-		return 0;
+	if (!pskb_may_pull(skb, sizeof(struct igmphdr)))
+		goto drop;
+
+	switch (skb->ip_summed) {
+	case CHECKSUM_HW:
+		if (!(u16)csum_fold(skb->csum))
+			break;
+		/* fall through */
+	case CHECKSUM_NONE:
+		skb->csum = 0;
+		if (__skb_checksum_complete(skb))
+			goto drop;
 	}
 
 	ih = skb->h.igmph;
@@ -906,6 +913,8 @@
 	default:
 		NETDEBUG(KERN_DEBUG "New IGMP type=%d, why we do not know about it?\n", ih->type);
 	}
+
+drop:
 	in_dev_put(in_dev);
 	kfree_skb(skb);
 	return 0;
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 71f3c73..39061ed 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -724,12 +724,6 @@
 	return skb->len;
 }
 
-static int inet_diag_dump_done(struct netlink_callback *cb)
-{
-	return 0;
-}
-
-
 static __inline__ int
 inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
@@ -760,8 +754,7 @@
 				goto err_inval;
 		}
 		return netlink_dump_start(idiagnl, skb, nlh,
-					  inet_diag_dump,
-					  inet_diag_dump_done);
+					  inet_diag_dump, NULL);
 	} else {
 		return inet_diag_get_exact(skb, nlh);
 	}
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 896ce3f..4e9c74b 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -577,15 +577,16 @@
 			goto drop_nolock;
 
 		if (flags&GRE_CSUM) {
-			if (skb->ip_summed == CHECKSUM_HW) {
+			switch (skb->ip_summed) {
+			case CHECKSUM_HW:
 				csum = (u16)csum_fold(skb->csum);
-				if (csum)
-					skb->ip_summed = CHECKSUM_NONE;
-			}
-			if (skb->ip_summed == CHECKSUM_NONE) {
-				skb->csum = skb_checksum(skb, 0, skb->len, 0);
+				if (!csum)
+					break;
+				/* fall through */
+			case CHECKSUM_NONE:
+				skb->csum = 0;
+				csum = __skb_checksum_complete(skb);
 				skb->ip_summed = CHECKSUM_HW;
-				csum = (u16)csum_fold(skb->csum);
 			}
 			offset += 4;
 		}
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 7d917e4..9d3c8b5 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -5,6 +5,20 @@
 menu "IP: Netfilter Configuration"
 	depends on INET && NETFILTER
 
+config NF_CONNTRACK_IPV4
+	tristate "IPv4 support for new connection tracking (EXPERIMENTAL)"
+	depends on EXPERIMENTAL && NF_CONNTRACK
+	---help---
+	  Connection tracking keeps a record of what packets have passed
+	  through your machine, in order to figure out how they are related
+	  into connections.
+
+	  This is the IPv4 part of Layer 3 independent connection tracking.
+	  Layer 3 independent connection tracking is an experimental scheme
+	  which generalizes ip_conntrack to support other layer 3 protocols.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
 # connection tracking, helpers and protocols
 config IP_NF_CONNTRACK
 	tristate "Connection tracking (required for masq/NAT)"
@@ -209,8 +223,8 @@
 	tristate "Packet type match support"
 	depends on IP_NF_IPTABLES
 	help
-         Packet type matching allows you to match a packet by
-         its "class", eg. BROADCAST, MULTICAST, ...
+	  Packet type matching allows you to match a packet by
+	  its "class", eg. BROADCAST, MULTICAST, ...
 
 	  Typical usage:
 	  iptables -A INPUT -m pkttype --pkt-type broadcast -j LOG
@@ -317,7 +331,8 @@
 
 config IP_NF_MATCH_HELPER
 	tristate "Helper match support"
-	depends on IP_NF_CONNTRACK && IP_NF_IPTABLES
+	depends on IP_NF_IPTABLES
+	depends on IP_NF_CONNTRACK || NF_CONNTRACK_IPV4
 	help
 	  Helper matching allows you to match packets in dynamic connections
 	  tracked by a conntrack-helper, ie. ip_conntrack_ftp
@@ -326,7 +341,8 @@
 
 config IP_NF_MATCH_STATE
 	tristate "Connection state match support"
-	depends on IP_NF_CONNTRACK && IP_NF_IPTABLES
+	depends on IP_NF_IPTABLES
+	depends on IP_NF_CONNTRACK || NF_CONNTRACK_IPV4
 	help
 	  Connection state matching allows you to match packets based on their
 	  relationship to a tracked connection (ie. previous packets).  This
@@ -336,7 +352,8 @@
 
 config IP_NF_MATCH_CONNTRACK
 	tristate "Connection tracking match support"
-	depends on IP_NF_CONNTRACK && IP_NF_IPTABLES
+	depends on IP_NF_IPTABLES
+	depends on IP_NF_CONNTRACK || NF_CONNTRACK_IPV4
 	help
 	  This is a general conntrack match module, a superset of the state match.
 
@@ -422,7 +439,8 @@
 
 config IP_NF_MATCH_CONNMARK
 	tristate  'Connection mark match support'
-	depends on IP_NF_CONNTRACK_MARK && IP_NF_IPTABLES
+	depends on IP_NF_IPTABLES
+	depends on IP_NF_CONNTRACK_MARK || (NF_CONNTRACK_MARK && NF_CONNTRACK_IPV4)
 	help
 	  This option adds a `connmark' match, which allows you to match the
 	  connection mark value previously set for the session by `CONNMARK'. 
@@ -433,7 +451,8 @@
 
 config IP_NF_MATCH_CONNBYTES
 	tristate  'Connection byte/packet counter match support'
-	depends on IP_NF_CT_ACCT && IP_NF_IPTABLES
+	depends on IP_NF_IPTABLES
+	depends on IP_NF_CT_ACCT || (NF_CT_ACCT && NF_CONNTRACK_IPV4)
 	help
 	  This option adds a `connbytes' match, which allows you to match the
 	  number of bytes and/or packets for each direction within a connection.
@@ -747,7 +766,8 @@
 
 config IP_NF_TARGET_CONNMARK
 	tristate  'CONNMARK target support'
-	depends on IP_NF_CONNTRACK_MARK && IP_NF_MANGLE
+	depends on IP_NF_MANGLE
+	depends on IP_NF_CONNTRACK_MARK || (NF_CONNTRACK_MARK && NF_CONNTRACK_IPV4)
 	help
 	  This option adds a `CONNMARK' target, which allows one to manipulate
 	  the connection mark value.  Similar to the MARK target, but
@@ -759,7 +779,8 @@
 
 config IP_NF_TARGET_CLUSTERIP
 	tristate "CLUSTERIP target support (EXPERIMENTAL)"
-	depends on IP_NF_CONNTRACK_MARK && IP_NF_IPTABLES && EXPERIMENTAL
+	depends on IP_NF_IPTABLES && EXPERIMENTAL
+	depends on IP_NF_CONNTRACK_MARK || (NF_CONNTRACK_MARK && NF_CONNTRACK_IPV4)
 	help
 	  The CLUSTERIP target allows you to build load-balancing clusters of
 	  network servers without having a dedicated load-balancing
@@ -782,7 +803,7 @@
 config IP_NF_TARGET_NOTRACK
 	tristate  'NOTRACK target support'
 	depends on IP_NF_RAW
-	depends on IP_NF_CONNTRACK
+	depends on IP_NF_CONNTRACK || NF_CONNTRACK_IPV4
 	help
 	  The NOTRACK target allows a select rule to specify
 	  which packets *not* to enter the conntrack/NAT
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index dab4b58..058c48e 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -103,3 +103,9 @@
 obj-$(CONFIG_IP_NF_ARPFILTER) += arptable_filter.o
 
 obj-$(CONFIG_IP_NF_QUEUE) += ip_queue.o
+
+# objects for l3 independent conntrack
+nf_conntrack_ipv4-objs  :=  nf_conntrack_l3proto_ipv4.o nf_conntrack_proto_icmp.o
+
+# l3 independent conntrack
+obj-$(CONFIG_NF_CONNTRACK_IPV4) += nf_conntrack_ipv4.o
diff --git a/net/ipv4/netfilter/ip_conntrack_netlink.c b/net/ipv4/netfilter/ip_conntrack_netlink.c
index 5c1c0a3..d2a4fec 100644
--- a/net/ipv4/netfilter/ip_conntrack_netlink.c
+++ b/net/ipv4/netfilter/ip_conntrack_netlink.c
@@ -1376,7 +1376,7 @@
 				ip_conntrack_expect_put(exp);
 			}
 		}
-		write_unlock(&ip_conntrack_lock);
+		write_unlock_bh(&ip_conntrack_lock);
 	} else {
 		/* This basically means we have to flush everything*/
 		write_lock_bh(&ip_conntrack_lock);
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_icmp.c b/net/ipv4/netfilter/ip_conntrack_proto_icmp.c
index 5198f3a..e4d6b26 100644
--- a/net/ipv4/netfilter/ip_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/ip_conntrack_proto_icmp.c
@@ -13,6 +13,7 @@
 #include <linux/in.h>
 #include <linux/icmp.h>
 #include <linux/seq_file.h>
+#include <linux/skbuff.h>
 #include <net/ip.h>
 #include <net/checksum.h>
 #include <linux/netfilter.h>
@@ -230,19 +231,15 @@
 	case CHECKSUM_HW:
 		if (!(u16)csum_fold(skb->csum)) 
 			break;
-		if (LOG_INVALID(IPPROTO_ICMP))
-			nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
-				      "ip_ct_icmp: bad HW ICMP checksum ");
-		return -NF_ACCEPT;
+		/* fall through */
 	case CHECKSUM_NONE:
-		if ((u16)csum_fold(skb_checksum(skb, 0, skb->len, 0))) {
+		skb->csum = 0;
+		if (__skb_checksum_complete(skb)) {
 			if (LOG_INVALID(IPPROTO_ICMP))
 				nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
 					      "ip_ct_icmp: bad ICMP checksum ");
 			return -NF_ACCEPT;
 		}
-	default:
-		break;
 	}
 
 checksum_skipped:
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 9bcb398..45c52d8 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -29,7 +29,7 @@
 
 #include <linux/netfilter_ipv4/ip_tables.h>
 #include <linux/netfilter_ipv4/ipt_CLUSTERIP.h>
-#include <linux/netfilter_ipv4/ip_conntrack.h>
+#include <net/netfilter/nf_conntrack_compat.h>
 
 #define CLUSTERIP_VERSION "0.8"
 
@@ -316,14 +316,14 @@
 {
 	const struct ipt_clusterip_tgt_info *cipinfo = targinfo;
 	enum ip_conntrack_info ctinfo;
-	struct ip_conntrack *ct = ip_conntrack_get((*pskb), &ctinfo);
-	u_int32_t hash;
+	u_int32_t *mark, hash;
 
 	/* don't need to clusterip_config_get() here, since refcount
 	 * is only decremented by destroy() - and ip_tables guarantees
 	 * that the ->target() function isn't called after ->destroy() */
 
-	if (!ct) {
+	mark = nf_ct_get_mark((*pskb), &ctinfo);
+	if (mark == NULL) {
 		printk(KERN_ERR "CLUSTERIP: no conntrack!\n");
 			/* FIXME: need to drop invalid ones, since replies
 			 * to outgoing connections of other nodes will be 
@@ -346,7 +346,7 @@
 
 	switch (ctinfo) {
 		case IP_CT_NEW:
-			ct->mark = hash;
+			*mark = hash;
 			break;
 		case IP_CT_RELATED:
 		case IP_CT_RELATED+IP_CT_IS_REPLY:
@@ -363,7 +363,7 @@
 #ifdef DEBUG_CLUSTERP
 	DUMP_TUPLE(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
 #endif
-	DEBUGP("hash=%u ct_hash=%u ", hash, ct->mark);
+	DEBUGP("hash=%u ct_hash=%u ", hash, *mark);
 	if (!clusterip_responsible(cipinfo->config, hash)) {
 		DEBUGP("not responsible\n");
 		return NF_DROP;
diff --git a/net/ipv4/netfilter/ipt_CONNMARK.c b/net/ipv4/netfilter/ipt_CONNMARK.c
index 05d66ab..8acac5a 100644
--- a/net/ipv4/netfilter/ipt_CONNMARK.c
+++ b/net/ipv4/netfilter/ipt_CONNMARK.c
@@ -29,7 +29,7 @@
 
 #include <linux/netfilter_ipv4/ip_tables.h>
 #include <linux/netfilter_ipv4/ipt_CONNMARK.h>
-#include <linux/netfilter_ipv4/ip_conntrack.h>
+#include <net/netfilter/nf_conntrack_compat.h>
 
 static unsigned int
 target(struct sk_buff **pskb,
@@ -43,24 +43,24 @@
 	u_int32_t diff;
 	u_int32_t nfmark;
 	u_int32_t newmark;
+	u_int32_t ctinfo;
+	u_int32_t *ctmark = nf_ct_get_mark(*pskb, &ctinfo);
 
-	enum ip_conntrack_info ctinfo;
-	struct ip_conntrack *ct = ip_conntrack_get((*pskb), &ctinfo);
-	if (ct) {
+	if (ctmark) {
 	    switch(markinfo->mode) {
 	    case IPT_CONNMARK_SET:
-		newmark = (ct->mark & ~markinfo->mask) | markinfo->mark;
-		if (newmark != ct->mark)
-		    ct->mark = newmark;
+		newmark = (*ctmark & ~markinfo->mask) | markinfo->mark;
+		if (newmark != *ctmark)
+		    *ctmark = newmark;
 		break;
 	    case IPT_CONNMARK_SAVE:
-		newmark = (ct->mark & ~markinfo->mask) | ((*pskb)->nfmark & markinfo->mask);
-		if (ct->mark != newmark)
-		    ct->mark = newmark;
+		newmark = (*ctmark & ~markinfo->mask) | ((*pskb)->nfmark & markinfo->mask);
+		if (*ctmark != newmark)
+		    *ctmark = newmark;
 		break;
 	    case IPT_CONNMARK_RESTORE:
 		nfmark = (*pskb)->nfmark;
-		diff = (ct->mark ^ nfmark) & markinfo->mask;
+		diff = (*ctmark ^ nfmark) & markinfo->mask;
 		if (diff != 0)
 		    (*pskb)->nfmark = nfmark ^ diff;
 		break;
diff --git a/net/ipv4/netfilter/ipt_NOTRACK.c b/net/ipv4/netfilter/ipt_NOTRACK.c
index a4bb9b3..e3c69d0 100644
--- a/net/ipv4/netfilter/ipt_NOTRACK.c
+++ b/net/ipv4/netfilter/ipt_NOTRACK.c
@@ -5,7 +5,7 @@
 #include <linux/skbuff.h>
 
 #include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv4/ip_conntrack.h>
+#include <net/netfilter/nf_conntrack_compat.h>
 
 static unsigned int
 target(struct sk_buff **pskb,
@@ -23,7 +23,7 @@
 	   If there is a real ct entry correspondig to this packet, 
 	   it'll hang aroun till timing out. We don't deal with it
 	   for performance reasons. JK */
-	(*pskb)->nfct = &ip_conntrack_untracked.ct_general;
+	nf_ct_untrack(*pskb);
 	(*pskb)->nfctinfo = IP_CT_NEW;
 	nf_conntrack_get((*pskb)->nfct);
 
diff --git a/net/ipv4/netfilter/ipt_connbytes.c b/net/ipv4/netfilter/ipt_connbytes.c
index df4a42c..d68a048 100644
--- a/net/ipv4/netfilter/ipt_connbytes.c
+++ b/net/ipv4/netfilter/ipt_connbytes.c
@@ -10,7 +10,7 @@
  */
 #include <linux/module.h>
 #include <linux/skbuff.h>
-#include <linux/netfilter_ipv4/ip_conntrack.h>
+#include <net/netfilter/nf_conntrack_compat.h>
 #include <linux/netfilter_ipv4/ip_tables.h>
 #include <linux/netfilter_ipv4/ipt_connbytes.h>
 
@@ -46,60 +46,59 @@
       int *hotdrop)
 {
 	const struct ipt_connbytes_info *sinfo = matchinfo;
-	enum ip_conntrack_info ctinfo;
-	struct ip_conntrack *ct;
 	u_int64_t what = 0;	/* initialize to make gcc happy */
+	const struct ip_conntrack_counter *counters;
 
-	if (!(ct = ip_conntrack_get((struct sk_buff *)skb, &ctinfo)))
+	if (!(counters = nf_ct_get_counters(skb)))
 		return 0; /* no match */
 
 	switch (sinfo->what) {
 	case IPT_CONNBYTES_PKTS:
 		switch (sinfo->direction) {
 		case IPT_CONNBYTES_DIR_ORIGINAL:
-			what = ct->counters[IP_CT_DIR_ORIGINAL].packets;
+			what = counters[IP_CT_DIR_ORIGINAL].packets;
 			break;
 		case IPT_CONNBYTES_DIR_REPLY:
-			what = ct->counters[IP_CT_DIR_REPLY].packets;
+			what = counters[IP_CT_DIR_REPLY].packets;
 			break;
 		case IPT_CONNBYTES_DIR_BOTH:
-			what = ct->counters[IP_CT_DIR_ORIGINAL].packets;
-			what += ct->counters[IP_CT_DIR_REPLY].packets;
+			what = counters[IP_CT_DIR_ORIGINAL].packets;
+			what += counters[IP_CT_DIR_REPLY].packets;
 			break;
 		}
 		break;
 	case IPT_CONNBYTES_BYTES:
 		switch (sinfo->direction) {
 		case IPT_CONNBYTES_DIR_ORIGINAL:
-			what = ct->counters[IP_CT_DIR_ORIGINAL].bytes;
+			what = counters[IP_CT_DIR_ORIGINAL].bytes;
 			break;
 		case IPT_CONNBYTES_DIR_REPLY:
-			what = ct->counters[IP_CT_DIR_REPLY].bytes;
+			what = counters[IP_CT_DIR_REPLY].bytes;
 			break;
 		case IPT_CONNBYTES_DIR_BOTH:
-			what = ct->counters[IP_CT_DIR_ORIGINAL].bytes;
-			what += ct->counters[IP_CT_DIR_REPLY].bytes;
+			what = counters[IP_CT_DIR_ORIGINAL].bytes;
+			what += counters[IP_CT_DIR_REPLY].bytes;
 			break;
 		}
 		break;
 	case IPT_CONNBYTES_AVGPKT:
 		switch (sinfo->direction) {
 		case IPT_CONNBYTES_DIR_ORIGINAL:
-			what = div64_64(ct->counters[IP_CT_DIR_ORIGINAL].bytes,
-					ct->counters[IP_CT_DIR_ORIGINAL].packets);
+			what = div64_64(counters[IP_CT_DIR_ORIGINAL].bytes,
+					counters[IP_CT_DIR_ORIGINAL].packets);
 			break;
 		case IPT_CONNBYTES_DIR_REPLY:
-			what = div64_64(ct->counters[IP_CT_DIR_REPLY].bytes,
-					ct->counters[IP_CT_DIR_REPLY].packets);
+			what = div64_64(counters[IP_CT_DIR_REPLY].bytes,
+					counters[IP_CT_DIR_REPLY].packets);
 			break;
 		case IPT_CONNBYTES_DIR_BOTH:
 			{
 				u_int64_t bytes;
 				u_int64_t pkts;
-				bytes = ct->counters[IP_CT_DIR_ORIGINAL].bytes +
-					ct->counters[IP_CT_DIR_REPLY].bytes;
-				pkts = ct->counters[IP_CT_DIR_ORIGINAL].packets+
-					ct->counters[IP_CT_DIR_REPLY].packets;
+				bytes = counters[IP_CT_DIR_ORIGINAL].bytes +
+					counters[IP_CT_DIR_REPLY].bytes;
+				pkts = counters[IP_CT_DIR_ORIGINAL].packets+
+					counters[IP_CT_DIR_REPLY].packets;
 
 				/* FIXME_THEORETICAL: what to do if sum
 				 * overflows ? */
diff --git a/net/ipv4/netfilter/ipt_connmark.c b/net/ipv4/netfilter/ipt_connmark.c
index bf8de47..5306ef2 100644
--- a/net/ipv4/netfilter/ipt_connmark.c
+++ b/net/ipv4/netfilter/ipt_connmark.c
@@ -28,7 +28,7 @@
 
 #include <linux/netfilter_ipv4/ip_tables.h>
 #include <linux/netfilter_ipv4/ipt_connmark.h>
-#include <linux/netfilter_ipv4/ip_conntrack.h>
+#include <net/netfilter/nf_conntrack_compat.h>
 
 static int
 match(const struct sk_buff *skb,
@@ -39,12 +39,12 @@
       int *hotdrop)
 {
 	const struct ipt_connmark_info *info = matchinfo;
-	enum ip_conntrack_info ctinfo;
-	struct ip_conntrack *ct = ip_conntrack_get((struct sk_buff *)skb, &ctinfo);
-	if (!ct)
+	u_int32_t ctinfo;
+	const u_int32_t *ctmark = nf_ct_get_mark(skb, &ctinfo);
+	if (!ctmark)
 		return 0;
 
-	return ((ct->mark & info->mask) == info->mark) ^ info->invert;
+	return (((*ctmark) & info->mask) == info->mark) ^ info->invert;
 }
 
 static int
diff --git a/net/ipv4/netfilter/ipt_conntrack.c b/net/ipv4/netfilter/ipt_conntrack.c
index c1d2280..c8d1870 100644
--- a/net/ipv4/netfilter/ipt_conntrack.c
+++ b/net/ipv4/netfilter/ipt_conntrack.c
@@ -10,7 +10,14 @@
 
 #include <linux/module.h>
 #include <linux/skbuff.h>
+
+#if defined(CONFIG_IP_NF_CONNTRACK) || defined(CONFIG_IP_NF_CONNTRACK_MODULE)
 #include <linux/netfilter_ipv4/ip_conntrack.h>
+#include <linux/netfilter_ipv4/ip_conntrack_tuple.h>
+#else
+#include <net/netfilter/nf_conntrack.h>
+#endif
+
 #include <linux/netfilter_ipv4/ip_tables.h>
 #include <linux/netfilter_ipv4/ipt_conntrack.h>
 
@@ -18,6 +25,8 @@
 MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
 MODULE_DESCRIPTION("iptables connection tracking match module");
 
+#if defined(CONFIG_IP_NF_CONNTRACK) || defined(CONFIG_IP_NF_CONNTRACK_MODULE)
+
 static int
 match(const struct sk_buff *skb,
       const struct net_device *in,
@@ -102,6 +111,93 @@
 	return 1;
 }
 
+#else /* CONFIG_IP_NF_CONNTRACK */
+static int
+match(const struct sk_buff *skb,
+      const struct net_device *in,
+      const struct net_device *out,
+      const void *matchinfo,
+      int offset,
+      int *hotdrop)
+{
+	const struct ipt_conntrack_info *sinfo = matchinfo;
+	struct nf_conn *ct;
+	enum ip_conntrack_info ctinfo;
+	unsigned int statebit;
+
+	ct = nf_ct_get((struct sk_buff *)skb, &ctinfo);
+
+#define FWINV(bool,invflg) ((bool) ^ !!(sinfo->invflags & invflg))
+
+	if (ct == &nf_conntrack_untracked)
+		statebit = IPT_CONNTRACK_STATE_UNTRACKED;
+	else if (ct)
+		statebit = IPT_CONNTRACK_STATE_BIT(ctinfo);
+	else
+		statebit = IPT_CONNTRACK_STATE_INVALID;
+
+	if(sinfo->flags & IPT_CONNTRACK_STATE) {
+		if (ct) {
+			if(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip !=
+			    ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip)
+				statebit |= IPT_CONNTRACK_STATE_SNAT;
+
+			if(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip !=
+			    ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip)
+				statebit |= IPT_CONNTRACK_STATE_DNAT;
+		}
+
+		if (FWINV((statebit & sinfo->statemask) == 0, IPT_CONNTRACK_STATE))
+			return 0;
+	}
+
+	if(sinfo->flags & IPT_CONNTRACK_PROTO) {
+		if (!ct || FWINV(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum != sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.protonum, IPT_CONNTRACK_PROTO))
+			return 0;
+	}
+
+	if(sinfo->flags & IPT_CONNTRACK_ORIGSRC) {
+		if (!ct || FWINV((ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip&sinfo->sipmsk[IP_CT_DIR_ORIGINAL].s_addr) != sinfo->tuple[IP_CT_DIR_ORIGINAL].src.ip, IPT_CONNTRACK_ORIGSRC))
+			return 0;
+	}
+
+	if(sinfo->flags & IPT_CONNTRACK_ORIGDST) {
+		if (!ct || FWINV((ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip&sinfo->dipmsk[IP_CT_DIR_ORIGINAL].s_addr) != sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.ip, IPT_CONNTRACK_ORIGDST))
+			return 0;
+	}
+
+	if(sinfo->flags & IPT_CONNTRACK_REPLSRC) {
+		if (!ct || FWINV((ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip&sinfo->sipmsk[IP_CT_DIR_REPLY].s_addr) != sinfo->tuple[IP_CT_DIR_REPLY].src.ip, IPT_CONNTRACK_REPLSRC))
+			return 0;
+	}
+
+	if(sinfo->flags & IPT_CONNTRACK_REPLDST) {
+		if (!ct || FWINV((ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip&sinfo->dipmsk[IP_CT_DIR_REPLY].s_addr) != sinfo->tuple[IP_CT_DIR_REPLY].dst.ip, IPT_CONNTRACK_REPLDST))
+			return 0;
+	}
+
+	if(sinfo->flags & IPT_CONNTRACK_STATUS) {
+		if (!ct || FWINV((ct->status & sinfo->statusmask) == 0, IPT_CONNTRACK_STATUS))
+			return 0;
+	}
+
+	if(sinfo->flags & IPT_CONNTRACK_EXPIRES) {
+		unsigned long expires;
+
+		if(!ct)
+			return 0;
+
+		expires = timer_pending(&ct->timeout) ? (ct->timeout.expires - jiffies)/HZ : 0;
+
+		if (FWINV(!(expires >= sinfo->expires_min && expires <= sinfo->expires_max), IPT_CONNTRACK_EXPIRES))
+			return 0;
+	}
+
+	return 1;
+}
+
+#endif /* CONFIG_IP_NF_CONNTRACK */
+
 static int check(const char *tablename,
 		 const struct ipt_ip *ip,
 		 void *matchinfo,
diff --git a/net/ipv4/netfilter/ipt_helper.c b/net/ipv4/netfilter/ipt_helper.c
index 3e7dd01..bf14e1c 100644
--- a/net/ipv4/netfilter/ipt_helper.c
+++ b/net/ipv4/netfilter/ipt_helper.c
@@ -13,9 +13,15 @@
 #include <linux/module.h>
 #include <linux/skbuff.h>
 #include <linux/netfilter.h>
+#if defined(CONFIG_IP_NF_CONNTRACK) || defined(CONFIG_IP_NF_CONNTRACK_MODULE)
 #include <linux/netfilter_ipv4/ip_conntrack.h>
 #include <linux/netfilter_ipv4/ip_conntrack_core.h>
 #include <linux/netfilter_ipv4/ip_conntrack_helper.h>
+#else
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#endif
 #include <linux/netfilter_ipv4/ip_tables.h>
 #include <linux/netfilter_ipv4/ipt_helper.h>
 
@@ -29,6 +35,7 @@
 #define DEBUGP(format, args...)
 #endif
 
+#if defined(CONFIG_IP_NF_CONNTRACK) || defined(CONFIG_IP_NF_CONNTRACK_MODULE)
 static int
 match(const struct sk_buff *skb,
       const struct net_device *in,
@@ -73,6 +80,53 @@
 	return ret;
 }
 
+#else /* CONFIG_IP_NF_CONNTRACK */
+
+static int
+match(const struct sk_buff *skb,
+      const struct net_device *in,
+      const struct net_device *out,
+      const void *matchinfo,
+      int offset,
+      int *hotdrop)
+{
+	const struct ipt_helper_info *info = matchinfo;
+	struct nf_conn *ct;
+	enum ip_conntrack_info ctinfo;
+	int ret = info->invert;
+	
+	ct = nf_ct_get((struct sk_buff *)skb, &ctinfo);
+	if (!ct) {
+		DEBUGP("ipt_helper: Eek! invalid conntrack?\n");
+		return ret;
+	}
+
+	if (!ct->master) {
+		DEBUGP("ipt_helper: conntrack %p has no master\n", ct);
+		return ret;
+	}
+
+	read_lock_bh(&nf_conntrack_lock);
+	if (!ct->master->helper) {
+		DEBUGP("ipt_helper: master ct %p has no helper\n",
+			ct->master);
+		goto out_unlock;
+	}
+
+	DEBUGP("master's name = %s, info->name = %s\n",
+		ct->master->helper->name, info->name);
+
+	if (info->name[0] == '\0')
+		ret ^= 1;
+	else
+		ret ^= !strncmp(ct->master->helper->name, info->name, 
+		                strlen(ct->master->helper->name));
+out_unlock:
+	read_unlock_bh(&nf_conntrack_lock);
+	return ret;
+}
+#endif
+
 static int check(const char *tablename,
 		 const struct ipt_ip *ip,
 		 void *matchinfo,
diff --git a/net/ipv4/netfilter/ipt_state.c b/net/ipv4/netfilter/ipt_state.c
index b1511b9..4d7f16b 100644
--- a/net/ipv4/netfilter/ipt_state.c
+++ b/net/ipv4/netfilter/ipt_state.c
@@ -10,7 +10,7 @@
 
 #include <linux/module.h>
 #include <linux/skbuff.h>
-#include <linux/netfilter_ipv4/ip_conntrack.h>
+#include <net/netfilter/nf_conntrack_compat.h>
 #include <linux/netfilter_ipv4/ip_tables.h>
 #include <linux/netfilter_ipv4/ipt_state.h>
 
@@ -30,9 +30,9 @@
 	enum ip_conntrack_info ctinfo;
 	unsigned int statebit;
 
-	if (skb->nfct == &ip_conntrack_untracked.ct_general)
+	if (nf_ct_is_untracked(skb))
 		statebit = IPT_STATE_UNTRACKED;
-	else if (!ip_conntrack_get(skb, &ctinfo))
+	else if (!nf_ct_get_ctinfo(skb, &ctinfo))
 		statebit = IPT_STATE_INVALID;
 	else
 		statebit = IPT_STATE_BIT(ctinfo);
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
new file mode 100644
index 0000000..8202c1c
--- /dev/null
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -0,0 +1,571 @@
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ *	- move L3 protocol dependent part to this file.
+ * 23 Mar 2004: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ *	- add get_features() to support various size of conntrack
+ *	  structures.
+ *
+ * Derived from net/ipv4/netfilter/ip_conntrack_standalone.c
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/ip.h>
+#include <linux/netfilter.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/icmp.h>
+#include <linux/sysctl.h>
+#include <net/ip.h>
+
+#include <linux/netfilter_ipv4.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_protocol.h>
+#include <net/netfilter/nf_conntrack_l3proto.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(format, args...)
+#endif
+
+DECLARE_PER_CPU(struct nf_conntrack_stat, nf_conntrack_stat);
+
+static int ipv4_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff,
+			     struct nf_conntrack_tuple *tuple)
+{
+	u_int32_t _addrs[2], *ap;
+	ap = skb_header_pointer(skb, nhoff + offsetof(struct iphdr, saddr),
+				sizeof(u_int32_t) * 2, _addrs);
+	if (ap == NULL)
+		return 0;
+
+	tuple->src.u3.ip = ap[0];
+	tuple->dst.u3.ip = ap[1];
+
+	return 1;
+}
+
+static int ipv4_invert_tuple(struct nf_conntrack_tuple *tuple,
+			   const struct nf_conntrack_tuple *orig)
+{
+	tuple->src.u3.ip = orig->dst.u3.ip;
+	tuple->dst.u3.ip = orig->src.u3.ip;
+
+	return 1;
+}
+
+static int ipv4_print_tuple(struct seq_file *s,
+			    const struct nf_conntrack_tuple *tuple)
+{
+	return seq_printf(s, "src=%u.%u.%u.%u dst=%u.%u.%u.%u ",
+		          NIPQUAD(tuple->src.u3.ip),
+			  NIPQUAD(tuple->dst.u3.ip));
+}
+
+static int ipv4_print_conntrack(struct seq_file *s,
+				const struct nf_conn *conntrack)
+{
+	return 0;
+}
+
+/* Returns new sk_buff, or NULL */
+static struct sk_buff *
+nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
+{
+	skb_orphan(skb);
+
+	local_bh_disable();
+	skb = ip_defrag(skb, user);
+	local_bh_enable();
+
+	if (skb)
+		ip_send_check(skb->nh.iph);
+
+	return skb;
+}
+
+static int
+ipv4_prepare(struct sk_buff **pskb, unsigned int hooknum, unsigned int *dataoff,
+	     u_int8_t *protonum)
+{
+	/* Never happens */
+	if ((*pskb)->nh.iph->frag_off & htons(IP_OFFSET)) {
+		if (net_ratelimit()) {
+			printk(KERN_ERR "ipv4_prepare: Frag of proto %u (hook=%u)\n",
+			(*pskb)->nh.iph->protocol, hooknum);
+		}
+		return -NF_DROP;
+	}
+
+	*dataoff = (*pskb)->nh.raw - (*pskb)->data + (*pskb)->nh.iph->ihl*4;
+	*protonum = (*pskb)->nh.iph->protocol;
+
+	return NF_ACCEPT;
+}
+
+int nat_module_is_loaded = 0;
+static u_int32_t ipv4_get_features(const struct nf_conntrack_tuple *tuple)
+{
+	if (nat_module_is_loaded)
+		return NF_CT_F_NAT;
+
+	return NF_CT_F_BASIC;
+}
+
+static unsigned int ipv4_confirm(unsigned int hooknum,
+				 struct sk_buff **pskb,
+				 const struct net_device *in,
+				 const struct net_device *out,
+				 int (*okfn)(struct sk_buff *))
+{
+	/* We've seen it coming out the other side: confirm it */
+	return nf_conntrack_confirm(pskb);
+}
+
+static unsigned int ipv4_conntrack_help(unsigned int hooknum,
+				      struct sk_buff **pskb,
+				      const struct net_device *in,
+				      const struct net_device *out,
+				      int (*okfn)(struct sk_buff *))
+{
+	struct nf_conn *ct;
+	enum ip_conntrack_info ctinfo;
+
+	/* This is where we call the helper: as the packet goes out. */
+	ct = nf_ct_get(*pskb, &ctinfo);
+	if (ct && ct->helper) {
+		unsigned int ret;
+		ret = ct->helper->help(pskb,
+				       (*pskb)->nh.raw - (*pskb)->data
+						       + (*pskb)->nh.iph->ihl*4,
+				       ct, ctinfo);
+		if (ret != NF_ACCEPT)
+			return ret;
+	}
+	return NF_ACCEPT;
+}
+
+static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
+					  struct sk_buff **pskb,
+					  const struct net_device *in,
+					  const struct net_device *out,
+					  int (*okfn)(struct sk_buff *))
+{
+#if !defined(CONFIG_IP_NF_NAT) && !defined(CONFIG_IP_NF_NAT_MODULE)
+	/* Previously seen (loopback)?  Ignore.  Do this before
+	   fragment check. */
+	if ((*pskb)->nfct)
+		return NF_ACCEPT;
+#endif
+
+	/* Gather fragments. */
+	if ((*pskb)->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
+		*pskb = nf_ct_ipv4_gather_frags(*pskb,
+						hooknum == NF_IP_PRE_ROUTING ?
+						IP_DEFRAG_CONNTRACK_IN :
+						IP_DEFRAG_CONNTRACK_OUT);
+		if (!*pskb)
+			return NF_STOLEN;
+	}
+	return NF_ACCEPT;
+}
+
+static unsigned int ipv4_refrag(unsigned int hooknum,
+				struct sk_buff **pskb,
+				const struct net_device *in,
+				const struct net_device *out,
+				int (*okfn)(struct sk_buff *))
+{
+	struct rtable *rt = (struct rtable *)(*pskb)->dst;
+
+	/* We've seen it coming out the other side: confirm */
+	if (ipv4_confirm(hooknum, pskb, in, out, okfn) != NF_ACCEPT)
+		return NF_DROP;
+
+	/* Local packets are never produced too large for their
+	   interface.  We defragment them at LOCAL_OUT, however,
+	   so we have to refragment them here. */
+	if ((*pskb)->len > dst_mtu(&rt->u.dst) &&
+	    !skb_shinfo(*pskb)->tso_size) {
+		/* No hook can be after us, so this should be OK. */
+		ip_fragment(*pskb, okfn);
+		return NF_STOLEN;
+	}
+	return NF_ACCEPT;
+}
+
+static unsigned int ipv4_conntrack_in(unsigned int hooknum,
+				      struct sk_buff **pskb,
+				      const struct net_device *in,
+				      const struct net_device *out,
+				      int (*okfn)(struct sk_buff *))
+{
+	return nf_conntrack_in(PF_INET, hooknum, pskb);
+}
+
+static unsigned int ipv4_conntrack_local(unsigned int hooknum,
+				         struct sk_buff **pskb,
+				         const struct net_device *in,
+				         const struct net_device *out,
+				         int (*okfn)(struct sk_buff *))
+{
+	/* root is playing with raw sockets. */
+	if ((*pskb)->len < sizeof(struct iphdr)
+	    || (*pskb)->nh.iph->ihl * 4 < sizeof(struct iphdr)) {
+		if (net_ratelimit())
+			printk("ipt_hook: happy cracking.\n");
+		return NF_ACCEPT;
+	}
+	return nf_conntrack_in(PF_INET, hooknum, pskb);
+}
+
+/* Connection tracking may drop packets, but never alters them, so
+   make it the first hook. */
+static struct nf_hook_ops ipv4_conntrack_defrag_ops = {
+	.hook		= ipv4_conntrack_defrag,
+	.owner		= THIS_MODULE,
+	.pf		= PF_INET,
+	.hooknum	= NF_IP_PRE_ROUTING,
+	.priority	= NF_IP_PRI_CONNTRACK_DEFRAG,
+};
+
+static struct nf_hook_ops ipv4_conntrack_in_ops = {
+	.hook		= ipv4_conntrack_in,
+	.owner		= THIS_MODULE,
+	.pf		= PF_INET,
+	.hooknum	= NF_IP_PRE_ROUTING,
+	.priority	= NF_IP_PRI_CONNTRACK,
+};
+
+static struct nf_hook_ops ipv4_conntrack_defrag_local_out_ops = {
+	.hook           = ipv4_conntrack_defrag,
+	.owner          = THIS_MODULE,
+	.pf             = PF_INET,
+	.hooknum        = NF_IP_LOCAL_OUT,
+	.priority       = NF_IP_PRI_CONNTRACK_DEFRAG,
+};
+
+static struct nf_hook_ops ipv4_conntrack_local_out_ops = {
+	.hook		= ipv4_conntrack_local,
+	.owner		= THIS_MODULE,
+	.pf		= PF_INET,
+	.hooknum	= NF_IP_LOCAL_OUT,
+	.priority	= NF_IP_PRI_CONNTRACK,
+};
+
+/* helpers */
+static struct nf_hook_ops ipv4_conntrack_helper_out_ops = {
+	.hook		= ipv4_conntrack_help,
+	.owner		= THIS_MODULE,
+	.pf		= PF_INET,
+	.hooknum	= NF_IP_POST_ROUTING,
+	.priority	= NF_IP_PRI_CONNTRACK_HELPER,
+};
+
+static struct nf_hook_ops ipv4_conntrack_helper_in_ops = {
+	.hook		= ipv4_conntrack_help,
+	.owner		= THIS_MODULE,
+	.pf		= PF_INET,
+	.hooknum	= NF_IP_LOCAL_IN,
+	.priority	= NF_IP_PRI_CONNTRACK_HELPER,
+};
+
+
+/* Refragmenter; last chance. */
+static struct nf_hook_ops ipv4_conntrack_out_ops = {
+	.hook		= ipv4_refrag,
+	.owner		= THIS_MODULE,
+	.pf		= PF_INET,
+	.hooknum	= NF_IP_POST_ROUTING,
+	.priority	= NF_IP_PRI_CONNTRACK_CONFIRM,
+};
+
+static struct nf_hook_ops ipv4_conntrack_local_in_ops = {
+	.hook		= ipv4_confirm,
+	.owner		= THIS_MODULE,
+	.pf		= PF_INET,
+	.hooknum	= NF_IP_LOCAL_IN,
+	.priority	= NF_IP_PRI_CONNTRACK_CONFIRM,
+};
+
+#ifdef CONFIG_SYSCTL
+/* From nf_conntrack_proto_icmp.c */
+extern unsigned long nf_ct_icmp_timeout;
+static struct ctl_table_header *nf_ct_ipv4_sysctl_header;
+
+static ctl_table nf_ct_sysctl_table[] = {
+	{
+		.ctl_name	= NET_NF_CONNTRACK_ICMP_TIMEOUT,
+		.procname	= "nf_conntrack_icmp_timeout",
+		.data		= &nf_ct_icmp_timeout,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+        { .ctl_name = 0 }
+};
+
+static ctl_table nf_ct_netfilter_table[] = {
+	{
+		.ctl_name       = NET_NETFILTER,
+		.procname       = "netfilter",
+		.mode           = 0555,
+		.child          = nf_ct_sysctl_table,
+	},
+	{ .ctl_name = 0 }
+};
+
+static ctl_table nf_ct_net_table[] = {
+	{
+		.ctl_name       = CTL_NET,
+		.procname       = "net",
+		.mode           = 0555,
+		.child          = nf_ct_netfilter_table,
+	},
+	{ .ctl_name = 0 }
+};
+#endif
+
+/* Fast function for those who don't want to parse /proc (and I don't
+   blame them). */
+/* Reversing the socket's dst/src point of view gives us the reply
+   mapping. */
+static int
+getorigdst(struct sock *sk, int optval, void __user *user, int *len)
+{
+	struct inet_sock *inet = inet_sk(sk);
+	struct nf_conntrack_tuple_hash *h;
+	struct nf_conntrack_tuple tuple;
+	
+	NF_CT_TUPLE_U_BLANK(&tuple);
+	tuple.src.u3.ip = inet->rcv_saddr;
+	tuple.src.u.tcp.port = inet->sport;
+	tuple.dst.u3.ip = inet->daddr;
+	tuple.dst.u.tcp.port = inet->dport;
+	tuple.src.l3num = PF_INET;
+	tuple.dst.protonum = IPPROTO_TCP;
+
+	/* We only do TCP at the moment: is there a better way? */
+	if (strcmp(sk->sk_prot->name, "TCP")) {
+		DEBUGP("SO_ORIGINAL_DST: Not a TCP socket\n");
+		return -ENOPROTOOPT;
+	}
+
+	if ((unsigned int) *len < sizeof(struct sockaddr_in)) {
+		DEBUGP("SO_ORIGINAL_DST: len %u not %u\n",
+		       *len, sizeof(struct sockaddr_in));
+		return -EINVAL;
+	}
+
+	h = nf_conntrack_find_get(&tuple, NULL);
+	if (h) {
+		struct sockaddr_in sin;
+		struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
+
+		sin.sin_family = AF_INET;
+		sin.sin_port = ct->tuplehash[IP_CT_DIR_ORIGINAL]
+			.tuple.dst.u.tcp.port;
+		sin.sin_addr.s_addr = ct->tuplehash[IP_CT_DIR_ORIGINAL]
+			.tuple.dst.u3.ip;
+
+		DEBUGP("SO_ORIGINAL_DST: %u.%u.%u.%u %u\n",
+		       NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));
+		nf_ct_put(ct);
+		if (copy_to_user(user, &sin, sizeof(sin)) != 0)
+			return -EFAULT;
+		else
+			return 0;
+	}
+	DEBUGP("SO_ORIGINAL_DST: Can't find %u.%u.%u.%u/%u-%u.%u.%u.%u/%u.\n",
+	       NIPQUAD(tuple.src.u3.ip), ntohs(tuple.src.u.tcp.port),
+	       NIPQUAD(tuple.dst.u3.ip), ntohs(tuple.dst.u.tcp.port));
+	return -ENOENT;
+}
+
+static struct nf_sockopt_ops so_getorigdst = {
+	.pf		= PF_INET,
+	.get_optmin	= SO_ORIGINAL_DST,
+	.get_optmax	= SO_ORIGINAL_DST+1,
+	.get		= &getorigdst,
+};
+
+struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 = {
+	.l3proto	 = PF_INET,
+	.name		 = "ipv4",
+	.pkt_to_tuple	 = ipv4_pkt_to_tuple,
+	.invert_tuple	 = ipv4_invert_tuple,
+	.print_tuple	 = ipv4_print_tuple,
+	.print_conntrack = ipv4_print_conntrack,
+	.prepare	 = ipv4_prepare,
+	.get_features	 = ipv4_get_features,
+	.me		 = THIS_MODULE,
+};
+
+extern struct nf_conntrack_protocol nf_conntrack_protocol_tcp4;
+extern struct nf_conntrack_protocol nf_conntrack_protocol_udp4;
+extern struct nf_conntrack_protocol nf_conntrack_protocol_icmp;
+static int init_or_cleanup(int init)
+{
+	int ret = 0;
+
+	if (!init) goto cleanup;
+
+	ret = nf_register_sockopt(&so_getorigdst);
+	if (ret < 0) {
+		printk(KERN_ERR "Unable to register netfilter socket option\n");
+		goto cleanup_nothing;
+	}
+
+	ret = nf_conntrack_protocol_register(&nf_conntrack_protocol_tcp4);
+	if (ret < 0) {
+		printk("nf_conntrack_ipv4: can't register tcp.\n");
+		goto cleanup_sockopt;
+	}
+
+	ret = nf_conntrack_protocol_register(&nf_conntrack_protocol_udp4);
+	if (ret < 0) {
+		printk("nf_conntrack_ipv4: can't register udp.\n");
+		goto cleanup_tcp;
+	}
+
+	ret = nf_conntrack_protocol_register(&nf_conntrack_protocol_icmp);
+	if (ret < 0) {
+		printk("nf_conntrack_ipv4: can't register icmp.\n");
+		goto cleanup_udp;
+	}
+
+	ret = nf_conntrack_l3proto_register(&nf_conntrack_l3proto_ipv4);
+	if (ret < 0) {
+		printk("nf_conntrack_ipv4: can't register ipv4\n");
+		goto cleanup_icmp;
+	}
+
+	ret = nf_register_hook(&ipv4_conntrack_defrag_ops);
+	if (ret < 0) {
+		printk("nf_conntrack_ipv4: can't register pre-routing defrag hook.\n");
+		goto cleanup_ipv4;
+	}
+	ret = nf_register_hook(&ipv4_conntrack_defrag_local_out_ops);
+	if (ret < 0) {
+		printk("nf_conntrack_ipv4: can't register local_out defrag hook.\n");
+		goto cleanup_defragops;
+	}
+
+	ret = nf_register_hook(&ipv4_conntrack_in_ops);
+	if (ret < 0) {
+		printk("nf_conntrack_ipv4: can't register pre-routing hook.\n");
+		goto cleanup_defraglocalops;
+	}
+
+	ret = nf_register_hook(&ipv4_conntrack_local_out_ops);
+	if (ret < 0) {
+		printk("nf_conntrack_ipv4: can't register local out hook.\n");
+		goto cleanup_inops;
+	}
+
+	ret = nf_register_hook(&ipv4_conntrack_helper_in_ops);
+	if (ret < 0) {
+		printk("nf_conntrack_ipv4: can't register local helper hook.\n");
+		goto cleanup_inandlocalops;
+	}
+
+	ret = nf_register_hook(&ipv4_conntrack_helper_out_ops);
+	if (ret < 0) {
+		printk("nf_conntrack_ipv4: can't register postrouting helper hook.\n");
+		goto cleanup_helperinops;
+	}
+
+	ret = nf_register_hook(&ipv4_conntrack_out_ops);
+	if (ret < 0) {
+		printk("nf_conntrack_ipv4: can't register post-routing hook.\n");
+		goto cleanup_helperoutops;
+	}
+
+	ret = nf_register_hook(&ipv4_conntrack_local_in_ops);
+	if (ret < 0) {
+		printk("nf_conntrack_ipv4: can't register local in hook.\n");
+		goto cleanup_inoutandlocalops;
+	}
+
+#ifdef CONFIG_SYSCTL
+	nf_ct_ipv4_sysctl_header = register_sysctl_table(nf_ct_net_table, 0);
+	if (nf_ct_ipv4_sysctl_header == NULL) {
+		printk("nf_conntrack: can't register to sysctl.\n");
+		ret = -ENOMEM;
+		goto cleanup_localinops;
+	}
+#endif
+
+	/* For use by REJECT target */
+	ip_ct_attach = __nf_conntrack_attach;
+
+	return ret;
+
+ cleanup:
+	synchronize_net();
+	ip_ct_attach = NULL;
+#ifdef CONFIG_SYSCTL
+ 	unregister_sysctl_table(nf_ct_ipv4_sysctl_header);
+ cleanup_localinops:
+#endif
+	nf_unregister_hook(&ipv4_conntrack_local_in_ops);
+ cleanup_inoutandlocalops:
+	nf_unregister_hook(&ipv4_conntrack_out_ops);
+ cleanup_helperoutops:
+	nf_unregister_hook(&ipv4_conntrack_helper_out_ops);
+ cleanup_helperinops:
+	nf_unregister_hook(&ipv4_conntrack_helper_in_ops);
+ cleanup_inandlocalops:
+	nf_unregister_hook(&ipv4_conntrack_local_out_ops);
+ cleanup_inops:
+	nf_unregister_hook(&ipv4_conntrack_in_ops);
+ cleanup_defraglocalops:
+	nf_unregister_hook(&ipv4_conntrack_defrag_local_out_ops);
+ cleanup_defragops:
+	nf_unregister_hook(&ipv4_conntrack_defrag_ops);
+ cleanup_ipv4:
+	nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv4);
+ cleanup_icmp:
+	nf_conntrack_protocol_unregister(&nf_conntrack_protocol_icmp);
+ cleanup_udp:
+	nf_conntrack_protocol_unregister(&nf_conntrack_protocol_udp4);
+ cleanup_tcp:
+	nf_conntrack_protocol_unregister(&nf_conntrack_protocol_tcp4);
+ cleanup_sockopt:
+	nf_unregister_sockopt(&so_getorigdst);
+ cleanup_nothing:
+	return ret;
+}
+
+MODULE_LICENSE("GPL");
+
+static int __init init(void)
+{
+	need_nf_conntrack();
+	return init_or_cleanup(1);
+}
+
+static void __exit fini(void)
+{
+	init_or_cleanup(0);
+}
+
+module_init(init);
+module_exit(fini);
+
+void need_ip_conntrack(void)
+{
+}
+
+EXPORT_SYMBOL(need_ip_conntrack);
+EXPORT_SYMBOL(nf_ct_ipv4_gather_frags);
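The getorigdst() handler above backs the SO_ORIGINAL_DST socket option that
transparent proxies use to recover the pre-REDIRECT destination of a TCP
connection.  A minimal userspace sketch of the intended call, assuming the
usual SOL_IP level and the SO_ORIGINAL_DST definition from
<linux/netfilter_ipv4.h>; the print_original_dst() helper name is made up for
the example and is not part of the patch:

#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/netfilter_ipv4.h>	/* SO_ORIGINAL_DST */

/* Query the original destination of an accepted, redirected TCP socket. */
static int print_original_dst(int connfd)
{
	struct sockaddr_in orig;
	socklen_t len = sizeof(orig);

	memset(&orig, 0, sizeof(orig));
	/* Answered by getorigdst(); fails with errno ENOENT when the
	 * connection is not known to conntrack. */
	if (getsockopt(connfd, SOL_IP, SO_ORIGINAL_DST, &orig, &len) < 0)
		return -1;

	printf("original destination %s:%u\n",
	       inet_ntoa(orig.sin_addr), ntohs(orig.sin_port));
	return 0;
}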
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
new file mode 100644
index 0000000..7ddb5c0
--- /dev/null
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -0,0 +1,301 @@
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ *	- enable working with Layer 3 protocol independent connection tracking.
+ *
+ * Derived from net/ipv4/netfilter/ip_conntrack_proto_icmp.c
+ */
+
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/netfilter.h>
+#include <linux/in.h>
+#include <linux/icmp.h>
+#include <linux/seq_file.h>
+#include <net/ip.h>
+#include <net/checksum.h>
+#include <linux/netfilter_ipv4.h>
+#include <net/netfilter/nf_conntrack_tuple.h>
+#include <net/netfilter/nf_conntrack_protocol.h>
+#include <net/netfilter/nf_conntrack_core.h>
+
+unsigned long nf_ct_icmp_timeout = 30*HZ;
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(format, args...)
+#endif
+
+static int icmp_pkt_to_tuple(const struct sk_buff *skb,
+			     unsigned int dataoff,
+			     struct nf_conntrack_tuple *tuple)
+{
+	struct icmphdr _hdr, *hp;
+
+	hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
+	if (hp == NULL)
+		return 0;
+
+	tuple->dst.u.icmp.type = hp->type;
+	tuple->src.u.icmp.id = hp->un.echo.id;
+	tuple->dst.u.icmp.code = hp->code;
+
+	return 1;
+}
+
+static int icmp_invert_tuple(struct nf_conntrack_tuple *tuple,
+			     const struct nf_conntrack_tuple *orig)
+{
+	/* Add 1; spaces filled with 0. */
+	static u_int8_t invmap[]
+		= { [ICMP_ECHO] = ICMP_ECHOREPLY + 1,
+		    [ICMP_ECHOREPLY] = ICMP_ECHO + 1,
+		    [ICMP_TIMESTAMP] = ICMP_TIMESTAMPREPLY + 1,
+		    [ICMP_TIMESTAMPREPLY] = ICMP_TIMESTAMP + 1,
+		    [ICMP_INFO_REQUEST] = ICMP_INFO_REPLY + 1,
+		    [ICMP_INFO_REPLY] = ICMP_INFO_REQUEST + 1,
+		    [ICMP_ADDRESS] = ICMP_ADDRESSREPLY + 1,
+		    [ICMP_ADDRESSREPLY] = ICMP_ADDRESS + 1};
+
+	if (orig->dst.u.icmp.type >= sizeof(invmap)
+	    || !invmap[orig->dst.u.icmp.type])
+		return 0;
+
+	tuple->src.u.icmp.id = orig->src.u.icmp.id;
+	tuple->dst.u.icmp.type = invmap[orig->dst.u.icmp.type] - 1;
+	tuple->dst.u.icmp.code = orig->dst.u.icmp.code;
+	return 1;
+}
+
+/* Print out the per-protocol part of the tuple. */
+static int icmp_print_tuple(struct seq_file *s,
+			    const struct nf_conntrack_tuple *tuple)
+{
+	return seq_printf(s, "type=%u code=%u id=%u ",
+			  tuple->dst.u.icmp.type,
+			  tuple->dst.u.icmp.code,
+			  ntohs(tuple->src.u.icmp.id));
+}
+
+/* Print out the private part of the conntrack. */
+static int icmp_print_conntrack(struct seq_file *s,
+				const struct nf_conn *conntrack)
+{
+	return 0;
+}
+
+/* Returns verdict for packet, or -1 for invalid. */
+static int icmp_packet(struct nf_conn *ct,
+		       const struct sk_buff *skb,
+		       unsigned int dataoff,
+		       enum ip_conntrack_info ctinfo,
+		       int pf,
+		       unsigned int hooknum)
+{
+	/* Try to delete connection immediately after all replies:
+           won't actually vanish as we still have skb, and del_timer
+           means this will only run once even if count hits zero twice
+           (theoretically possible with SMP) */
+	if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY) {
+		if (atomic_dec_and_test(&ct->proto.icmp.count)
+		    && del_timer(&ct->timeout))
+			ct->timeout.function((unsigned long)ct);
+	} else {
+		atomic_inc(&ct->proto.icmp.count);
+		nf_conntrack_event_cache(IPCT_PROTOINFO_VOLATILE, skb);
+		nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_icmp_timeout);
+	}
+
+	return NF_ACCEPT;
+}
+
+/* Called when a new connection for this protocol found. */
+static int icmp_new(struct nf_conn *conntrack,
+		    const struct sk_buff *skb, unsigned int dataoff)
+{
+	static u_int8_t valid_new[]
+		= { [ICMP_ECHO] = 1,
+		    [ICMP_TIMESTAMP] = 1,
+		    [ICMP_INFO_REQUEST] = 1,
+		    [ICMP_ADDRESS] = 1 };
+
+	if (conntrack->tuplehash[0].tuple.dst.u.icmp.type >= sizeof(valid_new)
+	    || !valid_new[conntrack->tuplehash[0].tuple.dst.u.icmp.type]) {
+		/* Can't create a new ICMP `conn' with this. */
+		DEBUGP("icmp: can't create new conn with type %u\n",
+		       conntrack->tuplehash[0].tuple.dst.u.icmp.type);
+		NF_CT_DUMP_TUPLE(&conntrack->tuplehash[0].tuple);
+		return 0;
+	}
+	atomic_set(&conntrack->proto.icmp.count, 0);
+	return 1;
+}
+
+extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4;
+/* Returns conntrack if it dealt with ICMP, and filled in skb fields */
+static int
+icmp_error_message(struct sk_buff *skb,
+		   enum ip_conntrack_info *ctinfo,
+		   unsigned int hooknum)
+{
+	struct nf_conntrack_tuple innertuple, origtuple;
+	struct {
+		struct icmphdr icmp;
+		struct iphdr ip;
+	} _in, *inside;
+	struct nf_conntrack_protocol *innerproto;
+	struct nf_conntrack_tuple_hash *h;
+	int dataoff;
+
+	NF_CT_ASSERT(skb->nfct == NULL);
+
+	/* Not enough header? */
+	inside = skb_header_pointer(skb, skb->nh.iph->ihl*4, sizeof(_in), &_in);
+	if (inside == NULL)
+		return -NF_ACCEPT;
+
+	/* Ignore ICMP's containing fragments (shouldn't happen) */
+	if (inside->ip.frag_off & htons(IP_OFFSET)) {
+		DEBUGP("icmp_error_message: fragment of proto %u\n",
+		       inside->ip.protocol);
+		return -NF_ACCEPT;
+	}
+
+	innerproto = nf_ct_find_proto(PF_INET, inside->ip.protocol);
+	dataoff = skb->nh.iph->ihl*4 + sizeof(inside->icmp);
+	/* Are they talking about one of our connections? */
+	if (!nf_ct_get_tuple(skb, dataoff, dataoff + inside->ip.ihl*4, PF_INET,
+			     inside->ip.protocol, &origtuple,
+			     &nf_conntrack_l3proto_ipv4, innerproto)) {
+		DEBUGP("icmp_error_message: ! get_tuple p=%u",
+		       inside->ip.protocol);
+		return -NF_ACCEPT;
+	}
+
+	/* Ordinarily, we'd expect the inverted tupleproto, but it's
+	   been preserved inside the ICMP. */
+	if (!nf_ct_invert_tuple(&innertuple, &origtuple,
+				&nf_conntrack_l3proto_ipv4, innerproto)) {
+		DEBUGP("icmp_error_message: no match\n");
+		return -NF_ACCEPT;
+	}
+
+	*ctinfo = IP_CT_RELATED;
+
+	h = nf_conntrack_find_get(&innertuple, NULL);
+	if (!h) {
+		/* Locally generated ICMPs will match inverted if they
+		   haven't been SNAT'ed yet */
+		/* FIXME: NAT code has to handle half-done double NAT --RR */
+		if (hooknum == NF_IP_LOCAL_OUT)
+			h = nf_conntrack_find_get(&origtuple, NULL);
+
+		if (!h) {
+			DEBUGP("icmp_error_message: no match\n");
+			return -NF_ACCEPT;
+		}
+
+		/* Reverse direction from that found */
+		if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY)
+			*ctinfo += IP_CT_IS_REPLY;
+	} else {
+		if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY)
+			*ctinfo += IP_CT_IS_REPLY;
+	}
+
+	/* Update skb to refer to this connection */
+	skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general;
+	skb->nfctinfo = *ctinfo;
+	return -NF_ACCEPT;
+}
+
+/* Small and modified version of icmp_rcv */
+static int
+icmp_error(struct sk_buff *skb, unsigned int dataoff,
+	   enum ip_conntrack_info *ctinfo, int pf, unsigned int hooknum)
+{
+	struct icmphdr _ih, *icmph;
+
+	/* Not enough header? */
+	icmph = skb_header_pointer(skb, skb->nh.iph->ihl*4, sizeof(_ih), &_ih);
+	if (icmph == NULL) {
+		if (LOG_INVALID(IPPROTO_ICMP))
+			nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
+				      "nf_ct_icmp: short packet ");
+		return -NF_ACCEPT;
+	}
+
+	/* See ip_conntrack_proto_tcp.c */
+	if (hooknum != NF_IP_PRE_ROUTING)
+		goto checksum_skipped;
+
+	switch (skb->ip_summed) {
+	case CHECKSUM_HW:
+		if (!(u16)csum_fold(skb->csum))
+			break;
+		if (LOG_INVALID(IPPROTO_ICMP))
+			nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
+				      "nf_ct_icmp: bad HW ICMP checksum ");
+		return -NF_ACCEPT;
+	case CHECKSUM_NONE:
+		if ((u16)csum_fold(skb_checksum(skb, 0, skb->len, 0))) {
+			if (LOG_INVALID(IPPROTO_ICMP))
+				nf_log_packet(PF_INET, 0, skb, NULL, NULL,
+					      NULL,
+					      "nf_ct_icmp: bad ICMP checksum ");
+			return -NF_ACCEPT;
+		}
+	default:
+		break;
+	}
+
+checksum_skipped:
+	/*
+	 *	18 is the highest 'known' ICMP type. Anything else is a mystery
+	 *
+	 *	RFC 1122: 3.2.2  Unknown ICMP message types MUST be silently
+	 *		  discarded.
+	 */
+	if (icmph->type > NR_ICMP_TYPES) {
+		if (LOG_INVALID(IPPROTO_ICMP))
+			nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
+				      "nf_ct_icmp: invalid ICMP type ");
+		return -NF_ACCEPT;
+	}
+
+	/* Need to track icmp error message? */
+	if (icmph->type != ICMP_DEST_UNREACH
+	    && icmph->type != ICMP_SOURCE_QUENCH
+	    && icmph->type != ICMP_TIME_EXCEEDED
+	    && icmph->type != ICMP_PARAMETERPROB
+	    && icmph->type != ICMP_REDIRECT)
+		return NF_ACCEPT;
+
+	return icmp_error_message(skb, ctinfo, hooknum);
+}
+
+struct nf_conntrack_protocol nf_conntrack_protocol_icmp =
+{
+	.list			= { NULL, NULL },
+	.l3proto		= PF_INET,
+	.proto			= IPPROTO_ICMP,
+	.name			= "icmp",
+	.pkt_to_tuple		= icmp_pkt_to_tuple,
+	.invert_tuple		= icmp_invert_tuple,
+	.print_tuple		= icmp_print_tuple,
+	.print_conntrack	= icmp_print_conntrack,
+	.packet			= icmp_packet,
+	.new			= icmp_new,
+	.error			= icmp_error,
+	.destroy		= NULL,
+	.me			= NULL
+};
+
+EXPORT_SYMBOL(nf_conntrack_protocol_icmp);
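The invmap[] table in icmp_invert_tuple() above stores each inverse ICMP type
plus one ("Add 1; spaces filled with 0"), so that a zero entry can mean "no
mapping" even though ICMP_ECHOREPLY itself is type 0.  A standalone sketch of
the same lookup idiom; icmp_inverse_type() is a hypothetical helper, not taken
from the patch:

#include <linux/icmp.h>

/* Return the inverse ICMP type, or -1 if the type is not tracked.
 * Each entry holds type + 1 so that 0 can double as "no mapping",
 * even for ICMP_ECHOREPLY whose real type value is 0. */
static int icmp_inverse_type(u_int8_t type)
{
	static const u_int8_t invmap[] = {
		[ICMP_ECHO]	 = ICMP_ECHOREPLY + 1,
		[ICMP_ECHOREPLY] = ICMP_ECHO + 1,
	};

	if (type >= sizeof(invmap) || !invmap[type])
		return -1;
	return invmap[type] - 1;
}

With this encoding icmp_inverse_type(ICMP_ECHO) yields ICMP_ECHOREPLY (0),
while an untracked type such as ICMP_DEST_UNREACH yields -1.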
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 634dabb..ac1fcf5 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1110,24 +1110,18 @@
 static int tcp_v4_checksum_init(struct sk_buff *skb)
 {
 	if (skb->ip_summed == CHECKSUM_HW) {
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
 		if (!tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
-				  skb->nh.iph->daddr, skb->csum))
+				  skb->nh.iph->daddr, skb->csum)) {
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
 			return 0;
-
-		LIMIT_NETDEBUG(KERN_DEBUG "hw tcp v4 csum failed\n");
-		skb->ip_summed = CHECKSUM_NONE;
+		}
 	}
+
+	skb->csum = csum_tcpudp_nofold(skb->nh.iph->saddr, skb->nh.iph->daddr,
+				       skb->len, IPPROTO_TCP, 0);
+
 	if (skb->len <= 76) {
-		if (tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
-				 skb->nh.iph->daddr,
-				 skb_checksum(skb, 0, skb->len, 0)))
-			return -1;
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-	} else {
-		skb->csum = ~tcp_v4_check(skb->h.th, skb->len,
-					  skb->nh.iph->saddr,
-					  skb->nh.iph->daddr, 0);
+		return __skb_checksum_complete(skb);
 	}
 	return 0;
 }
@@ -1219,7 +1213,7 @@
 	 * provided case of th->doff==0 is elimineted.
 	 * So, we defer the checks. */
 	if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
-	     tcp_v4_checksum_init(skb) < 0))
+	     tcp_v4_checksum_init(skb)))
 		goto bad_packet;
 
 	th = skb->h.th;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index e0bd101..2422a5f 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -761,7 +761,7 @@
 
 static __inline__ int __udp_checksum_complete(struct sk_buff *skb)
 {
-	return (unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum));
+	return __skb_checksum_complete(skb);
 }
 
 static __inline__ int udp_checksum_complete(struct sk_buff *skb)
@@ -1100,11 +1100,8 @@
 	if (uh->check == 0) {
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 	} else if (skb->ip_summed == CHECKSUM_HW) {
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
 		if (!udp_check(uh, ulen, saddr, daddr, skb->csum))
-			return 0;
-		LIMIT_NETDEBUG(KERN_DEBUG "udp v4 hw csum failure.\n");
-		skb->ip_summed = CHECKSUM_NONE;
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
 	}
 	if (skb->ip_summed != CHECKSUM_UNNECESSARY)
 		skb->csum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);
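The tcp_v4_checksum_init() and udp.c receive-path hunks above share one
pattern: a good hardware checksum promotes the skb to CHECKSUM_UNNECESSARY,
and otherwise skb->csum is seeded with the pseudo-header sum so that full
software verification can be deferred until the data is actually needed.
A minimal sketch of the deferred completion step, assuming an skb prepared as
in those hunks; example_checksum_complete() is an illustration that mirrors
the udp checksum helpers rather than defining any new kernel interface:

#include <linux/skbuff.h>

/* Finish a checksum that was deferred by seeding skb->csum with the
 * pseudo-header sum; a non-zero return means the checksum is bad. */
static inline int example_checksum_complete(struct sk_buff *skb)
{
	return skb->ip_summed != CHECKSUM_UNNECESSARY &&
	       __skb_checksum_complete(skb);
}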
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 23e5403..1bdf0fb 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -585,17 +585,16 @@
 	daddr = &skb->nh.ipv6h->daddr;
 
 	/* Perform checksum. */
-	if (skb->ip_summed == CHECKSUM_HW) {
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-		if (csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_ICMPV6,
-				    skb->csum)) {
-			LIMIT_NETDEBUG(KERN_DEBUG "ICMPv6 hw checksum failed\n");
-			skb->ip_summed = CHECKSUM_NONE;
-		}
-	}
-	if (skb->ip_summed == CHECKSUM_NONE) {
-		if (csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_ICMPV6,
-				    skb_checksum(skb, 0, skb->len, 0))) {
+	switch (skb->ip_summed) {
+	case CHECKSUM_HW:
+		if (!csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_ICMPV6,
+				     skb->csum))
+			break;
+		/* fall through */
+	case CHECKSUM_NONE:
+		skb->csum = ~csum_ipv6_magic(saddr, daddr, skb->len,
+					     IPPROTO_ICMPV6, 0);
+		if (__skb_checksum_complete(skb)) {
 			LIMIT_NETDEBUG(KERN_DEBUG "ICMPv6 checksum failed [%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x > %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x]\n",
 				       NIP6(*saddr), NIP6(*daddr));
 			goto discard_it;
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 6e34804..a6026d2 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -176,6 +176,11 @@
 		if (ipprot->flags & INET6_PROTO_FINAL) {
 			struct ipv6hdr *hdr;	
 
+			/* Free reference early: we don't need it any more,
+			   and it may hold ip_conntrack module loaded
+			   indefinitely. */
+			nf_reset(skb);
+
 			skb_postpull_rcsum(skb, skb->nh.raw,
 					   skb->h.raw - skb->nh.raw);
 			hdr = skb->nh.ipv6h;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index dbd9767..c1fa693 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -441,9 +441,15 @@
 #ifdef CONFIG_NETFILTER
 	to->nfmark = from->nfmark;
 	/* Connection association is same as pre-frag packet */
+	nf_conntrack_put(to->nfct);
 	to->nfct = from->nfct;
 	nf_conntrack_get(to->nfct);
 	to->nfctinfo = from->nfctinfo;
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+	nf_conntrack_put_reasm(to->nfct_reasm);
+	to->nfct_reasm = from->nfct_reasm;
+	nf_conntrack_get_reasm(to->nfct_reasm);
+#endif
 #ifdef CONFIG_BRIDGE_NETFILTER
 	nf_bridge_put(to->nf_bridge);
 	to->nf_bridge = from->nf_bridge;
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index bb7ccfe..971ba60 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -278,5 +278,19 @@
 	  If you want to compile it as a module, say M here and read
 	  <file:Documentation/modules.txt>.  If unsure, say `N'.
 
+config NF_CONNTRACK_IPV6
+	tristate "IPv6 support for new connection tracking (EXPERIMENTAL)"
+	depends on EXPERIMENTAL && NF_CONNTRACK
+	---help---
+	  Connection tracking keeps a record of what packets have passed
+	  through your machine, in order to figure out how they are related
+	  into connections.
+
+	  This is the IPv6 support for Layer 3 independent connection
+	  tracking.  Layer 3 independent connection tracking is an
+	  experimental scheme which generalizes ip_conntrack to support
+	  other Layer 3 protocols.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
 endmenu
 
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index 2b2c370..9ab5b2c 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -27,3 +27,9 @@
 obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o
 obj-$(CONFIG_IP6_NF_MATCH_HL) += ip6t_hl.o
 obj-$(CONFIG_IP6_NF_TARGET_REJECT) += ip6t_REJECT.o
+
+# objects for l3 independent conntrack
+nf_conntrack_ipv6-objs  :=  nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o nf_conntrack_reasm.o
+
+# l3 independent conntrack
+obj-$(CONFIG_NF_CONNTRACK_IPV6) += nf_conntrack_ipv6.o
diff --git a/net/ipv6/netfilter/ip6t_MARK.c b/net/ipv6/netfilter/ip6t_MARK.c
index 0c7584f..eab8fb8 100644
--- a/net/ipv6/netfilter/ip6t_MARK.c
+++ b/net/ipv6/netfilter/ip6t_MARK.c
@@ -56,9 +56,9 @@
 	return 1;
 }
 
-static struct ip6t_target ip6t_mark_reg = {
-	.name 		= "MARK",
-	.target 	= target,
+static struct ip6t_target ip6t_mark_reg = { 
+	.name		= "MARK",
+	.target		= target,
 	.checkentry	= checkentry,
 	.me		= THIS_MODULE
 };
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
new file mode 100644
index 0000000..e2c90b3
--- /dev/null
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -0,0 +1,556 @@
+/*
+ * Copyright (C)2004 USAGI/WIDE Project
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Author:
+ *	Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ *
+ * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ *	- support Layer 3 protocol independent connection tracking.
+ *	  Based on the original ip_conntrack code which	had the following
+ *	  copyright information:
+ *		(C) 1999-2001 Paul `Rusty' Russell
+ *		(C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * 23 Mar 2004: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ *	- add get_features() to support various size of conntrack
+ *	  structures.
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/ipv6.h>
+#include <linux/in6.h>
+#include <linux/netfilter.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/icmp.h>
+#include <linux/sysctl.h>
+#include <net/ipv6.h>
+
+#include <linux/netfilter_ipv6.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_protocol.h>
+#include <net/netfilter/nf_conntrack_l3proto.h>
+#include <net/netfilter/nf_conntrack_core.h>
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(format, args...)
+#endif
+
+DECLARE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat);
+
+static int ipv6_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff,
+			     struct nf_conntrack_tuple *tuple)
+{
+	u_int32_t _addrs[8], *ap;
+
+	ap = skb_header_pointer(skb, nhoff + offsetof(struct ipv6hdr, saddr),
+				sizeof(_addrs), _addrs);
+	if (ap == NULL)
+		return 0;
+
+	memcpy(tuple->src.u3.ip6, ap, sizeof(tuple->src.u3.ip6));
+	memcpy(tuple->dst.u3.ip6, ap + 4, sizeof(tuple->dst.u3.ip6));
+
+	return 1;
+}
+
+static int ipv6_invert_tuple(struct nf_conntrack_tuple *tuple,
+			     const struct nf_conntrack_tuple *orig)
+{
+	memcpy(tuple->src.u3.ip6, orig->dst.u3.ip6, sizeof(tuple->src.u3.ip6));
+	memcpy(tuple->dst.u3.ip6, orig->src.u3.ip6, sizeof(tuple->dst.u3.ip6));
+
+	return 1;
+}
+
+static int ipv6_print_tuple(struct seq_file *s,
+			    const struct nf_conntrack_tuple *tuple)
+{
+	return seq_printf(s, "src=%x:%x:%x:%x:%x:%x:%x:%x dst=%x:%x:%x:%x:%x:%x:%x:%x ",
+			  NIP6(*((struct in6_addr *)tuple->src.u3.ip6)),
+			  NIP6(*((struct in6_addr *)tuple->dst.u3.ip6)));
+}
+
+static int ipv6_print_conntrack(struct seq_file *s,
+				const struct nf_conn *conntrack)
+{
+	return 0;
+}
+
+/*
+ * Based on ipv6_skip_exthdr() in net/ipv6/exthdrs.c
+ *
+ * This function parses the (possibly truncated) extension header chain
+ * in "skb", starting at offset "start" and spanning at most "len" bytes.
+ * "nexthdrp" initially points to the place where the type of the first
+ * header can be found.
+ *
+ * It skips all well-known exthdrs and returns the offset of the start
+ * of the unparsable area, i.e. the first header with an unknown type.
+ * On success, *nexthdrp is updated with the type/protocol of this header.
+ *
+ * NOTES: - it may return an offset pointing beyond the end of the packet,
+ *          if the last recognized header is truncated in the middle.
+ *        - if the packet is truncated, so that all parsed headers are
+ *          skipped, it returns -1.
+ *        - if the packet is fragmented, it returns the offset of the
+ *          fragment header.
+ *        - ESP is unparsable for now and is treated like a normal
+ *          payload protocol.
+ *        - Note also the special handling of the AUTH header. Thanks to
+ *          IPsec wizards.
+ */
+
+int nf_ct_ipv6_skip_exthdr(struct sk_buff *skb, int start, u8 *nexthdrp,
+			   int len)
+{
+	u8 nexthdr = *nexthdrp;
+
+	while (ipv6_ext_hdr(nexthdr)) {
+		struct ipv6_opt_hdr hdr;
+		int hdrlen;
+
+		if (len < (int)sizeof(struct ipv6_opt_hdr))
+			return -1;
+		if (nexthdr == NEXTHDR_NONE)
+			break;
+		if (nexthdr == NEXTHDR_FRAGMENT)
+			break;
+		if (skb_copy_bits(skb, start, &hdr, sizeof(hdr)))
+			BUG();
+		if (nexthdr == NEXTHDR_AUTH)
+			hdrlen = (hdr.hdrlen+2)<<2;
+		else
+			hdrlen = ipv6_optlen(&hdr);
+
+		nexthdr = hdr.nexthdr;
+		len -= hdrlen;
+		start += hdrlen;
+	}
+
+	*nexthdrp = nexthdr;
+	return start;
+}
+
+static int
+ipv6_prepare(struct sk_buff **pskb, unsigned int hooknum, unsigned int *dataoff,
+	     u_int8_t *protonum)
+{
+	unsigned int extoff;
+	unsigned char pnum;
+	int protoff;
+
+	extoff = (u8*)((*pskb)->nh.ipv6h + 1) - (*pskb)->data;
+	pnum = (*pskb)->nh.ipv6h->nexthdr;
+
+	protoff = nf_ct_ipv6_skip_exthdr(*pskb, extoff, &pnum,
+					 (*pskb)->len - extoff);
+
+	/*
+	 * (protoff == (*pskb)->len) means that the packet has no data
+	 * except IPv6 and extension headers, but it is tracked anyway. - YK
+	 */
+	if ((protoff < 0) || (protoff > (*pskb)->len)) {
+		DEBUGP("ip6_conntrack_core: can't find proto in pkt\n");
+		NF_CT_STAT_INC(error);
+		NF_CT_STAT_INC(invalid);
+		return -NF_ACCEPT;
+	}
+
+	*dataoff = protoff;
+	*protonum = pnum;
+	return NF_ACCEPT;
+}
+
+static u_int32_t ipv6_get_features(const struct nf_conntrack_tuple *tuple)
+{
+	return NF_CT_F_BASIC;
+}
+
+static unsigned int ipv6_confirm(unsigned int hooknum,
+				 struct sk_buff **pskb,
+				 const struct net_device *in,
+				 const struct net_device *out,
+				 int (*okfn)(struct sk_buff *))
+{
+	struct nf_conn *ct;
+	enum ip_conntrack_info ctinfo;
+
+	/* This is where we call the helper: as the packet goes out. */
+	ct = nf_ct_get(*pskb, &ctinfo);
+	if (ct && ct->helper) {
+		unsigned int ret, protoff;
+		unsigned int extoff = (u8*)((*pskb)->nh.ipv6h + 1)
+				      - (*pskb)->data;
+		unsigned char pnum = (*pskb)->nh.ipv6h->nexthdr;
+
+		protoff = nf_ct_ipv6_skip_exthdr(*pskb, extoff, &pnum,
+						 (*pskb)->len - extoff);
+		if (protoff < 0 || protoff > (*pskb)->len ||
+		    pnum == NEXTHDR_FRAGMENT) {
+			DEBUGP("proto header not found\n");
+			return NF_ACCEPT;
+		}
+
+		ret = ct->helper->help(pskb, protoff, ct, ctinfo);
+		if (ret != NF_ACCEPT)
+			return ret;
+	}
+
+	/* We've seen it coming out the other side: confirm it */
+
+	return nf_conntrack_confirm(pskb);
+}
+
+extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb);
+extern void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
+			       struct net_device *in,
+			       struct net_device *out,
+			       int (*okfn)(struct sk_buff *));
+static unsigned int ipv6_defrag(unsigned int hooknum,
+				struct sk_buff **pskb,
+				const struct net_device *in,
+				const struct net_device *out,
+				int (*okfn)(struct sk_buff *))
+{
+	struct sk_buff *reasm;
+
+	/* Previously seen (loopback)?  */
+	if ((*pskb)->nfct)
+		return NF_ACCEPT;
+
+	reasm = nf_ct_frag6_gather(*pskb);
+
+	/* queued */
+	if (reasm == NULL)
+		return NF_STOLEN;
+
+	/* error occurred or not fragmented */
+	if (reasm == *pskb)
+		return NF_ACCEPT;
+
+	nf_ct_frag6_output(hooknum, reasm, (struct net_device *)in,
+			   (struct net_device *)out, okfn);
+
+	return NF_STOLEN;
+}
+
+static unsigned int ipv6_conntrack_in(unsigned int hooknum,
+				      struct sk_buff **pskb,
+				      const struct net_device *in,
+				      const struct net_device *out,
+				      int (*okfn)(struct sk_buff *))
+{
+	struct sk_buff *reasm = (*pskb)->nfct_reasm;
+
+	/* This packet is a fragment and has an associated reassembled skb. */
+	if (reasm) {
+		/* Reassembled packet not parsed yet? */
+		if (!reasm->nfct) {
+			unsigned int ret;
+
+			ret = nf_conntrack_in(PF_INET6, hooknum, &reasm);
+			if (ret != NF_ACCEPT)
+				return ret;
+		}
+		nf_conntrack_get(reasm->nfct);
+		(*pskb)->nfct = reasm->nfct;
+		return NF_ACCEPT;
+	}
+
+	return nf_conntrack_in(PF_INET6, hooknum, pskb);
+}
+
+static unsigned int ipv6_conntrack_local(unsigned int hooknum,
+					 struct sk_buff **pskb,
+					 const struct net_device *in,
+					 const struct net_device *out,
+					 int (*okfn)(struct sk_buff *))
+{
+	/* root is playing with raw sockets. */
+	if ((*pskb)->len < sizeof(struct ipv6hdr)) {
+		if (net_ratelimit())
+			printk("ipv6_conntrack_local: packet too short\n");
+		return NF_ACCEPT;
+	}
+	return ipv6_conntrack_in(hooknum, pskb, in, out, okfn);
+}
+
+/* Connection tracking may drop packets, but never alters them, so
+   make it the first hook. */
+static struct nf_hook_ops ipv6_conntrack_defrag_ops = {
+	.hook		= ipv6_defrag,
+	.owner		= THIS_MODULE,
+	.pf		= PF_INET6,
+	.hooknum	= NF_IP6_PRE_ROUTING,
+	.priority	= NF_IP6_PRI_CONNTRACK_DEFRAG,
+};
+
+static struct nf_hook_ops ipv6_conntrack_in_ops = {
+	.hook		= ipv6_conntrack_in,
+	.owner		= THIS_MODULE,
+	.pf		= PF_INET6,
+	.hooknum	= NF_IP6_PRE_ROUTING,
+	.priority	= NF_IP6_PRI_CONNTRACK,
+};
+
+static struct nf_hook_ops ipv6_conntrack_local_out_ops = {
+	.hook		= ipv6_conntrack_local,
+	.owner		= THIS_MODULE,
+	.pf		= PF_INET6,
+	.hooknum	= NF_IP6_LOCAL_OUT,
+	.priority	= NF_IP6_PRI_CONNTRACK,
+};
+
+static struct nf_hook_ops ipv6_conntrack_defrag_local_out_ops = {
+	.hook		= ipv6_defrag,
+	.owner		= THIS_MODULE,
+	.pf		= PF_INET6,
+	.hooknum	= NF_IP6_LOCAL_OUT,
+	.priority	= NF_IP6_PRI_CONNTRACK_DEFRAG,
+};
+
+/* Confirm the connection on the way out; last chance. */
+static struct nf_hook_ops ipv6_conntrack_out_ops = {
+	.hook		= ipv6_confirm,
+	.owner		= THIS_MODULE,
+	.pf		= PF_INET6,
+	.hooknum	= NF_IP6_POST_ROUTING,
+	.priority	= NF_IP6_PRI_LAST,
+};
+
+static struct nf_hook_ops ipv6_conntrack_local_in_ops = {
+	.hook		= ipv6_confirm,
+	.owner		= THIS_MODULE,
+	.pf		= PF_INET6,
+	.hooknum	= NF_IP6_LOCAL_IN,
+	.priority	= NF_IP6_PRI_LAST-1,
+};
+
+#ifdef CONFIG_SYSCTL
+
+/* From nf_conntrack_proto_icmpv6.c */
+extern unsigned long nf_ct_icmpv6_timeout;
+
+/* From nf_conntrack_frag6.c */
+extern int nf_ct_frag6_timeout;
+extern int nf_ct_frag6_low_thresh;
+extern int nf_ct_frag6_high_thresh;
+
+static struct ctl_table_header *nf_ct_ipv6_sysctl_header;
+
+static ctl_table nf_ct_sysctl_table[] = {
+	{
+		.ctl_name	= NET_NF_CONNTRACK_ICMPV6_TIMEOUT,
+		.procname	= "nf_conntrack_icmpv6_timeout",
+		.data		= &nf_ct_icmpv6_timeout,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= NET_NF_CONNTRACK_FRAG6_TIMEOUT,
+		.procname	= "nf_conntrack_frag6_timeout",
+		.data		= &nf_ct_frag6_timeout,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= NET_NF_CONNTRACK_FRAG6_LOW_THRESH,
+		.procname	= "nf_conntrack_frag6_low_thresh",
+		.data		= &nf_ct_frag6_low_thresh,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
+	{
+		.ctl_name	= NET_NF_CONNTRACK_FRAG6_HIGH_THRESH,
+		.procname	= "nf_conntrack_frag6_high_thresh",
+		.data		= &nf_ct_frag6_high_thresh,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
+	{ .ctl_name = 0 }
+};
+
+static ctl_table nf_ct_netfilter_table[] = {
+	{
+		.ctl_name	= NET_NETFILTER,
+		.procname	= "netfilter",
+		.mode		= 0555,
+		.child		= nf_ct_sysctl_table,
+	},
+	{ .ctl_name = 0 }
+};
+
+static ctl_table nf_ct_net_table[] = {
+	{
+		.ctl_name	= CTL_NET,
+		.procname	= "net",
+		.mode		= 0555,
+		.child		= nf_ct_netfilter_table,
+	},
+	{ .ctl_name = 0 }
+};
+#endif
+
+struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6 = {
+	.l3proto		= PF_INET6,
+	.name			= "ipv6",
+	.pkt_to_tuple		= ipv6_pkt_to_tuple,
+	.invert_tuple		= ipv6_invert_tuple,
+	.print_tuple		= ipv6_print_tuple,
+	.print_conntrack	= ipv6_print_conntrack,
+	.prepare		= ipv6_prepare,
+	.get_features		= ipv6_get_features,
+	.me			= THIS_MODULE,
+};
+
+extern struct nf_conntrack_protocol nf_conntrack_protocol_tcp6;
+extern struct nf_conntrack_protocol nf_conntrack_protocol_udp6;
+extern struct nf_conntrack_protocol nf_conntrack_protocol_icmpv6;
+extern int nf_ct_frag6_init(void);
+extern void nf_ct_frag6_cleanup(void);
+static int init_or_cleanup(int init)
+{
+	int ret = 0;
+
+	if (!init) goto cleanup;
+
+	ret = nf_ct_frag6_init();
+	if (ret < 0) {
+		printk("nf_conntrack_ipv6: can't initialize frag6.\n");
+		goto cleanup_nothing;
+	}
+	ret = nf_conntrack_protocol_register(&nf_conntrack_protocol_tcp6);
+	if (ret < 0) {
+		printk("nf_conntrack_ipv6: can't register tcp.\n");
+		goto cleanup_frag6;
+	}
+
+	ret = nf_conntrack_protocol_register(&nf_conntrack_protocol_udp6);
+	if (ret < 0) {
+		printk("nf_conntrack_ipv6: can't register udp.\n");
+		goto cleanup_tcp;
+	}
+
+	ret = nf_conntrack_protocol_register(&nf_conntrack_protocol_icmpv6);
+	if (ret < 0) {
+		printk("nf_conntrack_ipv6: can't register icmpv6.\n");
+		goto cleanup_udp;
+	}
+
+	ret = nf_conntrack_l3proto_register(&nf_conntrack_l3proto_ipv6);
+	if (ret < 0) {
+		printk("nf_conntrack_ipv6: can't register ipv6\n");
+		goto cleanup_icmpv6;
+	}
+
+	ret = nf_register_hook(&ipv6_conntrack_defrag_ops);
+	if (ret < 0) {
+		printk("nf_conntrack_ipv6: can't register pre-routing defrag "
+		       "hook.\n");
+		goto cleanup_ipv6;
+	}
+
+	ret = nf_register_hook(&ipv6_conntrack_defrag_local_out_ops);
+	if (ret < 0) {
+		printk("nf_conntrack_ipv6: can't register local_out defrag "
+		       "hook.\n");
+		goto cleanup_defragops;
+	}
+
+	ret = nf_register_hook(&ipv6_conntrack_in_ops);
+	if (ret < 0) {
+		printk("nf_conntrack_ipv6: can't register pre-routing hook.\n");
+		goto cleanup_defraglocalops;
+	}
+
+	ret = nf_register_hook(&ipv6_conntrack_local_out_ops);
+	if (ret < 0) {
+		printk("nf_conntrack_ipv6: can't register local out hook.\n");
+		goto cleanup_inops;
+	}
+
+	ret = nf_register_hook(&ipv6_conntrack_out_ops);
+	if (ret < 0) {
+		printk("nf_conntrack_ipv6: can't register post-routing hook.\n");
+		goto cleanup_inandlocalops;
+	}
+
+	ret = nf_register_hook(&ipv6_conntrack_local_in_ops);
+	if (ret < 0) {
+		printk("nf_conntrack_ipv6: can't register local in hook.\n");
+		goto cleanup_inoutandlocalops;
+	}
+
+#ifdef CONFIG_SYSCTL
+	nf_ct_ipv6_sysctl_header = register_sysctl_table(nf_ct_net_table, 0);
+	if (nf_ct_ipv6_sysctl_header == NULL) {
+		printk("nf_conntrack: can't register to sysctl.\n");
+		ret = -ENOMEM;
+		goto cleanup_localinops;
+	}
+#endif
+	return ret;
+
+ cleanup:
+	synchronize_net();
+#ifdef CONFIG_SYSCTL
+ 	unregister_sysctl_table(nf_ct_ipv6_sysctl_header);
+ cleanup_localinops:
+#endif
+	nf_unregister_hook(&ipv6_conntrack_local_in_ops);
+ cleanup_inoutandlocalops:
+	nf_unregister_hook(&ipv6_conntrack_out_ops);
+ cleanup_inandlocalops:
+	nf_unregister_hook(&ipv6_conntrack_local_out_ops);
+ cleanup_inops:
+	nf_unregister_hook(&ipv6_conntrack_in_ops);
+ cleanup_defraglocalops:
+	nf_unregister_hook(&ipv6_conntrack_defrag_local_out_ops);
+ cleanup_defragops:
+	nf_unregister_hook(&ipv6_conntrack_defrag_ops);
+ cleanup_ipv6:
+	nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv6);
+ cleanup_icmpv6:
+	nf_conntrack_protocol_unregister(&nf_conntrack_protocol_icmpv6);
+ cleanup_udp:
+	nf_conntrack_protocol_unregister(&nf_conntrack_protocol_udp6);
+ cleanup_tcp:
+	nf_conntrack_protocol_unregister(&nf_conntrack_protocol_tcp6);
+ cleanup_frag6:
+	nf_ct_frag6_cleanup();
+ cleanup_nothing:
+	return ret;
+}
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Yasuyuki KOZAKAI @USAGI <yasuyuki.kozakai@toshiba.co.jp>");
+
+static int __init init(void)
+{
+	need_nf_conntrack();
+	return init_or_cleanup(1);
+}
+
+static void __exit fini(void)
+{
+	init_or_cleanup(0);
+}
+
+module_init(init);
+module_exit(fini);
+
+void need_ip6_conntrack(void)
+{
+}
+
+EXPORT_SYMBOL(need_ip6_conntrack);
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
new file mode 100644
index 0000000..c0f1da5
--- /dev/null
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -0,0 +1,272 @@
+/*
+ * Copyright (C)2003,2004 USAGI/WIDE Project
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Author:
+ *	Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ *
+ * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ *	- ICMPv6 tracking support. Derived from the original ip_conntrack code
+ *	  net/ipv4/netfilter/ip_conntrack_proto_icmp.c which had the following
+ *	  copyright information:
+ *		(C) 1999-2001 Paul `Rusty' Russell
+ *		(C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
+ */
+
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/module.h>
+#include <linux/netfilter.h>
+#include <linux/in6.h>
+#include <linux/icmpv6.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
+#include <net/ip6_checksum.h>
+#include <linux/seq_file.h>
+#include <linux/netfilter_ipv6.h>
+#include <net/netfilter/nf_conntrack_tuple.h>
+#include <net/netfilter/nf_conntrack_protocol.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/ipv6/nf_conntrack_icmpv6.h>
+
+unsigned long nf_ct_icmpv6_timeout = 30*HZ;
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(format, args...)
+#endif
+
+static int icmpv6_pkt_to_tuple(const struct sk_buff *skb,
+			       unsigned int dataoff,
+			       struct nf_conntrack_tuple *tuple)
+{
+	struct icmp6hdr _hdr, *hp;
+
+	hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
+	if (hp == NULL)
+		return 0;
+	tuple->dst.u.icmp.type = hp->icmp6_type;
+	tuple->src.u.icmp.id = hp->icmp6_identifier;
+	tuple->dst.u.icmp.code = hp->icmp6_code;
+
+	return 1;
+}
+
+static int icmpv6_invert_tuple(struct nf_conntrack_tuple *tuple,
+			       const struct nf_conntrack_tuple *orig)
+{
+	/* Add 1; spaces filled with 0. */
+	static u_int8_t invmap[] = {
+		[ICMPV6_ECHO_REQUEST - 128]	= ICMPV6_ECHO_REPLY + 1,
+		[ICMPV6_ECHO_REPLY - 128]	= ICMPV6_ECHO_REQUEST + 1,
+		[ICMPV6_NI_QUERY - 128]		= ICMPV6_NI_QUERY + 1,
+		[ICMPV6_NI_REPLY - 128]		= ICMPV6_NI_REPLY + 1
+	};
+
+	__u8 type = orig->dst.u.icmp.type - 128;
+	if (type >= sizeof(invmap) || !invmap[type])
+		return 0;
+
+	tuple->src.u.icmp.id   = orig->src.u.icmp.id;
+	tuple->dst.u.icmp.type = invmap[type] - 1;
+	tuple->dst.u.icmp.code = orig->dst.u.icmp.code;
+	return 1;
+}
+
+/* Print out the per-protocol part of the tuple. */
+static int icmpv6_print_tuple(struct seq_file *s,
+			      const struct nf_conntrack_tuple *tuple)
+{
+	return seq_printf(s, "type=%u code=%u id=%u ",
+			  tuple->dst.u.icmp.type,
+			  tuple->dst.u.icmp.code,
+			  ntohs(tuple->src.u.icmp.id));
+}
+
+/* Print out the private part of the conntrack. */
+static int icmpv6_print_conntrack(struct seq_file *s,
+				  const struct nf_conn *conntrack)
+{
+	return 0;
+}
+
+/* Returns verdict for packet, or -1 for invalid. */
+static int icmpv6_packet(struct nf_conn *ct,
+		       const struct sk_buff *skb,
+		       unsigned int dataoff,
+		       enum ip_conntrack_info ctinfo,
+		       int pf,
+		       unsigned int hooknum)
+{
+	/* Try to delete connection immediately after all replies:
+           won't actually vanish as we still have skb, and del_timer
+           means this will only run once even if count hits zero twice
+           (theoretically possible with SMP) */
+	if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY) {
+		if (atomic_dec_and_test(&ct->proto.icmp.count)
+		    && del_timer(&ct->timeout))
+			ct->timeout.function((unsigned long)ct);
+	} else {
+		atomic_inc(&ct->proto.icmp.count);
+		nf_conntrack_event_cache(IPCT_PROTOINFO_VOLATILE, skb);
+		nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_icmpv6_timeout);
+	}
+
+	return NF_ACCEPT;
+}
+
+/* Called when a new connection for this protocol found. */
+static int icmpv6_new(struct nf_conn *conntrack,
+		      const struct sk_buff *skb,
+		      unsigned int dataoff)
+{
+	static u_int8_t valid_new[] = {
+		[ICMPV6_ECHO_REQUEST - 128] = 1,
+		[ICMPV6_NI_QUERY - 128] = 1
+	};
+
+	if (conntrack->tuplehash[0].tuple.dst.u.icmp.type - 128 >= sizeof(valid_new)
+	    || !valid_new[conntrack->tuplehash[0].tuple.dst.u.icmp.type - 128]) {
+		/* Can't create a new ICMPv6 `conn' with this. */
+		DEBUGP("icmp: can't create new conn with type %u\n",
+		       conntrack->tuplehash[0].tuple.dst.u.icmp.type);
+		NF_CT_DUMP_TUPLE(&conntrack->tuplehash[0].tuple);
+		return 0;
+	}
+	atomic_set(&conntrack->proto.icmp.count, 0);
+	return 1;
+}
+
+extern int
+nf_ct_ipv6_skip_exthdr(struct sk_buff *skb, int start, u8 *nexthdrp, int len);
+extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6;
+static int
+icmpv6_error_message(struct sk_buff *skb,
+		     unsigned int icmp6off,
+		     enum ip_conntrack_info *ctinfo,
+		     unsigned int hooknum)
+{
+	struct nf_conntrack_tuple intuple, origtuple;
+	struct nf_conntrack_tuple_hash *h;
+	struct icmp6hdr _hdr, *hp;
+	unsigned int inip6off;
+	struct nf_conntrack_protocol *inproto;
+	u_int8_t inprotonum;
+	unsigned int inprotoff;
+
+	NF_CT_ASSERT(skb->nfct == NULL);
+
+	hp = skb_header_pointer(skb, icmp6off, sizeof(_hdr), &_hdr);
+	if (hp == NULL) {
+		DEBUGP("icmpv6_error: Can't get ICMPv6 hdr.\n");
+		return -NF_ACCEPT;
+	}
+
+	inip6off = icmp6off + sizeof(_hdr);
+	if (skb_copy_bits(skb, inip6off+offsetof(struct ipv6hdr, nexthdr),
+			  &inprotonum, sizeof(inprotonum)) != 0) {
+		DEBUGP("icmpv6_error: Can't get nexthdr in inner IPv6 header.\n");
+		return -NF_ACCEPT;
+	}
+	inprotoff = nf_ct_ipv6_skip_exthdr(skb,
+					   inip6off + sizeof(struct ipv6hdr),
+					   &inprotonum,
+					   skb->len - inip6off
+						    - sizeof(struct ipv6hdr));
+
+	if ((inprotoff < 0) || (inprotoff > skb->len) ||
+	    (inprotonum == NEXTHDR_FRAGMENT)) {
+		DEBUGP("icmpv6_error: Can't get protocol header in ICMPv6 payload.\n");
+		return -NF_ACCEPT;
+	}
+
+	inproto = nf_ct_find_proto(PF_INET6, inprotonum);
+
+	/* Are they talking about one of our connections? */
+	if (!nf_ct_get_tuple(skb, inip6off, inprotoff, PF_INET6, inprotonum,
+			     &origtuple, &nf_conntrack_l3proto_ipv6, inproto)) {
+		DEBUGP("icmpv6_error: Can't get tuple\n");
+		return -NF_ACCEPT;
+	}
+
+	/* Ordinarily, we'd expect the inverted tupleproto, but it's
+	   been preserved inside the ICMP. */
+	if (!nf_ct_invert_tuple(&intuple, &origtuple,
+				&nf_conntrack_l3proto_ipv6, inproto)) {
+		DEBUGP("icmpv6_error: Can't invert tuple\n");
+		return -NF_ACCEPT;
+	}
+
+	*ctinfo = IP_CT_RELATED;
+
+	h = nf_conntrack_find_get(&intuple, NULL);
+	if (!h) {
+		DEBUGP("icmpv6_error: no match\n");
+		return -NF_ACCEPT;
+	} else {
+		if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY)
+			*ctinfo += IP_CT_IS_REPLY;
+	}
+
+	/* Update skb to refer to this connection */
+	skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general;
+	skb->nfctinfo = *ctinfo;
+	return -NF_ACCEPT;
+}
+
+static int
+icmpv6_error(struct sk_buff *skb, unsigned int dataoff,
+	     enum ip_conntrack_info *ctinfo, int pf, unsigned int hooknum)
+{
+	struct icmp6hdr _ih, *icmp6h;
+
+	icmp6h = skb_header_pointer(skb, dataoff, sizeof(_ih), &_ih);
+	if (icmp6h == NULL) {
+		if (LOG_INVALID(IPPROTO_ICMPV6))
+			nf_log_packet(PF_INET6, 0, skb, NULL, NULL, NULL,
+				      "nf_ct_icmpv6: short packet ");
+		return -NF_ACCEPT;
+	}
+
+	if (hooknum != NF_IP6_PRE_ROUTING)
+		goto skipped;
+
+	/* Ignore it if the checksum's bogus. */
+	if (csum_ipv6_magic(&skb->nh.ipv6h->saddr, &skb->nh.ipv6h->daddr,
+			    skb->len - dataoff, IPPROTO_ICMPV6,
+			    skb_checksum(skb, dataoff,
+					 skb->len - dataoff, 0))) {
+		nf_log_packet(PF_INET6, 0, skb, NULL, NULL, NULL,
+			      "nf_ct_icmpv6: ICMPv6 checksum failed\n");
+		return -NF_ACCEPT;
+	}
+
+skipped:
+
+	/* ICMPv6 types >= 128 are informational, not error messages. */
+	if (icmp6h->icmp6_type >= 128)
+		return NF_ACCEPT;
+
+	return icmpv6_error_message(skb, dataoff, ctinfo, hooknum);
+}
+
+struct nf_conntrack_protocol nf_conntrack_protocol_icmpv6 =
+{
+	.l3proto		= PF_INET6,
+	.proto			= IPPROTO_ICMPV6,
+	.name			= "icmpv6",
+	.pkt_to_tuple		= icmpv6_pkt_to_tuple,
+	.invert_tuple		= icmpv6_invert_tuple,
+	.print_tuple		= icmpv6_print_tuple,
+	.print_conntrack	= icmpv6_print_conntrack,
+	.packet			= icmpv6_packet,
+	.new			= icmpv6_new,
+	.error			= icmpv6_error,
+};
+
+EXPORT_SYMBOL(nf_conntrack_protocol_icmpv6);
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
new file mode 100644
index 0000000..7640b9b
--- /dev/null
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -0,0 +1,885 @@
+/*
+ * IPv6 fragment reassembly for connection tracking
+ *
+ * Copyright (C)2004 USAGI/WIDE Project
+ *
+ * Author:
+ *	Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ *
+ * Based on: net/ipv6/reassembly.c
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/socket.h>
+#include <linux/sockios.h>
+#include <linux/jiffies.h>
+#include <linux/net.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/in6.h>
+#include <linux/ipv6.h>
+#include <linux/icmpv6.h>
+#include <linux/random.h>
+#include <linux/jhash.h>
+
+#include <net/sock.h>
+#include <net/snmp.h>
+
+#include <net/ipv6.h>
+#include <net/protocol.h>
+#include <net/transp_v6.h>
+#include <net/rawv6.h>
+#include <net/ndisc.h>
+#include <net/addrconf.h>
+#include <linux/sysctl.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(format, args...)
+#endif
+
+#define NF_CT_FRAG6_HIGH_THRESH 262144 /* == 256*1024 */
+#define NF_CT_FRAG6_LOW_THRESH 196608  /* == 192*1024 */
+#define NF_CT_FRAG6_TIMEOUT IPV6_FRAG_TIMEOUT
+
+int nf_ct_frag6_high_thresh = 256*1024;
+int nf_ct_frag6_low_thresh = 192*1024;
+int nf_ct_frag6_timeout = IPV6_FRAG_TIMEOUT;
+
+struct nf_ct_frag6_skb_cb
+{
+	struct inet6_skb_parm	h;
+	int			offset;
+	struct sk_buff		*orig;
+};
+
+#define NFCT_FRAG6_CB(skb)	((struct nf_ct_frag6_skb_cb*)((skb)->cb))
+
+struct nf_ct_frag6_queue
+{
+	struct nf_ct_frag6_queue	*next;
+	struct list_head lru_list;		/* lru list member	*/
+
+	__u32			id;		/* fragment id		*/
+	struct in6_addr		saddr;
+	struct in6_addr		daddr;
+
+	spinlock_t		lock;
+	atomic_t		refcnt;
+	struct timer_list	timer;		/* expire timer		*/
+	struct sk_buff		*fragments;
+	int			len;
+	int			meat;
+	struct timeval		stamp;
+	unsigned int		csum;
+	__u8			last_in;	/* has first/last segment arrived? */
+#define COMPLETE		4
+#define FIRST_IN		2
+#define LAST_IN			1
+	__u16			nhoffset;
+	struct nf_ct_frag6_queue	**pprev;
+};
+
+/* Hash table. */
+
+#define FRAG6Q_HASHSZ	64
+
+static struct nf_ct_frag6_queue *nf_ct_frag6_hash[FRAG6Q_HASHSZ];
+static rwlock_t nf_ct_frag6_lock = RW_LOCK_UNLOCKED;
+static u32 nf_ct_frag6_hash_rnd;
+static LIST_HEAD(nf_ct_frag6_lru_list);
+int nf_ct_frag6_nqueues = 0;
+
+static __inline__ void __fq_unlink(struct nf_ct_frag6_queue *fq)
+{
+	if (fq->next)
+		fq->next->pprev = fq->pprev;
+	*fq->pprev = fq->next;
+	list_del(&fq->lru_list);
+	nf_ct_frag6_nqueues--;
+}
+
+static __inline__ void fq_unlink(struct nf_ct_frag6_queue *fq)
+{
+	write_lock(&nf_ct_frag6_lock);
+	__fq_unlink(fq);
+	write_unlock(&nf_ct_frag6_lock);
+}
+
+static unsigned int ip6qhashfn(u32 id, struct in6_addr *saddr,
+			       struct in6_addr *daddr)
+{
+	u32 a, b, c;
+
+	a = saddr->s6_addr32[0];
+	b = saddr->s6_addr32[1];
+	c = saddr->s6_addr32[2];
+
+	a += JHASH_GOLDEN_RATIO;
+	b += JHASH_GOLDEN_RATIO;
+	c += nf_ct_frag6_hash_rnd;
+	__jhash_mix(a, b, c);
+
+	a += saddr->s6_addr32[3];
+	b += daddr->s6_addr32[0];
+	c += daddr->s6_addr32[1];
+	__jhash_mix(a, b, c);
+
+	a += daddr->s6_addr32[2];
+	b += daddr->s6_addr32[3];
+	c += id;
+	__jhash_mix(a, b, c);
+
+	return c & (FRAG6Q_HASHSZ - 1);
+}
+
+static struct timer_list nf_ct_frag6_secret_timer;
+int nf_ct_frag6_secret_interval = 10 * 60 * HZ;
+
+static void nf_ct_frag6_secret_rebuild(unsigned long dummy)
+{
+	unsigned long now = jiffies;
+	int i;
+
+	write_lock(&nf_ct_frag6_lock);
+	get_random_bytes(&nf_ct_frag6_hash_rnd, sizeof(u32));
+	for (i = 0; i < FRAG6Q_HASHSZ; i++) {
+		struct nf_ct_frag6_queue *q;
+
+		q = nf_ct_frag6_hash[i];
+		while (q) {
+			struct nf_ct_frag6_queue *next = q->next;
+			unsigned int hval = ip6qhashfn(q->id,
+						       &q->saddr,
+						       &q->daddr);
+
+			if (hval != i) {
+				/* Unlink. */
+				if (q->next)
+					q->next->pprev = q->pprev;
+				*q->pprev = q->next;
+
+				/* Relink to new hash chain. */
+				if ((q->next = nf_ct_frag6_hash[hval]) != NULL)
+					q->next->pprev = &q->next;
+				nf_ct_frag6_hash[hval] = q;
+				q->pprev = &nf_ct_frag6_hash[hval];
+			}
+
+			q = next;
+		}
+	}
+	write_unlock(&nf_ct_frag6_lock);
+
+	mod_timer(&nf_ct_frag6_secret_timer, now + nf_ct_frag6_secret_interval);
+}
+
+atomic_t nf_ct_frag6_mem = ATOMIC_INIT(0);
+
+/* Memory Tracking Functions. */
+static inline void frag_kfree_skb(struct sk_buff *skb)
+{
+	atomic_sub(skb->truesize, &nf_ct_frag6_mem);
+	if (NFCT_FRAG6_CB(skb)->orig)
+		kfree_skb(NFCT_FRAG6_CB(skb)->orig);
+
+	kfree_skb(skb);
+}
+
+static inline void frag_free_queue(struct nf_ct_frag6_queue *fq)
+{
+	atomic_sub(sizeof(struct nf_ct_frag6_queue), &nf_ct_frag6_mem);
+	kfree(fq);
+}
+
+static inline struct nf_ct_frag6_queue *frag_alloc_queue(void)
+{
+	struct nf_ct_frag6_queue *fq = kmalloc(sizeof(struct nf_ct_frag6_queue), GFP_ATOMIC);
+
+	if (!fq)
+		return NULL;
+	atomic_add(sizeof(struct nf_ct_frag6_queue), &nf_ct_frag6_mem);
+	return fq;
+}
+
+/* Destruction primitives. */
+
+/* Complete destruction of fq. */
+static void nf_ct_frag6_destroy(struct nf_ct_frag6_queue *fq)
+{
+	struct sk_buff *fp;
+
+	BUG_TRAP(fq->last_in&COMPLETE);
+	BUG_TRAP(del_timer(&fq->timer) == 0);
+
+	/* Release all fragment data. */
+	fp = fq->fragments;
+	while (fp) {
+		struct sk_buff *xp = fp->next;
+
+		frag_kfree_skb(fp);
+		fp = xp;
+	}
+
+	frag_free_queue(fq);
+}
+
+static __inline__ void fq_put(struct nf_ct_frag6_queue *fq)
+{
+	if (atomic_dec_and_test(&fq->refcnt))
+		nf_ct_frag6_destroy(fq);
+}
+
+/* Kill fq entry. It is not destroyed immediately,
+ * because caller (and someone more) holds reference count.
+ */
+static __inline__ void fq_kill(struct nf_ct_frag6_queue *fq)
+{
+	if (del_timer(&fq->timer))
+		atomic_dec(&fq->refcnt);
+
+	if (!(fq->last_in & COMPLETE)) {
+		fq_unlink(fq);
+		atomic_dec(&fq->refcnt);
+		fq->last_in |= COMPLETE;
+	}
+}
+
+static void nf_ct_frag6_evictor(void)
+{
+	struct nf_ct_frag6_queue *fq;
+	struct list_head *tmp;
+
+	for (;;) {
+		if (atomic_read(&nf_ct_frag6_mem) <= nf_ct_frag6_low_thresh)
+			return;
+		read_lock(&nf_ct_frag6_lock);
+		if (list_empty(&nf_ct_frag6_lru_list)) {
+			read_unlock(&nf_ct_frag6_lock);
+			return;
+		}
+		tmp = nf_ct_frag6_lru_list.next;
+		fq = list_entry(tmp, struct nf_ct_frag6_queue, lru_list);
+		atomic_inc(&fq->refcnt);
+		read_unlock(&nf_ct_frag6_lock);
+
+		spin_lock(&fq->lock);
+		if (!(fq->last_in&COMPLETE))
+			fq_kill(fq);
+		spin_unlock(&fq->lock);
+
+		fq_put(fq);
+	}
+}
+
+static void nf_ct_frag6_expire(unsigned long data)
+{
+	struct nf_ct_frag6_queue *fq = (struct nf_ct_frag6_queue *) data;
+
+	spin_lock(&fq->lock);
+
+	if (fq->last_in & COMPLETE)
+		goto out;
+
+	fq_kill(fq);
+
+out:
+	spin_unlock(&fq->lock);
+	fq_put(fq);
+}
+
+/* Creation primitives. */
+
+
+static struct nf_ct_frag6_queue *nf_ct_frag6_intern(unsigned int hash,
+					  struct nf_ct_frag6_queue *fq_in)
+{
+	struct nf_ct_frag6_queue *fq;
+
+	write_lock(&nf_ct_frag6_lock);
+#ifdef CONFIG_SMP
+	for (fq = nf_ct_frag6_hash[hash]; fq; fq = fq->next) {
+		if (fq->id == fq_in->id && 
+		    !ipv6_addr_cmp(&fq_in->saddr, &fq->saddr) &&
+		    !ipv6_addr_cmp(&fq_in->daddr, &fq->daddr)) {
+			atomic_inc(&fq->refcnt);
+			write_unlock(&nf_ct_frag6_lock);
+			fq_in->last_in |= COMPLETE;
+			fq_put(fq_in);
+			return fq;
+		}
+	}
+#endif
+	fq = fq_in;
+
+	if (!mod_timer(&fq->timer, jiffies + nf_ct_frag6_timeout))
+		atomic_inc(&fq->refcnt);
+
+	atomic_inc(&fq->refcnt);
+	if ((fq->next = nf_ct_frag6_hash[hash]) != NULL)
+		fq->next->pprev = &fq->next;
+	nf_ct_frag6_hash[hash] = fq;
+	fq->pprev = &nf_ct_frag6_hash[hash];
+	INIT_LIST_HEAD(&fq->lru_list);
+	list_add_tail(&fq->lru_list, &nf_ct_frag6_lru_list);
+	nf_ct_frag6_nqueues++;
+	write_unlock(&nf_ct_frag6_lock);
+	return fq;
+}
+
+
+static struct nf_ct_frag6_queue *
+nf_ct_frag6_create(unsigned int hash, u32 id, struct in6_addr *src,
+		   struct in6_addr *dst)
+{
+	struct nf_ct_frag6_queue *fq;
+
+	if ((fq = frag_alloc_queue()) == NULL) {
+		DEBUGP("Can't alloc new queue\n");
+		goto oom;
+	}
+
+	memset(fq, 0, sizeof(struct nf_ct_frag6_queue));
+
+	fq->id = id;
+	ipv6_addr_copy(&fq->saddr, src);
+	ipv6_addr_copy(&fq->daddr, dst);
+
+	init_timer(&fq->timer);
+	fq->timer.function = nf_ct_frag6_expire;
+	fq->timer.data = (long) fq;
+	fq->lock = SPIN_LOCK_UNLOCKED;
+	atomic_set(&fq->refcnt, 1);
+
+	return nf_ct_frag6_intern(hash, fq);
+
+oom:
+	return NULL;
+}
+
+static __inline__ struct nf_ct_frag6_queue *
+fq_find(u32 id, struct in6_addr *src, struct in6_addr *dst)
+{
+	struct nf_ct_frag6_queue *fq;
+	unsigned int hash = ip6qhashfn(id, src, dst);
+
+	read_lock(&nf_ct_frag6_lock);
+	for (fq = nf_ct_frag6_hash[hash]; fq; fq = fq->next) {
+		if (fq->id == id && 
+		    !ipv6_addr_cmp(src, &fq->saddr) &&
+		    !ipv6_addr_cmp(dst, &fq->daddr)) {
+			atomic_inc(&fq->refcnt);
+			read_unlock(&nf_ct_frag6_lock);
+			return fq;
+		}
+	}
+	read_unlock(&nf_ct_frag6_lock);
+
+	return nf_ct_frag6_create(hash, id, src, dst);
+}
+
+
+static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb, 
+			     struct frag_hdr *fhdr, int nhoff)
+{
+	struct sk_buff *prev, *next;
+	int offset, end;
+
+	if (fq->last_in & COMPLETE) {
+		DEBUGP("Allready completed\n");
+		goto err;
+	}
+
+	offset = ntohs(fhdr->frag_off) & ~0x7;
+	end = offset + (ntohs(skb->nh.ipv6h->payload_len) -
+			((u8 *) (fhdr + 1) - (u8 *) (skb->nh.ipv6h + 1)));
+
+	if ((unsigned int)end > IPV6_MAXPLEN) {
+		DEBUGP("offset is too large.\n");
+		return -1;
+	}
+
+	if (skb->ip_summed == CHECKSUM_HW)
+		skb->csum = csum_sub(skb->csum,
+				     csum_partial(skb->nh.raw,
+						  (u8*)(fhdr + 1) - skb->nh.raw,
+						  0));
+
+	/* Is this the final fragment? */
+	if (!(fhdr->frag_off & htons(IP6_MF))) {
+		/* If we already have some bits beyond end
+		 * or have different end, the segment is corrupted.
+		 */
+		if (end < fq->len ||
+		    ((fq->last_in & LAST_IN) && end != fq->len)) {
+			DEBUGP("already received last fragment\n");
+			goto err;
+		}
+		fq->last_in |= LAST_IN;
+		fq->len = end;
+	} else {
+		/* Check if the fragment is rounded to 8 bytes.
+		 * Required by the RFC.
+		 */
+		if (end & 0x7) {
+			/* RFC2460 says always send parameter problem in
+			 * this case. -DaveM
+			 */
+			DEBUGP("the end of this fragment is not rounded to 8 bytes.\n");
+			return -1;
+		}
+		if (end > fq->len) {
+			/* Some bits beyond end -> corruption. */
+			if (fq->last_in & LAST_IN) {
+				DEBUGP("last packet already reached.\n");
+				goto err;
+			}
+			fq->len = end;
+		}
+	}
+
+	if (end == offset)
+		goto err;
+
+	/* Point into the IP datagram 'data' part. */
+	if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data)) {
+		DEBUGP("queue: message is too short.\n");
+		goto err;
+	}
+	if (end-offset < skb->len) {
+		if (pskb_trim(skb, end - offset)) {
+			DEBUGP("Can't trim\n");
+			goto err;
+		}
+		if (skb->ip_summed != CHECKSUM_UNNECESSARY)
+			skb->ip_summed = CHECKSUM_NONE;
+	}
+
+	/* Find out which fragments are in front and at the back of us
+	 * in the chain of fragments so far.  We must know where to put
+	 * this fragment, right?
+	 */
+	prev = NULL;
+	for (next = fq->fragments; next != NULL; next = next->next) {
+		if (NFCT_FRAG6_CB(next)->offset >= offset)
+			break;	/* bingo! */
+		prev = next;
+	}
+
+	/* We found where to put this one.  Check for overlap with
+	 * preceding fragment, and, if needed, align things so that
+	 * any overlaps are eliminated.
+	 */
+	if (prev) {
+		int i = (NFCT_FRAG6_CB(prev)->offset + prev->len) - offset;
+
+		if (i > 0) {
+			offset += i;
+			if (end <= offset) {
+				DEBUGP("overlap\n");
+				goto err;
+			}
+			if (!pskb_pull(skb, i)) {
+				DEBUGP("Can't pull\n");
+				goto err;
+			}
+			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
+				skb->ip_summed = CHECKSUM_NONE;
+		}
+	}
+
+	/* Look for overlap with succeeding segments.
+	 * If we can merge fragments, do it.
+	 */
+	while (next && NFCT_FRAG6_CB(next)->offset < end) {
+		/* overlap is 'i' bytes */
+		int i = end - NFCT_FRAG6_CB(next)->offset;
+
+		if (i < next->len) {
+			/* Eat head of the next overlapped fragment
+			 * and leave the loop. The next ones cannot overlap.
+			 */
+			DEBUGP("Eat head of the overlapped parts.: %d", i);
+			if (!pskb_pull(next, i))
+				goto err;
+
+			/* next fragment */
+			NFCT_FRAG6_CB(next)->offset += i;
+			fq->meat -= i;
+			if (next->ip_summed != CHECKSUM_UNNECESSARY)
+				next->ip_summed = CHECKSUM_NONE;
+			break;
+		} else {
+			struct sk_buff *free_it = next;
+
+			/* Old fragment is completely overridden with
+			 * the new one; drop it.
+			 */
+			next = next->next;
+
+			if (prev)
+				prev->next = next;
+			else
+				fq->fragments = next;
+
+			fq->meat -= free_it->len;
+			frag_kfree_skb(free_it);
+		}
+	}
+
+	NFCT_FRAG6_CB(skb)->offset = offset;
+
+	/* Insert this fragment in the chain of fragments. */
+	skb->next = next;
+	if (prev)
+		prev->next = skb;
+	else
+		fq->fragments = skb;
+
+	skb->dev = NULL;
+	skb_get_timestamp(skb, &fq->stamp);
+	fq->meat += skb->len;
+	atomic_add(skb->truesize, &nf_ct_frag6_mem);
+
+	/* The first fragment.
+	 * nhoffset is obtained from the first fragment, of course.
+	 */
+	if (offset == 0) {
+		fq->nhoffset = nhoff;
+		fq->last_in |= FIRST_IN;
+	}
+	write_lock(&nf_ct_frag6_lock);
+	list_move_tail(&fq->lru_list, &nf_ct_frag6_lru_list);
+	write_unlock(&nf_ct_frag6_lock);
+	return 0;
+
+err:
+	return -1;
+}
+
+/*
+ *	Check if this packet is complete.
+ *	Returns NULL on failure for any reason, and a pointer
+ *	to the current nexthdr field in the reassembled frame.
+ *
+ *	It is called with locked fq, and caller must check that
+ *	queue is eligible for reassembly i.e. it is not COMPLETE,
+ *	the last and the first frames arrived and all the bits are here.
+ */
+static struct sk_buff *
+nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
+{
+	struct sk_buff *fp, *op, *head = fq->fragments;
+	int    payload_len;
+
+	fq_kill(fq);
+
+	BUG_TRAP(head != NULL);
+	BUG_TRAP(NFCT_FRAG6_CB(head)->offset == 0);
+
+	/* Unfragmented part is taken from the first segment. */
+	payload_len = (head->data - head->nh.raw) - sizeof(struct ipv6hdr) + fq->len - sizeof(struct frag_hdr);
+	if (payload_len > IPV6_MAXPLEN) {
+		DEBUGP("payload len is too large.\n");
+		goto out_oversize;
+	}
+
+	/* Head of list must not be cloned. */
+	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC)) {
+		DEBUGP("skb is cloned but can't expand head");
+		goto out_oom;
+	}
+
+	/* If the first fragment is fragmented itself, we split
+	 * it to two chunks: the first with data and paged part
+	 * and the second, holding only fragments. */
+	if (skb_shinfo(head)->frag_list) {
+		struct sk_buff *clone;
+		int i, plen = 0;
+
+		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL) {
+			DEBUGP("Can't alloc skb\n");
+			goto out_oom;
+		}
+		clone->next = head->next;
+		head->next = clone;
+		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
+		skb_shinfo(head)->frag_list = NULL;
+		for (i=0; i<skb_shinfo(head)->nr_frags; i++)
+			plen += skb_shinfo(head)->frags[i].size;
+		clone->len = clone->data_len = head->data_len - plen;
+		head->data_len -= clone->len;
+		head->len -= clone->len;
+		clone->csum = 0;
+		clone->ip_summed = head->ip_summed;
+
+		NFCT_FRAG6_CB(clone)->orig = NULL;
+		atomic_add(clone->truesize, &nf_ct_frag6_mem);
+	}
+
+	/* We have to remove fragment header from datagram and to relocate
+	 * header in order to calculate ICV correctly. */
+	head->nh.raw[fq->nhoffset] = head->h.raw[0];
+	memmove(head->head + sizeof(struct frag_hdr), head->head, 
+		(head->data - head->head) - sizeof(struct frag_hdr));
+	head->mac.raw += sizeof(struct frag_hdr);
+	head->nh.raw += sizeof(struct frag_hdr);
+
+	skb_shinfo(head)->frag_list = head->next;
+	head->h.raw = head->data;
+	skb_push(head, head->data - head->nh.raw);
+	atomic_sub(head->truesize, &nf_ct_frag6_mem);
+
+	for (fp=head->next; fp; fp = fp->next) {
+		head->data_len += fp->len;
+		head->len += fp->len;
+		if (head->ip_summed != fp->ip_summed)
+			head->ip_summed = CHECKSUM_NONE;
+		else if (head->ip_summed == CHECKSUM_HW)
+			head->csum = csum_add(head->csum, fp->csum);
+		head->truesize += fp->truesize;
+		atomic_sub(fp->truesize, &nf_ct_frag6_mem);
+	}
+
+	head->next = NULL;
+	head->dev = dev;
+	skb_set_timestamp(head, &fq->stamp);
+	head->nh.ipv6h->payload_len = htons(payload_len);
+
+	/* Yes, and fold redundant checksum back. 8) */
+	if (head->ip_summed == CHECKSUM_HW)
+		head->csum = csum_partial(head->nh.raw, head->h.raw-head->nh.raw, head->csum);
+
+	fq->fragments = NULL;
+
+	/* all original skbs are linked into the NFCT_FRAG6_CB(head).orig */
+	fp = skb_shinfo(head)->frag_list;
+	if (NFCT_FRAG6_CB(fp)->orig == NULL)
+		/* in the code above, the head skb was divided into two skbs. */
+		fp = fp->next;
+
+	op = NFCT_FRAG6_CB(head)->orig;
+	for (; fp; fp = fp->next) {
+		struct sk_buff *orig = NFCT_FRAG6_CB(fp)->orig;
+
+		op->next = orig;
+		op = orig;
+		NFCT_FRAG6_CB(fp)->orig = NULL;
+	}
+
+	return head;
+
+out_oversize:
+	if (net_ratelimit())
+		printk(KERN_DEBUG "nf_ct_frag6_reasm: payload len = %d\n", payload_len);
+	goto out_fail;
+out_oom:
+	if (net_ratelimit())
+		printk(KERN_DEBUG "nf_ct_frag6_reasm: no memory for reassembly\n");
+out_fail:
+	return NULL;
+}
+
+/*
+ * find the header just before Fragment Header.
+ *
+ * On success, return 0 and set:
+ * (*prevhdrp): the value of "Next Header Field" in the header
+ *		just before Fragment Header.
+ * (*prevhoff): the offset of "Next Header Field" in the header
+ *		just before Fragment Header.
+ * (*fhoff)   : the offset of Fragment Header.
+ *
+ * Based on ipv6_skip_exthdr() in net/ipv6/exthdrs.c
+ *
+ */
+static int
+find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
+{
+	u8 nexthdr = skb->nh.ipv6h->nexthdr;
+	u8 prev_nhoff = (u8 *)&skb->nh.ipv6h->nexthdr - skb->data;
+	int start = (u8 *)(skb->nh.ipv6h+1) - skb->data;
+	int len = skb->len - start;
+	u8 prevhdr = NEXTHDR_IPV6;
+
+	while (nexthdr != NEXTHDR_FRAGMENT) {
+		struct ipv6_opt_hdr hdr;
+		int hdrlen;
+
+		if (!ipv6_ext_hdr(nexthdr)) {
+			return -1;
+		}
+		if (len < (int)sizeof(struct ipv6_opt_hdr)) {
+			DEBUGP("too short\n");
+			return -1;
+		}
+		if (nexthdr == NEXTHDR_NONE) {
+			DEBUGP("next header is none\n");
+			return -1;
+		}
+		if (skb_copy_bits(skb, start, &hdr, sizeof(hdr)))
+			BUG();
+		if (nexthdr == NEXTHDR_AUTH)
+			hdrlen = (hdr.hdrlen+2)<<2;
+		else
+			hdrlen = ipv6_optlen(&hdr);
+
+		prevhdr = nexthdr;
+		prev_nhoff = start;
+
+		nexthdr = hdr.nexthdr;
+		len -= hdrlen;
+		start += hdrlen;
+	}
+
+	if (len < 0)
+		return -1;
+
+	*prevhdrp = prevhdr;
+	*prevhoff = prev_nhoff;
+	*fhoff = start;
+
+	return 0;
+}
+
+struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
+{
+	struct sk_buff *clone; 
+	struct net_device *dev = skb->dev;
+	struct frag_hdr *fhdr;
+	struct nf_ct_frag6_queue *fq;
+	struct ipv6hdr *hdr;
+	int fhoff, nhoff;
+	u8 prevhdr;
+	struct sk_buff *ret_skb = NULL;
+
+	/* Jumbo payload inhibits frag. header */
+	if (skb->nh.ipv6h->payload_len == 0) {
+		DEBUGP("payload len = 0\n");
+		return skb;
+	}
+
+	if (find_prev_fhdr(skb, &prevhdr, &nhoff, &fhoff) < 0)
+		return skb;
+
+	clone = skb_clone(skb, GFP_ATOMIC);
+	if (clone == NULL) {
+		DEBUGP("Can't clone skb\n");
+		return skb;
+	}
+
+	NFCT_FRAG6_CB(clone)->orig = skb;
+
+	if (!pskb_may_pull(clone, fhoff + sizeof(*fhdr))) {
+		DEBUGP("message is too short.\n");
+		goto ret_orig;
+	}
+
+	clone->h.raw = clone->data + fhoff;
+	hdr = clone->nh.ipv6h;
+	fhdr = (struct frag_hdr *)clone->h.raw;
+
+	if (!(fhdr->frag_off & htons(0xFFF9))) {
+		DEBUGP("Invalid fragment offset\n");
+		/* It is not a fragmented frame */
+		goto ret_orig;
+	}
+
+	if (atomic_read(&nf_ct_frag6_mem) > nf_ct_frag6_high_thresh)
+		nf_ct_frag6_evictor();
+
+	fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr);
+	if (fq == NULL) {
+		DEBUGP("Can't find and can't create new queue\n");
+		goto ret_orig;
+	}
+
+	spin_lock(&fq->lock);
+
+	if (nf_ct_frag6_queue(fq, clone, fhdr, nhoff) < 0) {
+		spin_unlock(&fq->lock);
+		DEBUGP("Can't insert skb to queue\n");
+		fq_put(fq);
+		goto ret_orig;
+	}
+
+	if (fq->last_in == (FIRST_IN|LAST_IN) && fq->meat == fq->len) {
+		ret_skb = nf_ct_frag6_reasm(fq, dev);
+		if (ret_skb == NULL)
+			DEBUGP("Can't reassemble fragmented packets\n");
+	}
+	spin_unlock(&fq->lock);
+
+	fq_put(fq);
+	return ret_skb;
+
+ret_orig:
+	kfree_skb(clone);
+	return skb;
+}
+
+void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
+			struct net_device *in, struct net_device *out,
+			int (*okfn)(struct sk_buff *))
+{
+	struct sk_buff *s, *s2;
+
+	for (s = NFCT_FRAG6_CB(skb)->orig; s;) {
+		nf_conntrack_put_reasm(s->nfct_reasm);
+		nf_conntrack_get_reasm(skb);
+		s->nfct_reasm = skb;
+
+		s2 = s->next;
+		NF_HOOK_THRESH(PF_INET6, hooknum, s, in, out, okfn,
+			       NF_IP6_PRI_CONNTRACK_DEFRAG + 1);
+		s = s2;
+	}
+	nf_conntrack_put_reasm(skb);
+}
+
+int nf_ct_frag6_kfree_frags(struct sk_buff *skb)
+{
+	struct sk_buff *s, *s2;
+
+	for (s = NFCT_FRAG6_CB(skb)->orig; s; s = s2) {
+
+		s2 = s->next;
+		kfree_skb(s);
+	}
+
+	kfree_skb(skb);
+
+	return 0;
+}
+
+int nf_ct_frag6_init(void)
+{
+	nf_ct_frag6_hash_rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^
+				   (jiffies ^ (jiffies >> 6)));
+
+	init_timer(&nf_ct_frag6_secret_timer);
+	nf_ct_frag6_secret_timer.function = nf_ct_frag6_secret_rebuild;
+	nf_ct_frag6_secret_timer.expires = jiffies
+					   + nf_ct_frag6_secret_interval;
+	add_timer(&nf_ct_frag6_secret_timer);
+
+	return 0;
+}
+
+void nf_ct_frag6_cleanup(void)
+{
+	del_timer(&nf_ct_frag6_secret_timer);
+	nf_ct_frag6_evictor();
+}
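
The reassembly entry points above fit together as follows: nf_ct_frag6_gather() returns NULL while a datagram is still incomplete (the clone has been queued), returns the original skb untouched when the packet is not a fragment or cannot be handled, and returns the reassembled skb once all fragments have arrived; nf_ct_frag6_output() then re-injects the stashed originals with nfct_reasm pointing at the reassembled datagram. Below is a minimal sketch of a caller, assuming a netfilter hook registered at NF_IP6_PRI_CONNTRACK_DEFRAG and the 2.6.14-era nf_hookfn signature; the function name is hypothetical and not part of this patch.

static unsigned int ipv6_conntrack_defrag(unsigned int hooknum,
					  struct sk_buff **pskb,
					  const struct net_device *in,
					  const struct net_device *out,
					  int (*okfn)(struct sk_buff *))
{
	struct sk_buff *reasm;

	reasm = nf_ct_frag6_gather(*pskb);
	if (reasm == NULL)		/* queued (or dropped); original must not continue */
		return NF_STOLEN;
	if (reasm == *pskb)		/* not a fragment, pass it on unchanged */
		return NF_ACCEPT;

	/* Reassembly done: re-inject the original skbs, each carrying a
	 * reference to the reassembled datagram in skb->nfct_reasm. */
	nf_ct_frag6_output(hooknum, reasm, (struct net_device *)in,
			   (struct net_device *)out, okfn);
	return NF_STOLEN;
}
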
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index a1265a3..8e9628f 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -174,8 +174,10 @@
 			struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
 
 			/* Not releasing hash table! */
-			if (clone)
+			if (clone) {
+				nf_reset(clone);
 				rawv6_rcv(sk, clone);
+			}
 		}
 		sk = __raw_v6_lookup(sk_next(sk), nexthdr, daddr, saddr,
 				     IP6CB(skb)->iif);
@@ -296,13 +298,10 @@
 static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
 {
 	if ((raw6_sk(sk)->checksum || sk->sk_filter) && 
-	    skb->ip_summed != CHECKSUM_UNNECESSARY) {
-		if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum))) {
-			/* FIXME: increment a raw6 drops counter here */
-			kfree_skb(skb);
-			return 0;
-		}
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	    skb_checksum_complete(skb)) {
+		/* FIXME: increment a raw6 drops counter here */
+		kfree_skb(skb);
+		return 0;
 	}
 
 	/* Charge it to the socket. */
@@ -335,32 +334,25 @@
 	if (!rp->checksum)
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 
-	if (skb->ip_summed != CHECKSUM_UNNECESSARY) {
-		if (skb->ip_summed == CHECKSUM_HW) {
-			skb_postpull_rcsum(skb, skb->nh.raw,
-			                   skb->h.raw - skb->nh.raw);
+	if (skb->ip_summed == CHECKSUM_HW) {
+		skb_postpull_rcsum(skb, skb->nh.raw,
+		                   skb->h.raw - skb->nh.raw);
+		if (!csum_ipv6_magic(&skb->nh.ipv6h->saddr,
+				     &skb->nh.ipv6h->daddr,
+				     skb->len, inet->num, skb->csum))
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
-			if (csum_ipv6_magic(&skb->nh.ipv6h->saddr,
-					    &skb->nh.ipv6h->daddr,
-					    skb->len, inet->num, skb->csum)) {
-				LIMIT_NETDEBUG(KERN_DEBUG "raw v6 hw csum failure.\n");
-				skb->ip_summed = CHECKSUM_NONE;
-			}
-		}
-		if (skb->ip_summed == CHECKSUM_NONE)
-			skb->csum = ~csum_ipv6_magic(&skb->nh.ipv6h->saddr,
-						     &skb->nh.ipv6h->daddr,
-						     skb->len, inet->num, 0);
 	}
+	if (skb->ip_summed != CHECKSUM_UNNECESSARY)
+		skb->csum = ~csum_ipv6_magic(&skb->nh.ipv6h->saddr,
+					     &skb->nh.ipv6h->daddr,
+					     skb->len, inet->num, 0);
 
 	if (inet->hdrincl) {
-		if (skb->ip_summed != CHECKSUM_UNNECESSARY &&
-		    (unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum))) {
+		if (skb_checksum_complete(skb)) {
 			/* FIXME: increment a raw6 drops counter here */
 			kfree_skb(skb);
 			return 0;
 		}
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
 	}
 
 	rawv6_rcv_skb(sk, skb);
@@ -405,7 +397,7 @@
 	if (skb->ip_summed==CHECKSUM_UNNECESSARY) {
 		err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
 	} else if (msg->msg_flags&MSG_TRUNC) {
-		if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum)))
+		if (__skb_checksum_complete(skb))
 			goto csum_copy_err;
 		err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
 	} else {
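
The conversions in these raw.c hunks (and in the tcp_ipv6.c and udp.c hunks further down) replace open-coded software checksum verification with the skb_checksum_complete()/__skb_checksum_complete() helpers. As a rough sketch of the pattern being factored out, assuming the CHECKSUM_HW pseudo-header handling stays separate as in the hunks above:

/* Illustration only: approximately what the removed lines did by hand. */
static inline int raw6_csum_bad(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		return 0;
	if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum)))
		return 1;	/* full checksum over the packet failed */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	return 0;
}
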
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 227e99e..f7f42c3 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1710,7 +1710,7 @@
 static int fib6_dump_done(struct netlink_callback *cb)
 {
 	fib6_dump_end(cb);
-	return cb->done(cb);
+	return cb->done ? cb->done(cb) : 0;
 }
 
 int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index d746d3b..62c0e5b 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1401,20 +1401,18 @@
 static int tcp_v6_checksum_init(struct sk_buff *skb)
 {
 	if (skb->ip_summed == CHECKSUM_HW) {
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
 		if (!tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
-				  &skb->nh.ipv6h->daddr,skb->csum))
+				  &skb->nh.ipv6h->daddr,skb->csum)) {
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
 			return 0;
-		LIMIT_NETDEBUG(KERN_DEBUG "hw tcp v6 csum failed\n");
+		}
 	}
+
+	skb->csum = ~tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
+				  &skb->nh.ipv6h->daddr, 0);
+
 	if (skb->len <= 76) {
-		if (tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
-				 &skb->nh.ipv6h->daddr,skb_checksum(skb, 0, skb->len, 0)))
-			return -1;
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-	} else {
-		skb->csum = ~tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
-					  &skb->nh.ipv6h->daddr,0);
+		return __skb_checksum_complete(skb);
 	}
 	return 0;
 }
@@ -1575,7 +1573,7 @@
 		goto discard_it;
 
 	if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
-	     tcp_v6_checksum_init(skb) < 0))
+	     tcp_v6_checksum_init(skb)))
 		goto bad_packet;
 
 	th = skb->h.th;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index bf95193..e671153 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -248,7 +248,7 @@
 		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov,
 					      copied);
 	} else if (msg->msg_flags&MSG_TRUNC) {
-		if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum)))
+		if (__skb_checksum_complete(skb))
 			goto csum_copy_err;
 		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov,
 					      copied);
@@ -363,13 +363,10 @@
 		return -1;
 	}
 
-	if (skb->ip_summed != CHECKSUM_UNNECESSARY) {
-		if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum))) {
-			UDP6_INC_STATS_BH(UDP_MIB_INERRORS);
-			kfree_skb(skb);
-			return 0;
-		}
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	if (skb_checksum_complete(skb)) {
+		UDP6_INC_STATS_BH(UDP_MIB_INERRORS);
+		kfree_skb(skb);
+		return 0;
 	}
 
 	if (sock_queue_rcv_skb(sk,skb)<0) {
@@ -491,13 +488,10 @@
 		uh = skb->h.uh;
 	}
 
-	if (skb->ip_summed==CHECKSUM_HW) {
+	if (skb->ip_summed == CHECKSUM_HW &&
+	    !csum_ipv6_magic(saddr, daddr, ulen, IPPROTO_UDP, skb->csum))
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
-		if (csum_ipv6_magic(saddr, daddr, ulen, IPPROTO_UDP, skb->csum)) {
-			LIMIT_NETDEBUG(KERN_DEBUG "udp v6 hw csum failure.\n");
-			skb->ip_summed = CHECKSUM_NONE;
-		}
-	}
+
 	if (skb->ip_summed != CHECKSUM_UNNECESSARY)
 		skb->csum = ~csum_ipv6_magic(saddr, daddr, ulen, IPPROTO_UDP, 0);
 
@@ -521,8 +515,7 @@
 		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
 			goto discard;
 
-		if (skb->ip_summed != CHECKSUM_UNNECESSARY &&
-		    (unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum)))
+		if (skb_checksum_complete(skb))
 			goto discard;
 		UDP6_INC_STATS_BH(UDP_MIB_NOPORTS);
 
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 8296b38..a84f922 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -1,3 +1,6 @@
+menu "Core Netfilter Configuration"
+	depends on NET && NETFILTER
+
 config NETFILTER_NETLINK
        tristate "Netfilter netlink interface"
        help
@@ -22,3 +25,74 @@
 	  and is also scheduled to replace the old syslog-based ipt_LOG
 	  and ip6t_LOG modules.
 
+config NF_CONNTRACK
+	tristate "Layer 3 Independent Connection tracking (EXPERIMENTAL)"
+	depends on EXPERIMENTAL && IP_NF_CONNTRACK=n
+	default n
+	---help---
+	  Connection tracking keeps a record of what packets have passed
+	  through your machine, in order to figure out how they are related
+	  into connections.
+
+	  Layer 3 independent connection tracking is an experimental scheme
+	  which generalizes ip_conntrack to support other layer 3 protocols.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
+config NF_CT_ACCT
+	bool "Connection tracking flow accounting"
+	depends on NF_CONNTRACK
+	help
+	  If this option is enabled, the connection tracking code will
+	  keep per-flow packet and byte counters.
+
+	  Those counters can be used for flow-based accounting or the
+	  `connbytes' match.
+
+	  If unsure, say `N'.
+
+config NF_CONNTRACK_MARK
+	bool  'Connection mark tracking support'
+	depends on NF_CONNTRACK
+	help
+	  This option enables support for connection marks, used by the
+	  `CONNMARK' target and `connmark' match. Similar to the mark value
+	  of packets, but this mark value is kept in the conntrack session
+	  instead of the individual packets.
+
+config NF_CONNTRACK_EVENTS
+	bool "Connection tracking events"
+	depends on NF_CONNTRACK
+	help
+	  If this option is enabled, the connection tracking code will
+	  provide a notifier chain that can be used by other kernel code
+	  to get notified about changes in the connection tracking state.
+
+	  If unsure, say `N'.
+
+config NF_CT_PROTO_SCTP
+	tristate 'SCTP protocol on new connection tracking support (EXPERIMENTAL)'
+	depends on EXPERIMENTAL && NF_CONNTRACK
+	default n
+	help
+	  With this option enabled, the layer 3 independent connection
+	  tracking code will be able to do state tracking on SCTP connections.
+
+	  If you want to compile it as a module, say M here and read
+	  Documentation/modules.txt.  If unsure, say `N'.
+
+config NF_CONNTRACK_FTP
+	tristate "FTP support on new connection tracking (EXPERIMENTAL)"
+	depends on EXPERIMENTAL && NF_CONNTRACK
+	help
+	  Tracking FTP connections is problematic: special helpers are
+	  required for tracking them, and doing masquerading and other forms
+	  of Network Address Translation on them.
+
+	  This is FTP support on Layer 3 independent connection tracking.
+	  Layer 3 independent connection tracking is experimental scheme
+	  which generalize ip_conntrack to support other layer 3 protocols.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
+endmenu
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index b3b44f8..55f019a 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -5,3 +5,11 @@
 obj-$(CONFIG_NETFILTER_NETLINK) += nfnetlink.o
 obj-$(CONFIG_NETFILTER_NETLINK_QUEUE) += nfnetlink_queue.o
 obj-$(CONFIG_NETFILTER_NETLINK_LOG) += nfnetlink_log.o
+
+nf_conntrack-objs	:= nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_l3proto_generic.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o
+
+obj-$(CONFIG_NF_CONNTRACK) += nf_conntrack.o
+obj-$(CONFIG_NF_CONNTRACK_FTP) += nf_conntrack_ftp.o
+
+# SCTP protocol connection tracking
+obj-$(CONFIG_NF_CT_PROTO_SCTP) += nf_conntrack_proto_sctp.o
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
new file mode 100644
index 0000000..9a67c79
--- /dev/null
+++ b/net/netfilter/nf_conntrack_core.c
@@ -0,0 +1,1538 @@
+/* Connection state tracking for netfilter.  This is separated from,
+   but required by, the NAT layer; it can also be used by an iptables
+   extension. */
+
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2005 Netfilter Core Team <coreteam@netfilter.org>
+ * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * 23 Apr 2001: Harald Welte <laforge@gnumonks.org>
+ *	- new API and handling of conntrack/nat helpers
+ *	- now capable of multiple expectations for one master
+ * 16 Jul 2002: Harald Welte <laforge@gnumonks.org>
+ *	- add usage/reference counts to ip_conntrack_expect
+ *	- export ip_conntrack[_expect]_{find_get,put} functions
+ * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ *	- generalize L3 protocol dependent part.
+ * 23 Mar 2004: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ *	- add support various size of conntrack structures.
+ *
+ * Derived from net/ipv4/netfilter/ip_conntrack_core.c
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/netfilter.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/proc_fs.h>
+#include <linux/vmalloc.h>
+#include <linux/stddef.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/jhash.h>
+#include <linux/err.h>
+#include <linux/percpu.h>
+#include <linux/moduleparam.h>
+#include <linux/notifier.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/socket.h>
+
+/* This rwlock protects the main hash table, protocol/helper/expected
+   registrations, conntrack timers */
+#define ASSERT_READ_LOCK(x)
+#define ASSERT_WRITE_LOCK(x)
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_l3proto.h>
+#include <net/netfilter/nf_conntrack_protocol.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <linux/netfilter_ipv4/listhelp.h>
+
+#define NF_CONNTRACK_VERSION	"0.4.1"
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(format, args...)
+#endif
+
+DEFINE_RWLOCK(nf_conntrack_lock);
+
+/* nf_conntrack_standalone needs this */
+atomic_t nf_conntrack_count = ATOMIC_INIT(0);
+
+void (*nf_conntrack_destroyed)(struct nf_conn *conntrack) = NULL;
+LIST_HEAD(nf_conntrack_expect_list);
+struct nf_conntrack_protocol **nf_ct_protos[PF_MAX];
+struct nf_conntrack_l3proto *nf_ct_l3protos[PF_MAX];
+static LIST_HEAD(helpers);
+unsigned int nf_conntrack_htable_size = 0;
+int nf_conntrack_max;
+struct list_head *nf_conntrack_hash;
+static kmem_cache_t *nf_conntrack_expect_cachep;
+struct nf_conn nf_conntrack_untracked;
+unsigned int nf_ct_log_invalid;
+static LIST_HEAD(unconfirmed);
+static int nf_conntrack_vmalloc;
+
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+struct notifier_block *nf_conntrack_chain;
+struct notifier_block *nf_conntrack_expect_chain;
+
+DEFINE_PER_CPU(struct nf_conntrack_ecache, nf_conntrack_ecache);
+
+/* deliver cached events and clear cache entry - must be called with locally
+ * disabled softirqs */
+static inline void
+__nf_ct_deliver_cached_events(struct nf_conntrack_ecache *ecache)
+{
+	DEBUGP("ecache: delivering events for %p\n", ecache->ct);
+	if (nf_ct_is_confirmed(ecache->ct) && !nf_ct_is_dying(ecache->ct)
+	    && ecache->events)
+		notifier_call_chain(&nf_conntrack_chain, ecache->events,
+				    ecache->ct);
+
+	ecache->events = 0;
+	nf_ct_put(ecache->ct);
+	ecache->ct = NULL;
+}
+
+/* Deliver all cached events for a particular conntrack. This is called
+ * by code prior to async packet handling for freeing the skb */
+void nf_ct_deliver_cached_events(const struct nf_conn *ct)
+{
+	struct nf_conntrack_ecache *ecache;
+
+	local_bh_disable();
+	ecache = &__get_cpu_var(nf_conntrack_ecache);
+	if (ecache->ct == ct)
+		__nf_ct_deliver_cached_events(ecache);
+	local_bh_enable();
+}
+
+/* Deliver cached events for old pending events, if current conntrack != old */
+void __nf_ct_event_cache_init(struct nf_conn *ct)
+{
+	struct nf_conntrack_ecache *ecache;
+	
+	/* take care of delivering potentially old events */
+	ecache = &__get_cpu_var(nf_conntrack_ecache);
+	BUG_ON(ecache->ct == ct);
+	if (ecache->ct)
+		__nf_ct_deliver_cached_events(ecache);
+	/* initialize for this conntrack/packet */
+	ecache->ct = ct;
+	nf_conntrack_get(&ct->ct_general);
+}
+
+/* flush the event cache - touches other CPU's data and must not be called
+ * while packets are still passing through the code */
+static void nf_ct_event_cache_flush(void)
+{
+	struct nf_conntrack_ecache *ecache;
+	int cpu;
+
+	for_each_cpu(cpu) {
+		ecache = &per_cpu(nf_conntrack_ecache, cpu);
+		if (ecache->ct)
+			nf_ct_put(ecache->ct);
+	}
+}
+#else
+static inline void nf_ct_event_cache_flush(void) {}
+#endif /* CONFIG_NF_CONNTRACK_EVENTS */
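+
+/*
+ * Usage sketch (illustration only): code in the packet path records
+ * events with nf_conntrack_event_cache(), e.g.
+ *
+ *	nf_conntrack_event_cache(IPCT_STATUS, *pskb);
+ *
+ * as __nf_conntrack_confirm() and nf_conntrack_in() below do; the cached
+ * events are delivered via nf_ct_deliver_cached_events() or when the
+ * per-CPU cache entry is reused for another conntrack.
+ */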
+
+DEFINE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat);
+EXPORT_PER_CPU_SYMBOL(nf_conntrack_stat);
+
+/*
+ * This scheme offers various sizes of "struct nf_conn" depending on
+ * the features used (helper, nat, ...)
+ */
+
+#define NF_CT_FEATURES_NAMELEN	256
+static struct {
+	/* name of slab cache. printed in /proc/slabinfo */
+	char *name;
+
+	/* size of slab cache */
+	size_t size;
+
+	/* slab cache pointer */
+	kmem_cache_t *cachep;
+
+	/* allocated slab cache + modules which use this slab cache */
+	int use;
+
+	/* Initialization */
+	int (*init_conntrack)(struct nf_conn *, u_int32_t);
+
+} nf_ct_cache[NF_CT_F_NUM];
+
+/* protect members of nf_ct_cache except of "use" */
+DEFINE_RWLOCK(nf_ct_cache_lock);
+
+/* This avoids calling kmem_cache_create() with the same name simultaneously */
+DECLARE_MUTEX(nf_ct_cache_mutex);
+
+extern struct nf_conntrack_protocol nf_conntrack_generic_protocol;
+struct nf_conntrack_protocol *
+nf_ct_find_proto(u_int16_t l3proto, u_int8_t protocol)
+{
+	if (unlikely(nf_ct_protos[l3proto] == NULL))
+		return &nf_conntrack_generic_protocol;
+
+	return nf_ct_protos[l3proto][protocol];
+}
+
+static int nf_conntrack_hash_rnd_initted;
+static unsigned int nf_conntrack_hash_rnd;
+
+static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
+				  unsigned int size, unsigned int rnd)
+{
+	unsigned int a, b;
+	a = jhash((void *)tuple->src.u3.all, sizeof(tuple->src.u3.all),
+		  ((tuple->src.l3num) << 16) | tuple->dst.protonum);
+	b = jhash((void *)tuple->dst.u3.all, sizeof(tuple->dst.u3.all),
+			(tuple->src.u.all << 16) | tuple->dst.u.all);
+
+	return jhash_2words(a, b, rnd) % size;
+}
+
+static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple)
+{
+	return __hash_conntrack(tuple, nf_conntrack_htable_size,
+				nf_conntrack_hash_rnd);
+}
+
+/* Initialize "struct nf_conn" which has spaces for helper */
+static int
+init_conntrack_for_helper(struct nf_conn *conntrack, u_int32_t features)
+{
+
+	conntrack->help = (union nf_conntrack_help *)
+		(((unsigned long)conntrack->data
+		  + (__alignof__(union nf_conntrack_help) - 1))
+		 & (~((unsigned long)(__alignof__(union nf_conntrack_help) -1))));
+	return 0;
+}
+
+int nf_conntrack_register_cache(u_int32_t features, const char *name,
+				size_t size,
+				int (*init)(struct nf_conn *, u_int32_t))
+{
+	int ret = 0;
+	char *cache_name;
+	kmem_cache_t *cachep;
+
+	DEBUGP("nf_conntrack_register_cache: features=0x%x, name=%s, size=%d\n",
+	       features, name, size);
+
+	if (features < NF_CT_F_BASIC || features >= NF_CT_F_NUM) {
+		DEBUGP("nf_conntrack_register_cache: invalid features.: 0x%x\n",
+			features);
+		return -EINVAL;
+	}
+
+	down(&nf_ct_cache_mutex);
+
+	write_lock_bh(&nf_ct_cache_lock);
+	/* e.g: multiple helpers are loaded */
+	if (nf_ct_cache[features].use > 0) {
+		DEBUGP("nf_conntrack_register_cache: already resisterd.\n");
+		if ((!strncmp(nf_ct_cache[features].name, name,
+			      NF_CT_FEATURES_NAMELEN))
+		    && nf_ct_cache[features].size == size
+		    && nf_ct_cache[features].init_conntrack == init) {
+			DEBUGP("nf_conntrack_register_cache: reusing.\n");
+			nf_ct_cache[features].use++;
+			ret = 0;
+		} else
+			ret = -EBUSY;
+
+		write_unlock_bh(&nf_ct_cache_lock);
+		up(&nf_ct_cache_mutex);
+		return ret;
+	}
+	write_unlock_bh(&nf_ct_cache_lock);
+
+	/*
+	 * The memory space for the name of the slab cache must remain alive
+	 * until the cache is destroyed.
+	 */
+	cache_name = kmalloc(sizeof(char)*NF_CT_FEATURES_NAMELEN, GFP_ATOMIC);
+	if (cache_name == NULL) {
+		DEBUGP("nf_conntrack_register_cache: can't alloc cache_name\n");
+		ret = -ENOMEM;
+		goto out_up_mutex;
+	}
+
+	if (strlcpy(cache_name, name, NF_CT_FEATURES_NAMELEN)
+						>= NF_CT_FEATURES_NAMELEN) {
+		printk("nf_conntrack_register_cache: name too long\n");
+		ret = -EINVAL;
+		goto out_free_name;
+	}
+
+	cachep = kmem_cache_create(cache_name, size, 0, 0,
+				   NULL, NULL);
+	if (!cachep) {
+		printk("nf_conntrack_register_cache: Can't create slab cache "
+		       "for the features = 0x%x\n", features);
+		ret = -ENOMEM;
+		goto out_free_name;
+	}
+
+	write_lock_bh(&nf_ct_cache_lock);
+	nf_ct_cache[features].use = 1;
+	nf_ct_cache[features].size = size;
+	nf_ct_cache[features].init_conntrack = init;
+	nf_ct_cache[features].cachep = cachep;
+	nf_ct_cache[features].name = cache_name;
+	write_unlock_bh(&nf_ct_cache_lock);
+
+	goto out_up_mutex;
+
+out_free_name:
+	kfree(cache_name);
+out_up_mutex:
+	up(&nf_ct_cache_mutex);
+	return ret;
+}
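+
+/*
+ * Usage sketch (illustration only): a feature needing extra per-conntrack
+ * storage registers a suitably sized cache together with an init callback,
+ * mirroring what nf_conntrack_helper_register() does below:
+ *
+ *	ret = nf_conntrack_register_cache(NF_CT_F_HELP, "nf_conntrack:help",
+ *					  sizeof(struct nf_conn)
+ *					  + sizeof(union nf_conntrack_help)
+ *					  + __alignof__(union nf_conntrack_help),
+ *					  init_conntrack_for_helper);
+ */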
+
+/* FIXME: Currently, only nf_conntrack_cleanup() can call this function. */
+void nf_conntrack_unregister_cache(u_int32_t features)
+{
+	kmem_cache_t *cachep;
+	char *name;
+
+	/*
+	 * This ensures that kmem_cache_create() isn't called before the
+	 * slab cache is destroyed.
+	 */
+	DEBUGP("nf_conntrack_unregister_cache: 0x%04x\n", features);
+	down(&nf_ct_cache_mutex);
+
+	write_lock_bh(&nf_ct_cache_lock);
+	if (--nf_ct_cache[features].use > 0) {
+		write_unlock_bh(&nf_ct_cache_lock);
+		up(&nf_ct_cache_mutex);
+		return;
+	}
+	cachep = nf_ct_cache[features].cachep;
+	name = nf_ct_cache[features].name;
+	nf_ct_cache[features].cachep = NULL;
+	nf_ct_cache[features].name = NULL;
+	nf_ct_cache[features].init_conntrack = NULL;
+	nf_ct_cache[features].size = 0;
+	write_unlock_bh(&nf_ct_cache_lock);
+
+	synchronize_net();
+
+	kmem_cache_destroy(cachep);
+	kfree(name);
+
+	up(&nf_ct_cache_mutex);
+}
+
+int
+nf_ct_get_tuple(const struct sk_buff *skb,
+		unsigned int nhoff,
+		unsigned int dataoff,
+		u_int16_t l3num,
+		u_int8_t protonum,
+		struct nf_conntrack_tuple *tuple,
+		const struct nf_conntrack_l3proto *l3proto,
+		const struct nf_conntrack_protocol *protocol)
+{
+	NF_CT_TUPLE_U_BLANK(tuple);
+
+	tuple->src.l3num = l3num;
+	if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
+		return 0;
+
+	tuple->dst.protonum = protonum;
+	tuple->dst.dir = IP_CT_DIR_ORIGINAL;
+
+	return protocol->pkt_to_tuple(skb, dataoff, tuple);
+}
+
+int
+nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
+		   const struct nf_conntrack_tuple *orig,
+		   const struct nf_conntrack_l3proto *l3proto,
+		   const struct nf_conntrack_protocol *protocol)
+{
+	NF_CT_TUPLE_U_BLANK(inverse);
+
+	inverse->src.l3num = orig->src.l3num;
+	if (l3proto->invert_tuple(inverse, orig) == 0)
+		return 0;
+
+	inverse->dst.dir = !orig->dst.dir;
+
+	inverse->dst.protonum = orig->dst.protonum;
+	return protocol->invert_tuple(inverse, orig);
+}
+
+/* nf_conntrack_expect helper functions */
+static void nf_ct_unlink_expect(struct nf_conntrack_expect *exp)
+{
+	ASSERT_WRITE_LOCK(&nf_conntrack_lock);
+	NF_CT_ASSERT(!timer_pending(&exp_timeout));
+	list_del(&exp->list);
+	NF_CT_STAT_INC(expect_delete);
+	exp->master->expecting--;
+	nf_conntrack_expect_put(exp);
+}
+
+static void expectation_timed_out(unsigned long ul_expect)
+{
+	struct nf_conntrack_expect *exp = (void *)ul_expect;
+
+	write_lock_bh(&nf_conntrack_lock);
+	nf_ct_unlink_expect(exp);
+	write_unlock_bh(&nf_conntrack_lock);
+	nf_conntrack_expect_put(exp);
+}
+
+/* If an expectation for this connection is found, it gets deleted from
+ * the global list and then returned. */
+static struct nf_conntrack_expect *
+find_expectation(const struct nf_conntrack_tuple *tuple)
+{
+	struct nf_conntrack_expect *i;
+
+	list_for_each_entry(i, &nf_conntrack_expect_list, list) {
+	/* If master is not in hash table yet (ie. packet hasn't left
+	   this machine yet), how can other end know about expected?
+	   Hence these are not the droids you are looking for (if
+	   master ct never got confirmed, we'd hold a reference to it
+	   and weird things would happen to future packets). */
+		if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask)
+		    && nf_ct_is_confirmed(i->master)) {
+			if (i->flags & NF_CT_EXPECT_PERMANENT) {
+				atomic_inc(&i->use);
+				return i;
+			} else if (del_timer(&i->timeout)) {
+				nf_ct_unlink_expect(i);
+				return i;
+			}
+		}
+	}
+	return NULL;
+}
+
+/* delete all expectations for this conntrack */
+static void remove_expectations(struct nf_conn *ct)
+{
+	struct nf_conntrack_expect *i, *tmp;
+
+	/* Optimization: most connections never expect any others. */
+	if (ct->expecting == 0)
+		return;
+
+	list_for_each_entry_safe(i, tmp, &nf_conntrack_expect_list, list) {
+		if (i->master == ct && del_timer(&i->timeout)) {
+			nf_ct_unlink_expect(i);
+			nf_conntrack_expect_put(i);
+ 		}
+	}
+}
+
+static void
+clean_from_lists(struct nf_conn *ct)
+{
+	unsigned int ho, hr;
+	
+	DEBUGP("clean_from_lists(%p)\n", ct);
+	ASSERT_WRITE_LOCK(&nf_conntrack_lock);
+
+	ho = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+	hr = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+	LIST_DELETE(&nf_conntrack_hash[ho], &ct->tuplehash[IP_CT_DIR_ORIGINAL]);
+	LIST_DELETE(&nf_conntrack_hash[hr], &ct->tuplehash[IP_CT_DIR_REPLY]);
+
+	/* Destroy all pending expectations */
+	remove_expectations(ct);
+}
+
+static void
+destroy_conntrack(struct nf_conntrack *nfct)
+{
+	struct nf_conn *ct = (struct nf_conn *)nfct;
+	struct nf_conntrack_l3proto *l3proto;
+	struct nf_conntrack_protocol *proto;
+
+	DEBUGP("destroy_conntrack(%p)\n", ct);
+	NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
+	NF_CT_ASSERT(!timer_pending(&ct->timeout));
+
+	nf_conntrack_event(IPCT_DESTROY, ct);
+	set_bit(IPS_DYING_BIT, &ct->status);
+
+	/* To make sure we don't get any weird locking issues here:
+	 * destroy_conntrack() MUST NOT be called with a write lock
+	 * to nf_conntrack_lock!!! -HW */
+	l3proto = nf_ct_find_l3proto(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.l3num);
+	if (l3proto && l3proto->destroy)
+		l3proto->destroy(ct);
+
+	proto = nf_ct_find_proto(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.l3num,
+				 ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.protonum);
+	if (proto && proto->destroy)
+		proto->destroy(ct);
+
+	if (nf_conntrack_destroyed)
+		nf_conntrack_destroyed(ct);
+
+	write_lock_bh(&nf_conntrack_lock);
+	/* Expectations will have been removed in clean_from_lists,
+	 * except TFTP can create an expectation on the first packet,
+	 * before connection is in the list, so we need to clean here,
+	 * too. */
+	remove_expectations(ct);
+
+	/* We overload first tuple to link into unconfirmed list. */
+	if (!nf_ct_is_confirmed(ct)) {
+		BUG_ON(list_empty(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list));
+		list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list);
+	}
+
+	NF_CT_STAT_INC(delete);
+	write_unlock_bh(&nf_conntrack_lock);
+
+	if (ct->master)
+		nf_ct_put(ct->master);
+
+	DEBUGP("destroy_conntrack: returning ct=%p to slab\n", ct);
+	nf_conntrack_free(ct);
+}
+
+static void death_by_timeout(unsigned long ul_conntrack)
+{
+	struct nf_conn *ct = (void *)ul_conntrack;
+
+	write_lock_bh(&nf_conntrack_lock);
+	/* Inside lock so preempt is disabled on module removal path.
+	 * Otherwise we can get spurious warnings. */
+	NF_CT_STAT_INC(delete_list);
+	clean_from_lists(ct);
+	write_unlock_bh(&nf_conntrack_lock);
+	nf_ct_put(ct);
+}
+
+static inline int
+conntrack_tuple_cmp(const struct nf_conntrack_tuple_hash *i,
+		    const struct nf_conntrack_tuple *tuple,
+		    const struct nf_conn *ignored_conntrack)
+{
+	ASSERT_READ_LOCK(&nf_conntrack_lock);
+	return nf_ct_tuplehash_to_ctrack(i) != ignored_conntrack
+		&& nf_ct_tuple_equal(tuple, &i->tuple);
+}
+
+static struct nf_conntrack_tuple_hash *
+__nf_conntrack_find(const struct nf_conntrack_tuple *tuple,
+		    const struct nf_conn *ignored_conntrack)
+{
+	struct nf_conntrack_tuple_hash *h;
+	unsigned int hash = hash_conntrack(tuple);
+
+	ASSERT_READ_LOCK(&nf_conntrack_lock);
+	list_for_each_entry(h, &nf_conntrack_hash[hash], list) {
+		if (conntrack_tuple_cmp(h, tuple, ignored_conntrack)) {
+			NF_CT_STAT_INC(found);
+			return h;
+		}
+		NF_CT_STAT_INC(searched);
+	}
+
+	return NULL;
+}
+
+/* Find a connection corresponding to a tuple. */
+struct nf_conntrack_tuple_hash *
+nf_conntrack_find_get(const struct nf_conntrack_tuple *tuple,
+		      const struct nf_conn *ignored_conntrack)
+{
+	struct nf_conntrack_tuple_hash *h;
+
+	read_lock_bh(&nf_conntrack_lock);
+	h = __nf_conntrack_find(tuple, ignored_conntrack);
+	if (h)
+		atomic_inc(&nf_ct_tuplehash_to_ctrack(h)->ct_general.use);
+	read_unlock_bh(&nf_conntrack_lock);
+
+	return h;
+}
+
+/* Confirm a connection given skb; places it in hash table */
+int
+__nf_conntrack_confirm(struct sk_buff **pskb)
+{
+	unsigned int hash, repl_hash;
+	struct nf_conn *ct;
+	enum ip_conntrack_info ctinfo;
+
+	ct = nf_ct_get(*pskb, &ctinfo);
+
+	/* ipt_REJECT uses nf_conntrack_attach to attach related
+	   ICMP/TCP RST packets in other direction.  Actual packet
+	   which created connection will be IP_CT_NEW or for an
+	   expected connection, IP_CT_RELATED. */
+	if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
+		return NF_ACCEPT;
+
+	hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+	repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+
+	/* We're not in hash table, and we refuse to set up related
+	   connections for unconfirmed conns.  But packet copies and
+	   REJECT will give spurious warnings here. */
+	/* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */
+
+	/* No external references means no one else could have
+	   confirmed us. */
+	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
+	DEBUGP("Confirming conntrack %p\n", ct);
+
+	write_lock_bh(&nf_conntrack_lock);
+
+	/* See if there's one in the list already, including reverse:
+	   NAT could have grabbed it without realizing, since we're
+	   not in the hash.  If there is, we lost race. */
+	if (!LIST_FIND(&nf_conntrack_hash[hash],
+		       conntrack_tuple_cmp,
+		       struct nf_conntrack_tuple_hash *,
+		       &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, NULL)
+	    && !LIST_FIND(&nf_conntrack_hash[repl_hash],
+			  conntrack_tuple_cmp,
+			  struct nf_conntrack_tuple_hash *,
+			  &ct->tuplehash[IP_CT_DIR_REPLY].tuple, NULL)) {
+		/* Remove from unconfirmed list */
+		list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list);
+
+		list_prepend(&nf_conntrack_hash[hash],
+			     &ct->tuplehash[IP_CT_DIR_ORIGINAL]);
+		list_prepend(&nf_conntrack_hash[repl_hash],
+			     &ct->tuplehash[IP_CT_DIR_REPLY]);
+		/* Timer relative to confirmation time, not original
+		   setting time, otherwise we'd get timer wrap in
+		   weird delay cases. */
+		ct->timeout.expires += jiffies;
+		add_timer(&ct->timeout);
+		atomic_inc(&ct->ct_general.use);
+		set_bit(IPS_CONFIRMED_BIT, &ct->status);
+		NF_CT_STAT_INC(insert);
+		write_unlock_bh(&nf_conntrack_lock);
+		if (ct->helper)
+			nf_conntrack_event_cache(IPCT_HELPER, *pskb);
+#ifdef CONFIG_NF_NAT_NEEDED
+		if (test_bit(IPS_SRC_NAT_DONE_BIT, &ct->status) ||
+		    test_bit(IPS_DST_NAT_DONE_BIT, &ct->status))
+			nf_conntrack_event_cache(IPCT_NATINFO, *pskb);
+#endif
+		nf_conntrack_event_cache(master_ct(ct) ?
+					 IPCT_RELATED : IPCT_NEW, *pskb);
+		return NF_ACCEPT;
+	}
+
+	NF_CT_STAT_INC(insert_failed);
+	write_unlock_bh(&nf_conntrack_lock);
+	return NF_DROP;
+}
+
+/* Returns true if a connection corresponds to the tuple (required
+   for NAT). */
+int
+nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
+			 const struct nf_conn *ignored_conntrack)
+{
+	struct nf_conntrack_tuple_hash *h;
+
+	read_lock_bh(&nf_conntrack_lock);
+	h = __nf_conntrack_find(tuple, ignored_conntrack);
+	read_unlock_bh(&nf_conntrack_lock);
+
+	return h != NULL;
+}
+
+/* There's a small race here where we may free a just-assured
+   connection.  Too bad: we're in trouble anyway. */
+static inline int unreplied(const struct nf_conntrack_tuple_hash *i)
+{
+	return !(test_bit(IPS_ASSURED_BIT,
+			  &nf_ct_tuplehash_to_ctrack(i)->status));
+}
+
+static int early_drop(struct list_head *chain)
+{
+	/* Traverse backwards: gives us oldest, which is roughly LRU */
+	struct nf_conntrack_tuple_hash *h;
+	struct nf_conn *ct = NULL;
+	int dropped = 0;
+
+	read_lock_bh(&nf_conntrack_lock);
+	h = LIST_FIND_B(chain, unreplied, struct nf_conntrack_tuple_hash *);
+	if (h) {
+		ct = nf_ct_tuplehash_to_ctrack(h);
+		atomic_inc(&ct->ct_general.use);
+	}
+	read_unlock_bh(&nf_conntrack_lock);
+
+	if (!ct)
+		return dropped;
+
+	if (del_timer(&ct->timeout)) {
+		death_by_timeout((unsigned long)ct);
+		dropped = 1;
+		NF_CT_STAT_INC(early_drop);
+	}
+	nf_ct_put(ct);
+	return dropped;
+}
+
+static inline int helper_cmp(const struct nf_conntrack_helper *i,
+			     const struct nf_conntrack_tuple *rtuple)
+{
+	return nf_ct_tuple_mask_cmp(rtuple, &i->tuple, &i->mask);
+}
+
+static struct nf_conntrack_helper *
+nf_ct_find_helper(const struct nf_conntrack_tuple *tuple)
+{
+	return LIST_FIND(&helpers, helper_cmp,
+			 struct nf_conntrack_helper *,
+			 tuple);
+}
+
+static struct nf_conn *
+__nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
+		     const struct nf_conntrack_tuple *repl,
+		     const struct nf_conntrack_l3proto *l3proto)
+{
+	struct nf_conn *conntrack = NULL;
+	u_int32_t features = 0;
+
+	if (!nf_conntrack_hash_rnd_initted) {
+		get_random_bytes(&nf_conntrack_hash_rnd, 4);
+		nf_conntrack_hash_rnd_initted = 1;
+	}
+
+	if (nf_conntrack_max
+	    && atomic_read(&nf_conntrack_count) >= nf_conntrack_max) {
+		unsigned int hash = hash_conntrack(orig);
+		/* Try dropping from this hash chain. */
+		if (!early_drop(&nf_conntrack_hash[hash])) {
+			if (net_ratelimit())
+				printk(KERN_WARNING
+				       "nf_conntrack: table full, dropping"
+				       " packet.\n");
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+
+	/*  find features needed by this conntrack. */
+	features = l3proto->get_features(orig);
+	read_lock_bh(&nf_conntrack_lock);
+	if (nf_ct_find_helper(repl) != NULL)
+		features |= NF_CT_F_HELP;
+	read_unlock_bh(&nf_conntrack_lock);
+
+	DEBUGP("nf_conntrack_alloc: features=0x%x\n", features);
+
+	read_lock_bh(&nf_ct_cache_lock);
+
+	if (!nf_ct_cache[features].use) {
+		DEBUGP("nf_conntrack_alloc: not supported features = 0x%x\n",
+			features);
+		goto out;
+	}
+
+	conntrack = kmem_cache_alloc(nf_ct_cache[features].cachep, GFP_ATOMIC);
+	if (conntrack == NULL) {
+		DEBUGP("nf_conntrack_alloc: Can't alloc conntrack from cache\n");
+		goto out;
+	}
+
+	memset(conntrack, 0, nf_ct_cache[features].size);
+	conntrack->features = features;
+	if (nf_ct_cache[features].init_conntrack &&
+	    nf_ct_cache[features].init_conntrack(conntrack, features) < 0) {
+		DEBUGP("nf_conntrack_alloc: failed to init\n");
+		kmem_cache_free(nf_ct_cache[features].cachep, conntrack);
+		conntrack = NULL;
+		goto out;
+	}
+
+	atomic_set(&conntrack->ct_general.use, 1);
+	conntrack->ct_general.destroy = destroy_conntrack;
+	conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
+	conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
+	/* Don't set timer yet: wait for confirmation */
+	init_timer(&conntrack->timeout);
+	conntrack->timeout.data = (unsigned long)conntrack;
+	conntrack->timeout.function = death_by_timeout;
+
+	atomic_inc(&nf_conntrack_count);
+out:
+	read_unlock_bh(&nf_ct_cache_lock);
+	return conntrack;
+}
+
+struct nf_conn *nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
+				   const struct nf_conntrack_tuple *repl)
+{
+	struct nf_conntrack_l3proto *l3proto;
+
+	l3proto = nf_ct_find_l3proto(orig->src.l3num);
+	return __nf_conntrack_alloc(orig, repl, l3proto);
+}
+
+void nf_conntrack_free(struct nf_conn *conntrack)
+{
+	u_int32_t features = conntrack->features;
+	NF_CT_ASSERT(features >= NF_CT_F_BASIC && features < NF_CT_F_NUM);
+	DEBUGP("nf_conntrack_free: features = 0x%x, conntrack=%p\n", features,
+	       conntrack);
+	kmem_cache_free(nf_ct_cache[features].cachep, conntrack);
+	atomic_dec(&nf_conntrack_count);
+}
+
+/* Allocate a new conntrack: we return -ENOMEM if classification
+   failed due to stress.  Otherwise it really is unclassifiable. */
+static struct nf_conntrack_tuple_hash *
+init_conntrack(const struct nf_conntrack_tuple *tuple,
+	       struct nf_conntrack_l3proto *l3proto,
+	       struct nf_conntrack_protocol *protocol,
+	       struct sk_buff *skb,
+	       unsigned int dataoff)
+{
+	struct nf_conn *conntrack;
+	struct nf_conntrack_tuple repl_tuple;
+	struct nf_conntrack_expect *exp;
+
+	if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, protocol)) {
+		DEBUGP("Can't invert tuple.\n");
+		return NULL;
+	}
+
+	conntrack = __nf_conntrack_alloc(tuple, &repl_tuple, l3proto);
+	if (conntrack == NULL || IS_ERR(conntrack)) {
+		DEBUGP("Can't allocate conntrack.\n");
+		return (struct nf_conntrack_tuple_hash *)conntrack;
+	}
+
+	if (!protocol->new(conntrack, skb, dataoff)) {
+		nf_conntrack_free(conntrack);
+		DEBUGP("init conntrack: can't track with proto module\n");
+		return NULL;
+	}
+
+	write_lock_bh(&nf_conntrack_lock);
+	exp = find_expectation(tuple);
+
+	if (exp) {
+		DEBUGP("conntrack: expectation arrives ct=%p exp=%p\n",
+			conntrack, exp);
+		/* Welcome, Mr. Bond.  We've been expecting you... */
+		__set_bit(IPS_EXPECTED_BIT, &conntrack->status);
+		conntrack->master = exp->master;
+#ifdef CONFIG_NF_CONNTRACK_MARK
+		conntrack->mark = exp->master->mark;
+#endif
+		nf_conntrack_get(&conntrack->master->ct_general);
+		NF_CT_STAT_INC(expect_new);
+	} else {
+		conntrack->helper = nf_ct_find_helper(&repl_tuple);
+
+		NF_CT_STAT_INC(new);
+	}
+
+	/* Overload tuple linked list to put us in unconfirmed list. */
+	list_add(&conntrack->tuplehash[IP_CT_DIR_ORIGINAL].list, &unconfirmed);
+
+	write_unlock_bh(&nf_conntrack_lock);
+
+	if (exp) {
+		if (exp->expectfn)
+			exp->expectfn(conntrack, exp);
+		nf_conntrack_expect_put(exp);
+	}
+
+	return &conntrack->tuplehash[IP_CT_DIR_ORIGINAL];
+}
+
+/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
+static inline struct nf_conn *
+resolve_normal_ct(struct sk_buff *skb,
+		  unsigned int dataoff,
+		  u_int16_t l3num,
+		  u_int8_t protonum,
+		  struct nf_conntrack_l3proto *l3proto,
+		  struct nf_conntrack_protocol *proto,
+		  int *set_reply,
+		  enum ip_conntrack_info *ctinfo)
+{
+	struct nf_conntrack_tuple tuple;
+	struct nf_conntrack_tuple_hash *h;
+	struct nf_conn *ct;
+
+	if (!nf_ct_get_tuple(skb, (unsigned int)(skb->nh.raw - skb->data),
+			     dataoff, l3num, protonum, &tuple, l3proto,
+			     proto)) {
+		DEBUGP("resolve_normal_ct: Can't get tuple\n");
+		return NULL;
+	}
+
+	/* look for tuple match */
+	h = nf_conntrack_find_get(&tuple, NULL);
+	if (!h) {
+		h = init_conntrack(&tuple, l3proto, proto, skb, dataoff);
+		if (!h)
+			return NULL;
+		if (IS_ERR(h))
+			return (void *)h;
+	}
+	ct = nf_ct_tuplehash_to_ctrack(h);
+
+	/* It exists; we have (non-exclusive) reference. */
+	if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
+		*ctinfo = IP_CT_ESTABLISHED + IP_CT_IS_REPLY;
+		/* Please set reply bit if this packet OK */
+		*set_reply = 1;
+	} else {
+		/* Once we've had two way comms, always ESTABLISHED. */
+		if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
+			DEBUGP("nf_conntrack_in: normal packet for %p\n", ct);
+			*ctinfo = IP_CT_ESTABLISHED;
+		} else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
+			DEBUGP("nf_conntrack_in: related packet for %p\n", ct);
+			*ctinfo = IP_CT_RELATED;
+		} else {
+			DEBUGP("nf_conntrack_in: new packet for %p\n", ct);
+			*ctinfo = IP_CT_NEW;
+		}
+		*set_reply = 0;
+	}
+	skb->nfct = &ct->ct_general;
+	skb->nfctinfo = *ctinfo;
+	return ct;
+}
+
+unsigned int
+nf_conntrack_in(int pf, unsigned int hooknum, struct sk_buff **pskb)
+{
+	struct nf_conn *ct;
+	enum ip_conntrack_info ctinfo;
+	struct nf_conntrack_l3proto *l3proto;
+	struct nf_conntrack_protocol *proto;
+	unsigned int dataoff;
+	u_int8_t protonum;
+	int set_reply = 0;
+	int ret;
+
+	/* Previously seen (loopback or untracked)?  Ignore. */
+	if ((*pskb)->nfct) {
+		NF_CT_STAT_INC(ignore);
+		return NF_ACCEPT;
+	}
+
+	l3proto = nf_ct_find_l3proto((u_int16_t)pf);
+	if ((ret = l3proto->prepare(pskb, hooknum, &dataoff, &protonum)) <= 0) {
+		DEBUGP("not prepared to track yet or error occured\n");
+		return -ret;
+	}
+
+	proto = nf_ct_find_proto((u_int16_t)pf, protonum);
+
+	/* It may be a special packet, error, unclean...
+	 * The inverse of the return code tells the netfilter
+	 * core what to do with the packet. */
+	if (proto->error != NULL &&
+	    (ret = proto->error(*pskb, dataoff, &ctinfo, pf, hooknum)) <= 0) {
+		NF_CT_STAT_INC(error);
+		NF_CT_STAT_INC(invalid);
+		return -ret;
+	}
+
+	ct = resolve_normal_ct(*pskb, dataoff, pf, protonum, l3proto, proto,
+			       &set_reply, &ctinfo);
+	if (!ct) {
+		/* Not valid part of a connection */
+		NF_CT_STAT_INC(invalid);
+		return NF_ACCEPT;
+	}
+
+	if (IS_ERR(ct)) {
+		/* Too stressed to deal. */
+		NF_CT_STAT_INC(drop);
+		return NF_DROP;
+	}
+
+	NF_CT_ASSERT((*pskb)->nfct);
+
+	ret = proto->packet(ct, *pskb, dataoff, ctinfo, pf, hooknum);
+	if (ret < 0) {
+		/* Invalid: inverse of the return code tells
+		 * the netfilter core what to do */
+		DEBUGP("nf_conntrack_in: Can't track with proto module\n");
+		nf_conntrack_put((*pskb)->nfct);
+		(*pskb)->nfct = NULL;
+		NF_CT_STAT_INC(invalid);
+		return -ret;
+	}
+
+	if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
+		nf_conntrack_event_cache(IPCT_STATUS, *pskb);
+
+	return ret;
+}
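+
+/*
+ * Usage sketch (illustration only; the wrapper name is hypothetical): an
+ * L3 protocol module feeds packets into nf_conntrack_in() from its
+ * netfilter hook functions, roughly:
+ *
+ *	static unsigned int my_conntrack_in(unsigned int hooknum,
+ *					    struct sk_buff **pskb,
+ *					    const struct net_device *in,
+ *					    const struct net_device *out,
+ *					    int (*okfn)(struct sk_buff *))
+ *	{
+ *		return nf_conntrack_in(PF_INET6, hooknum, pskb);
+ *	}
+ */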
+
+int nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
+			 const struct nf_conntrack_tuple *orig)
+{
+	return nf_ct_invert_tuple(inverse, orig,
+				  nf_ct_find_l3proto(orig->src.l3num),
+				  nf_ct_find_proto(orig->src.l3num,
+						   orig->dst.protonum));
+}
+
+/* Would two expected things clash? */
+static inline int expect_clash(const struct nf_conntrack_expect *a,
+			       const struct nf_conntrack_expect *b)
+{
+	/* Part covered by intersection of masks must be unequal,
+	   otherwise they clash */
+	struct nf_conntrack_tuple intersect_mask;
+	int count;
+
+	intersect_mask.src.l3num = a->mask.src.l3num & b->mask.src.l3num;
+	intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;
+	intersect_mask.dst.u.all = a->mask.dst.u.all & b->mask.dst.u.all;
+	intersect_mask.dst.protonum = a->mask.dst.protonum
+					& b->mask.dst.protonum;
+
+	for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++){
+		intersect_mask.src.u3.all[count] =
+			a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
+	}
+
+	for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++){
+		intersect_mask.dst.u3.all[count] =
+			a->mask.dst.u3.all[count] & b->mask.dst.u3.all[count];
+	}
+
+	return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
+}
+
+static inline int expect_matches(const struct nf_conntrack_expect *a,
+				 const struct nf_conntrack_expect *b)
+{
+	return a->master == b->master
+		&& nf_ct_tuple_equal(&a->tuple, &b->tuple)
+		&& nf_ct_tuple_equal(&a->mask, &b->mask);
+}
+
+/* Generally a bad idea to call this: could have matched already. */
+void nf_conntrack_unexpect_related(struct nf_conntrack_expect *exp)
+{
+	struct nf_conntrack_expect *i;
+
+	write_lock_bh(&nf_conntrack_lock);
+	/* choose the oldest expectation to evict */
+	list_for_each_entry_reverse(i, &nf_conntrack_expect_list, list) {
+		if (expect_matches(i, exp) && del_timer(&i->timeout)) {
+			nf_ct_unlink_expect(i);
+			write_unlock_bh(&nf_conntrack_lock);
+			nf_conntrack_expect_put(i);
+			return;
+		}
+	}
+	write_unlock_bh(&nf_conntrack_lock);
+}
+
+/* We don't increase the master conntrack refcount for non-fulfilled
+ * conntracks. During the conntrack destruction, the expectations are
+ * always killed before the conntrack itself */
+struct nf_conntrack_expect *nf_conntrack_expect_alloc(struct nf_conn *me)
+{
+	struct nf_conntrack_expect *new;
+
+	new = kmem_cache_alloc(nf_conntrack_expect_cachep, GFP_ATOMIC);
+	if (!new) {
+		DEBUGP("expect_related: OOM allocating expect\n");
+		return NULL;
+	}
+	new->master = me;
+	atomic_set(&new->use, 1);
+	return new;
+}
+
+void nf_conntrack_expect_put(struct nf_conntrack_expect *exp)
+{
+	if (atomic_dec_and_test(&exp->use))
+		kmem_cache_free(nf_conntrack_expect_cachep, exp);
+}
+
+static void nf_conntrack_expect_insert(struct nf_conntrack_expect *exp)
+{
+	atomic_inc(&exp->use);
+	exp->master->expecting++;
+	list_add(&exp->list, &nf_conntrack_expect_list);
+
+	init_timer(&exp->timeout);
+	exp->timeout.data = (unsigned long)exp;
+	exp->timeout.function = expectation_timed_out;
+	exp->timeout.expires = jiffies + exp->master->helper->timeout * HZ;
+	add_timer(&exp->timeout);
+
+	atomic_inc(&exp->use);
+	NF_CT_STAT_INC(expect_create);
+}
+
+/* Race with expectations being used means we could have none to find; OK. */
+static void evict_oldest_expect(struct nf_conn *master)
+{
+	struct nf_conntrack_expect *i;
+
+	list_for_each_entry_reverse(i, &nf_conntrack_expect_list, list) {
+		if (i->master == master) {
+			if (del_timer(&i->timeout)) {
+				nf_ct_unlink_expect(i);
+				nf_conntrack_expect_put(i);
+			}
+			break;
+		}
+	}
+}
+
+static inline int refresh_timer(struct nf_conntrack_expect *i)
+{
+	if (!del_timer(&i->timeout))
+		return 0;
+
+	i->timeout.expires = jiffies + i->master->helper->timeout*HZ;
+	add_timer(&i->timeout);
+	return 1;
+}
+
+int nf_conntrack_expect_related(struct nf_conntrack_expect *expect)
+{
+	struct nf_conntrack_expect *i;
+	int ret;
+
+	DEBUGP("nf_conntrack_expect_related %p\n", related_to);
+	DEBUGP("tuple: "); NF_CT_DUMP_TUPLE(&expect->tuple);
+	DEBUGP("mask:  "); NF_CT_DUMP_TUPLE(&expect->mask);
+
+	write_lock_bh(&nf_conntrack_lock);
+	list_for_each_entry(i, &nf_conntrack_expect_list, list) {
+		if (expect_matches(i, expect)) {
+			/* Refresh timer: if it's dying, ignore.. */
+			if (refresh_timer(i)) {
+				ret = 0;
+				goto out;
+			}
+		} else if (expect_clash(i, expect)) {
+			ret = -EBUSY;
+			goto out;
+		}
+	}
+	/* Will be over limit? */
+	if (expect->master->helper->max_expected && 
+	    expect->master->expecting >= expect->master->helper->max_expected)
+		evict_oldest_expect(expect->master);
+
+	nf_conntrack_expect_insert(expect);
+	nf_conntrack_expect_event(IPEXP_NEW, expect);
+	ret = 0;
+out:
+	write_unlock_bh(&nf_conntrack_lock);
+	return ret;
+}
+
+/* Alter reply tuple (maybe alter helper).  This is for NAT, and is
+   implicitly racy: see __nf_conntrack_confirm */
+void nf_conntrack_alter_reply(struct nf_conn *conntrack,
+			      const struct nf_conntrack_tuple *newreply)
+{
+	write_lock_bh(&nf_conntrack_lock);
+	/* Should be unconfirmed, so not in hash table yet */
+	NF_CT_ASSERT(!nf_ct_is_confirmed(conntrack));
+
+	DEBUGP("Altering reply tuple of %p to ", conntrack);
+	NF_CT_DUMP_TUPLE(newreply);
+
+	conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
+	if (!conntrack->master && conntrack->expecting == 0)
+		conntrack->helper = nf_ct_find_helper(newreply);
+	write_unlock_bh(&nf_conntrack_lock);
+}
+
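+/* Register a helper and make sure a conntrack cache with room for the
+ * helper's private data exists. */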
+int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
+{
+	int ret;
+	BUG_ON(me->timeout == 0);
+
+	ret = nf_conntrack_register_cache(NF_CT_F_HELP, "nf_conntrack:help",
+					  sizeof(struct nf_conn)
+					  + sizeof(union nf_conntrack_help)
+					  + __alignof__(union nf_conntrack_help),
+					  init_conntrack_for_helper);
+	if (ret < 0) {
+		printk(KERN_ERR "nf_conntrack_helper_reigster: Unable to create slab cache for conntracks\n");
+		return ret;
+	}
+	write_lock_bh(&nf_conntrack_lock);
+	list_prepend(&helpers, me);
+	write_unlock_bh(&nf_conntrack_lock);
+
+	return 0;
+}
+
+static inline int unhelp(struct nf_conntrack_tuple_hash *i,
+			 const struct nf_conntrack_helper *me)
+{
+	if (nf_ct_tuplehash_to_ctrack(i)->helper == me) {
+		nf_conntrack_event(IPCT_HELPER, nf_ct_tuplehash_to_ctrack(i));
+		nf_ct_tuplehash_to_ctrack(i)->helper = NULL;
+	}
+	return 0;
+}
+
+void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me)
+{
+	unsigned int i;
+	struct nf_conntrack_expect *exp, *tmp;
+
+	/* Need write lock here, to delete helper. */
+	write_lock_bh(&nf_conntrack_lock);
+	LIST_DELETE(&helpers, me);
+
+	/* Get rid of expectations */
+	list_for_each_entry_safe(exp, tmp, &nf_conntrack_expect_list, list) {
+		if (exp->master->helper == me && del_timer(&exp->timeout)) {
+			nf_ct_unlink_expect(exp);
+			nf_conntrack_expect_put(exp);
+		}
+	}
+
+	/* Get rid of expecteds, set helpers to NULL. */
+	LIST_FIND_W(&unconfirmed, unhelp, struct nf_conntrack_tuple_hash*, me);
+	for (i = 0; i < nf_conntrack_htable_size; i++)
+		LIST_FIND_W(&nf_conntrack_hash[i], unhelp,
+			    struct nf_conntrack_tuple_hash *, me);
+	write_unlock_bh(&nf_conntrack_lock);
+
+	/* Someone could be still looking at the helper in a bh. */
+	synchronize_net();
+}
+
+/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
+void __nf_ct_refresh_acct(struct nf_conn *ct,
+			  enum ip_conntrack_info ctinfo,
+			  const struct sk_buff *skb,
+			  unsigned long extra_jiffies,
+			  int do_acct)
+{
+	int event = 0;
+
+	NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
+	NF_CT_ASSERT(skb);
+
+	write_lock_bh(&nf_conntrack_lock);
+
+	/* If not in hash table, timer will not be active yet */
+	if (!nf_ct_is_confirmed(ct)) {
+		ct->timeout.expires = extra_jiffies;
+		event = IPCT_REFRESH;
+	} else {
+		/* Need del_timer for race avoidance (may already be dying). */
+		if (del_timer(&ct->timeout)) {
+			ct->timeout.expires = jiffies + extra_jiffies;
+			add_timer(&ct->timeout);
+			event = IPCT_REFRESH;
+		}
+	}
+
+#ifdef CONFIG_NF_CT_ACCT
+	if (do_acct) {
+		ct->counters[CTINFO2DIR(ctinfo)].packets++;
+		ct->counters[CTINFO2DIR(ctinfo)].bytes +=
+			skb->len - (unsigned int)(skb->nh.raw - skb->data);
+		if ((ct->counters[CTINFO2DIR(ctinfo)].packets & 0x80000000)
+		    || (ct->counters[CTINFO2DIR(ctinfo)].bytes & 0x80000000))
+			event |= IPCT_COUNTER_FILLING;
+	}
+#endif
+
+	write_unlock_bh(&nf_conntrack_lock);
+
+	/* must be unlocked when calling event cache */
+	if (event)
+		nf_conntrack_event_cache(event, skb);
+}
+
+/* Used by ipt_REJECT and ip6t_REJECT. */
+void __nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
+{
+	struct nf_conn *ct;
+	enum ip_conntrack_info ctinfo;
+
+	/* This ICMP is in reverse direction to the packet which caused it */
+	ct = nf_ct_get(skb, &ctinfo);
+	if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
+		ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY;
+	else
+		ctinfo = IP_CT_RELATED;
+
+	/* Attach to new skbuff, and increment count */
+	nskb->nfct = &ct->ct_general;
+	nskb->nfctinfo = ctinfo;
+	nf_conntrack_get(nskb->nfct);
+}
+
+static inline int
+do_iter(const struct nf_conntrack_tuple_hash *i,
+	int (*iter)(struct nf_conn *i, void *data),
+	void *data)
+{
+	return iter(nf_ct_tuplehash_to_ctrack(i), data);
+}
+
+/* Bring out ya dead! */
+static struct nf_conntrack_tuple_hash *
+get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
+		void *data, unsigned int *bucket)
+{
+	struct nf_conntrack_tuple_hash *h = NULL;
+
+	write_lock_bh(&nf_conntrack_lock);
+	for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
+		h = LIST_FIND_W(&nf_conntrack_hash[*bucket], do_iter,
+				struct nf_conntrack_tuple_hash *, iter, data);
+		if (h)
+			break;
+	}
+	if (!h)
+		h = LIST_FIND_W(&unconfirmed, do_iter,
+				struct nf_conntrack_tuple_hash *, iter, data);
+	if (h)
+		atomic_inc(&nf_ct_tuplehash_to_ctrack(h)->ct_general.use);
+	write_unlock_bh(&nf_conntrack_lock);
+
+	return h;
+}
+
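+/* Walk the whole hash table plus the unconfirmed list and kill every
+ * conntrack for which iter() returns true. */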
+void
+nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data), void *data)
+{
+	struct nf_conntrack_tuple_hash *h;
+	unsigned int bucket = 0;
+
+	while ((h = get_next_corpse(iter, data, &bucket)) != NULL) {
+		struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
+		/* Time to push up daisies... */
+		if (del_timer(&ct->timeout))
+			death_by_timeout((unsigned long)ct);
+		/* ... else the timer will get him soon. */
+
+		nf_ct_put(ct);
+	}
+}
+
+static int kill_all(struct nf_conn *i, void *data)
+{
+	return 1;
+}
+
+static void free_conntrack_hash(struct list_head *hash, int vmalloced, int size)
+{
+	if (vmalloced)
+		vfree(hash);
+	else
+		free_pages((unsigned long)hash, 
+			   get_order(sizeof(struct list_head) * size));
+}
+
+/* Mishearing the voices in his head, our hero wonders how he's
+   supposed to kill the mall. */
+void nf_conntrack_cleanup(void)
+{
+	int i;
+
+	/* This makes sure all current packets have passed through
+	   netfilter framework.  Roll on, two-stage module
+	   delete... */
+	synchronize_net();
+
+	nf_ct_event_cache_flush();
+ i_see_dead_people:
+	nf_ct_iterate_cleanup(kill_all, NULL);
+	if (atomic_read(&nf_conntrack_count) != 0) {
+		schedule();
+		goto i_see_dead_people;
+	}
+
+	for (i = 0; i < NF_CT_F_NUM; i++) {
+		if (nf_ct_cache[i].use == 0)
+			continue;
+
+		NF_CT_ASSERT(nf_ct_cache[i].use == 1);
+		nf_ct_cache[i].use = 1;
+		nf_conntrack_unregister_cache(i);
+	}
+	kmem_cache_destroy(nf_conntrack_expect_cachep);
+	free_conntrack_hash(nf_conntrack_hash, nf_conntrack_vmalloc,
+			    nf_conntrack_htable_size);
+}
+
+static struct list_head *alloc_hashtable(int size, int *vmalloced)
+{
+	struct list_head *hash;
+	unsigned int i;
+
+	*vmalloced = 0; 
+	hash = (void*)__get_free_pages(GFP_KERNEL, 
+				       get_order(sizeof(struct list_head)
+						 * size));
+	if (!hash) { 
+		*vmalloced = 1;
+		printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
+		hash = vmalloc(sizeof(struct list_head) * size);
+	}
+
+	if (hash)
+		for (i = 0; i < size; i++) 
+			INIT_LIST_HEAD(&hash[i]);
+
+	return hash;
+}
+
+int set_hashsize(const char *val, struct kernel_param *kp)
+{
+	int i, bucket, hashsize, vmalloced;
+	int old_vmalloced, old_size;
+	int rnd;
+	struct list_head *hash, *old_hash;
+	struct nf_conntrack_tuple_hash *h;
+
+	/* On boot, we can set this without any fancy locking. */
+	if (!nf_conntrack_htable_size)
+		return param_set_uint(val, kp);
+
+	hashsize = simple_strtol(val, NULL, 0);
+	if (!hashsize)
+		return -EINVAL;
+
+	hash = alloc_hashtable(hashsize, &vmalloced);
+	if (!hash)
+		return -ENOMEM;
+
+	/* We have to rehash for the new table anyway, so we also can
+	 * use a new random seed */
+	get_random_bytes(&rnd, 4);
+
+	write_lock_bh(&nf_conntrack_lock);
+	for (i = 0; i < nf_conntrack_htable_size; i++) {
+		while (!list_empty(&nf_conntrack_hash[i])) {
+			h = list_entry(nf_conntrack_hash[i].next,
+				       struct nf_conntrack_tuple_hash, list);
+			list_del(&h->list);
+			bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
+			list_add_tail(&h->list, &hash[bucket]);
+		}
+	}
+	old_size = nf_conntrack_htable_size;
+	old_vmalloced = nf_conntrack_vmalloc;
+	old_hash = nf_conntrack_hash;
+
+	nf_conntrack_htable_size = hashsize;
+	nf_conntrack_vmalloc = vmalloced;
+	nf_conntrack_hash = hash;
+	nf_conntrack_hash_rnd = rnd;
+	write_unlock_bh(&nf_conntrack_lock);
+
+	free_conntrack_hash(old_hash, old_vmalloced, old_size);
+	return 0;
+}
+
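+/* "hashsize" may be changed at runtime; set_hashsize() allocates a new table
+ * and rehashes all existing entries under nf_conntrack_lock before freeing
+ * the old one. */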
+module_param_call(hashsize, set_hashsize, param_get_uint,
+		  &nf_conntrack_htable_size, 0600);
+
+int __init nf_conntrack_init(void)
+{
+	unsigned int i;
+	int ret;
+
+	/* Idea from tcp.c: use 1/16384 of memory.  On i386: 32MB
+	 * machine has 256 buckets.  >= 1GB machines have 8192 buckets. */
+	if (!nf_conntrack_htable_size) {
+		nf_conntrack_htable_size
+			= (((num_physpages << PAGE_SHIFT) / 16384)
+			   / sizeof(struct list_head));
+		if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
+			nf_conntrack_htable_size = 8192;
+		if (nf_conntrack_htable_size < 16)
+			nf_conntrack_htable_size = 16;
+	}
+	nf_conntrack_max = 8 * nf_conntrack_htable_size;
+
+	printk("nf_conntrack version %s (%u buckets, %d max)\n",
+	       NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
+	       nf_conntrack_max);
+
+	nf_conntrack_hash = alloc_hashtable(nf_conntrack_htable_size,
+					    &nf_conntrack_vmalloc);
+	if (!nf_conntrack_hash) {
+		printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
+		goto err_out;
+	}
+
+	ret = nf_conntrack_register_cache(NF_CT_F_BASIC, "nf_conntrack:basic",
+					  sizeof(struct nf_conn), NULL);
+	if (ret < 0) {
+		printk(KERN_ERR "Unable to create nf_conn slab cache\n");
+		goto err_free_hash;
+	}
+
+	nf_conntrack_expect_cachep = kmem_cache_create("nf_conntrack_expect",
+					sizeof(struct nf_conntrack_expect),
+					0, 0, NULL, NULL);
+	if (!nf_conntrack_expect_cachep) {
+		printk(KERN_ERR "Unable to create nf_expect slab cache\n");
+		goto err_free_conntrack_slab;
+	}
+
+	/* Don't NEED lock here, but good form anyway. */
+	write_lock_bh(&nf_conntrack_lock);
+	for (i = 0; i < PF_MAX; i++)
+		nf_ct_l3protos[i] = &nf_conntrack_generic_l3proto;
+	write_unlock_bh(&nf_conntrack_lock);
+
+	/* Set up fake conntrack:
+	    - to never be deleted, not in any hashes */
+	atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
+	/*  - and make it look like a confirmed connection */
+	set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);
+
+	return ret;
+
+err_free_conntrack_slab:
+	nf_conntrack_unregister_cache(NF_CT_F_BASIC);
+err_free_hash:
+	free_conntrack_hash(nf_conntrack_hash, nf_conntrack_vmalloc,
+			    nf_conntrack_htable_size);
+err_out:
+	return -ENOMEM;
+}
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
new file mode 100644
index 0000000..65080e2
--- /dev/null
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -0,0 +1,698 @@
+/* FTP extension for connection tracking. */
+
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
+ * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ *	- enable working with Layer 3 protocol independent connection tracking.
+ *	- track EPRT and EPSV commands with IPv6 address.
+ *
+ * Derived from net/ipv4/netfilter/ip_conntrack_ftp.c
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/netfilter.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/ctype.h>
+#include <net/checksum.h>
+#include <net/tcp.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <linux/netfilter/nf_conntrack_ftp.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>");
+MODULE_DESCRIPTION("ftp connection tracking helper");
+
+/* This is slow, but it's simple. --RR */
+static char *ftp_buffer;
+
+static DEFINE_SPINLOCK(nf_ftp_lock);
+
+#define MAX_PORTS 8
+static u_int16_t ports[MAX_PORTS];
+static unsigned int ports_c;
+module_param_array(ports, ushort, &ports_c, 0400);
+
+static int loose;
+module_param(loose, int, 0600);
+
+unsigned int (*nf_nat_ftp_hook)(struct sk_buff **pskb,
+				enum ip_conntrack_info ctinfo,
+				enum ip_ct_ftp_type type,
+				unsigned int matchoff,
+				unsigned int matchlen,
+				struct nf_conntrack_expect *exp,
+				u32 *seq);
+EXPORT_SYMBOL_GPL(nf_nat_ftp_hook);
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(format, args...)
+#endif
+
+static int try_rfc959(const char *, size_t, struct nf_conntrack_man *, char);
+static int try_eprt(const char *, size_t, struct nf_conntrack_man *, char);
+static int try_epsv_response(const char *, size_t, struct nf_conntrack_man *,
+			     char);
+
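+/* Command/response patterns searched for in the FTP control stream, together
+ * with the parser used to extract the address and port from each match. */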
+static struct ftp_search {
+	enum ip_conntrack_dir dir;
+	const char *pattern;
+	size_t plen;
+	char skip;
+	char term;
+	enum ip_ct_ftp_type ftptype;
+	int (*getnum)(const char *, size_t, struct nf_conntrack_man *, char);
+} search[] = {
+	{
+		IP_CT_DIR_ORIGINAL,
+		"PORT", sizeof("PORT") - 1, ' ', '\r',
+		IP_CT_FTP_PORT,
+		try_rfc959,
+	},
+	{
+		IP_CT_DIR_REPLY,
+		"227 ", sizeof("227 ") - 1, '(', ')',
+		IP_CT_FTP_PASV,
+		try_rfc959,
+	},
+	{
+		IP_CT_DIR_ORIGINAL,
+		"EPRT", sizeof("EPRT") - 1, ' ', '\r',
+		IP_CT_FTP_EPRT,
+		try_eprt,
+	},
+	{
+		IP_CT_DIR_REPLY,
+		"229 ", sizeof("229 ") - 1, '(', ')',
+		IP_CT_FTP_EPSV,
+		try_epsv_response,
+	},
+};
+
+/* This code is based on inet_pton() in glibc-2.2.4 */
+static int
+get_ipv6_addr(const char *src, size_t dlen, struct in6_addr *dst, u_int8_t term)
+{
+	static const char xdigits[] = "0123456789abcdef";
+	u_int8_t tmp[16], *tp, *endp, *colonp;
+	int ch, saw_xdigit;
+	u_int32_t val;
+	size_t clen = 0;
+
+	tp = memset(tmp, '\0', sizeof(tmp));
+	endp = tp + sizeof(tmp);
+	colonp = NULL;
+
+	/* Leading :: requires some special handling. */
+	if (*src == ':'){
+		if (*++src != ':') {
+			DEBUGP("invalid \":\" at the head of addr\n");
+			return 0;
+		}
+		clen++;
+	}
+
+	saw_xdigit = 0;
+	val = 0;
+	while ((clen < dlen) && (*src != term)) {
+		const char *pch;
+
+		ch = tolower(*src++);
+		clen++;
+
+		pch = strchr(xdigits, ch);
+		if (pch != NULL) {
+			val <<= 4;
+			val |= (pch - xdigits);
+			if (val > 0xffff)
+				return 0;
+
+			saw_xdigit = 1;
+			continue;
+		}
+		if (ch != ':') {
+			DEBUGP("get_ipv6_addr: invalid char. \'%c\'\n", ch);
+			return 0;
+		}
+
+		if (!saw_xdigit) {
+			if (colonp) {
+				DEBUGP("invalid location of \"::\".\n");
+				return 0;
+			}
+			colonp = tp;
+			continue;
+		} else if (*src == term) {
+			DEBUGP("trancated IPv6 addr\n");
+			return 0;
+		}
+
+		if (tp + 2 > endp)
+			return 0;
+		*tp++ = (u_int8_t) (val >> 8) & 0xff;
+		*tp++ = (u_int8_t) val & 0xff;
+
+		saw_xdigit = 0;
+		val = 0;
+		continue;
+	}
+	if (saw_xdigit) {
+		if (tp + 2 > endp)
+			return 0;
+		*tp++ = (u_int8_t) (val >> 8) & 0xff;
+		*tp++ = (u_int8_t) val & 0xff;
+	}
+	if (colonp != NULL) {
+		/*
+		 * Since some memmove()'s erroneously fail to handle
+		 * overlapping regions, we'll do the shift by hand.
+		 */
+		const int n = tp - colonp;
+		int i;
+
+		if (tp == endp)
+			return 0;
+
+		for (i = 1; i <= n; i++) {
+			endp[-i] = colonp[n - i];
+			colonp[n - i] = 0;
+		}
+		tp = endp;
+	}
+	if (tp != endp || (*src != term))
+		return 0;
+
+	memcpy(dst->s6_addr, tmp, sizeof(dst->s6_addr));
+	return clen;
+}
+
+static int try_number(const char *data, size_t dlen, u_int32_t array[],
+                      int array_size, char sep, char term)
+{
+	u_int32_t i, len;
+
+	memset(array, 0, sizeof(array[0])*array_size);
+
+	/* Keep data pointing at next char. */
+	for (i = 0, len = 0; len < dlen && i < array_size; len++, data++) {
+		if (*data >= '0' && *data <= '9') {
+			array[i] = array[i]*10 + *data - '0';
+		}
+		else if (*data == sep)
+			i++;
+		else {
+			/* Unexpected character; true if it's the
+			   terminator and we're finished. */
+			if (*data == term && i == array_size - 1)
+				return len;
+
+			DEBUGP("Char %u (got %u nums) `%u' unexpected\n",
+			       len, i, *data);
+			return 0;
+		}
+	}
+	DEBUGP("Failed to fill %u numbers separated by %c\n", array_size, sep);
+
+	return 0;
+}
+
+/* Returns 0, or length of numbers: 192,168,1,1,5,6 */
+static int try_rfc959(const char *data, size_t dlen,
+		      struct nf_conntrack_man *cmd, char term)
+{
+	int length;
+	u_int32_t array[6];
+
+	length = try_number(data, dlen, array, 6, ',', term);
+	if (length == 0)
+		return 0;
+
+	cmd->u3.ip =  htonl((array[0] << 24) | (array[1] << 16) |
+				    (array[2] << 8) | array[3]);
+	cmd->u.tcp.port = htons((array[4] << 8) | array[5]);
+	return length;
+}
+
+/* Grab port: number up to delimiter */
+static int get_port(const char *data, int start, size_t dlen, char delim,
+		    u_int16_t *port)
+{
+	u_int16_t tmp_port = 0;
+	int i;
+
+	for (i = start; i < dlen; i++) {
+		/* Finished? */
+		if (data[i] == delim) {
+			if (tmp_port == 0)
+				break;
+			*port = htons(tmp_port);
+			DEBUGP("get_port: return %d\n", tmp_port);
+			return i + 1;
+		}
+		else if (data[i] >= '0' && data[i] <= '9')
+			tmp_port = tmp_port*10 + data[i] - '0';
+		else { /* Some other crap */
+			DEBUGP("get_port: invalid char.\n");
+			break;
+		}
+	}
+	return 0;
+}
+
+/* Returns 0, or length of numbers: |1|132.235.1.2|6275| or |2|3ffe::1|6275| */
+static int try_eprt(const char *data, size_t dlen, struct nf_conntrack_man *cmd,
+		    char term)
+{
+	char delim;
+	int length;
+
+	/* First character is delimiter, then "1" for IPv4 or "2" for IPv6,
+	   then delimiter again. */
+	if (dlen <= 3) {
+		DEBUGP("EPRT: too short\n");
+		return 0;
+	}
+	delim = data[0];
+	if (isdigit(delim) || delim < 33 || delim > 126 || data[2] != delim) {
+		DEBUGP("try_eprt: invalid delimitter.\n");
+		return 0;
+	}
+
+	if ((cmd->l3num == PF_INET && data[1] != '1') ||
+	    (cmd->l3num == PF_INET6 && data[1] != '2')) {
+		DEBUGP("EPRT: invalid protocol number.\n");
+		return 0;
+	}
+
+	DEBUGP("EPRT: Got %c%c%c\n", delim, data[1], delim);
+
+	if (data[1] == '1') {
+		u_int32_t array[4];
+
+		/* Now we have IP address. */
+		length = try_number(data + 3, dlen - 3, array, 4, '.', delim);
+		if (length != 0)
+			cmd->u3.ip = htonl((array[0] << 24) | (array[1] << 16)
+					   | (array[2] << 8) | array[3]);
+	} else {
+		/* Now we have IPv6 address. */
+		length = get_ipv6_addr(data + 3, dlen - 3,
+				       (struct in6_addr *)cmd->u3.ip6, delim);
+	}
+
+	if (length == 0)
+		return 0;
+	DEBUGP("EPRT: Got IP address!\n");
+	/* Start offset includes initial "|1|", and trailing delimiter */
+	return get_port(data, 3 + length + 1, dlen, delim, &cmd->u.tcp.port);
+}
+
+/* Returns 0, or length of numbers: |||6446| */
+static int try_epsv_response(const char *data, size_t dlen,
+			     struct nf_conntrack_man *cmd, char term)
+{
+	char delim;
+
+	/* Three delimiters. */
+	if (dlen <= 3) return 0;
+	delim = data[0];
+	if (isdigit(delim) || delim < 33 || delim > 126
+	    || data[1] != delim || data[2] != delim)
+		return 0;
+
+	return get_port(data, 3, dlen, delim, &cmd->u.tcp.port);
+}
+
+/* Return 1 for match, 0 for accept, -1 for partial. */
+static int find_pattern(const char *data, size_t dlen,
+			const char *pattern, size_t plen,
+			char skip, char term,
+			unsigned int *numoff,
+			unsigned int *numlen,
+			struct nf_conntrack_man *cmd,
+			int (*getnum)(const char *, size_t,
+				      struct nf_conntrack_man *, char))
+{
+	size_t i;
+
+	DEBUGP("find_pattern `%s': dlen = %u\n", pattern, dlen);
+	if (dlen == 0)
+		return 0;
+
+	if (dlen <= plen) {
+		/* Short packet: try for partial? */
+		if (strnicmp(data, pattern, dlen) == 0)
+			return -1;
+		else return 0;
+	}
+
+	if (strnicmp(data, pattern, plen) != 0) {
+#if 0
+		size_t i;
+
+		DEBUGP("ftp: string mismatch\n");
+		for (i = 0; i < plen; i++) {
+			DEBUGP("ftp:char %u `%c'(%u) vs `%c'(%u)\n",
+				i, data[i], data[i],
+				pattern[i], pattern[i]);
+		}
+#endif
+		return 0;
+	}
+
+	DEBUGP("Pattern matches!\n");
+	/* Now we've found the constant string, try to skip
+	   to the 'skip' character */
+	for (i = plen; data[i] != skip; i++)
+		if (i == dlen - 1) return -1;
+
+	/* Skip over the last character */
+	i++;
+
+	DEBUGP("Skipped up to `%c'!\n", skip);
+
+	*numoff = i;
+	*numlen = getnum(data + i, dlen - i, cmd, term);
+	if (!*numlen)
+		return -1;
+
+	DEBUGP("Match succeeded!\n");
+	return 1;
+}
+
+/* Look up to see if we're just after a \n. */
+static int find_nl_seq(u32 seq, const struct ip_ct_ftp_master *info, int dir)
+{
+	unsigned int i;
+
+	for (i = 0; i < info->seq_aft_nl_num[dir]; i++)
+		if (info->seq_aft_nl[dir][i] == seq)
+			return 1;
+	return 0;
+}
+
+/* We don't update if it's older than what we have. */
+static void update_nl_seq(u32 nl_seq, struct ip_ct_ftp_master *info, int dir,
+			  struct sk_buff *skb)
+{
+	unsigned int i, oldest = NUM_SEQ_TO_REMEMBER;
+
+	/* Look for oldest: if we find exact match, we're done. */
+	for (i = 0; i < info->seq_aft_nl_num[dir]; i++) {
+		if (info->seq_aft_nl[dir][i] == nl_seq)
+			return;
+
+		if (oldest == info->seq_aft_nl_num[dir]
+		    || before(info->seq_aft_nl[dir][i], oldest))
+			oldest = i;
+	}
+
+	if (info->seq_aft_nl_num[dir] < NUM_SEQ_TO_REMEMBER) {
+		info->seq_aft_nl[dir][info->seq_aft_nl_num[dir]++] = nl_seq;
+		nf_conntrack_event_cache(IPCT_HELPINFO_VOLATILE, skb);
+	} else if (oldest != NUM_SEQ_TO_REMEMBER) {
+		info->seq_aft_nl[dir][oldest] = nl_seq;
+		nf_conntrack_event_cache(IPCT_HELPINFO_VOLATILE, skb);
+	}
+}
+
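+/* Main helper: scan the FTP control connection for PORT/PASV/EPRT/EPSV
+ * and set up an expectation for the resulting data connection. */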
+static int help(struct sk_buff **pskb,
+		unsigned int protoff,
+		struct nf_conn *ct,
+		enum ip_conntrack_info ctinfo)
+{
+	unsigned int dataoff, datalen;
+	struct tcphdr _tcph, *th;
+	char *fb_ptr;
+	int ret;
+	u32 seq;
+	int dir = CTINFO2DIR(ctinfo);
+	unsigned int matchlen, matchoff;
+	struct ip_ct_ftp_master *ct_ftp_info = &ct->help->ct_ftp_info;
+	struct nf_conntrack_expect *exp;
+	struct nf_conntrack_man cmd = {};
+
+	unsigned int i;
+	int found = 0, ends_in_nl;
+
+	/* Until there's been traffic both ways, don't look in packets. */
+	if (ctinfo != IP_CT_ESTABLISHED
+	    && ctinfo != IP_CT_ESTABLISHED+IP_CT_IS_REPLY) {
+		DEBUGP("ftp: Conntrackinfo = %u\n", ctinfo);
+		return NF_ACCEPT;
+	}
+
+	th = skb_header_pointer(*pskb, protoff, sizeof(_tcph), &_tcph);
+	if (th == NULL)
+		return NF_ACCEPT;
+
+	dataoff = protoff + th->doff * 4;
+	/* No data? */
+	if (dataoff >= (*pskb)->len) {
+		DEBUGP("ftp: dataoff(%u) >= skblen(%u)\n", dataoff,
+			(*pskb)->len);
+		return NF_ACCEPT;
+	}
+	datalen = (*pskb)->len - dataoff;
+
+	spin_lock_bh(&nf_ftp_lock);
+	fb_ptr = skb_header_pointer(*pskb, dataoff, datalen, ftp_buffer);
+	BUG_ON(fb_ptr == NULL);
+
+	ends_in_nl = (fb_ptr[datalen - 1] == '\n');
+	seq = ntohl(th->seq) + datalen;
+
+	/* Look up to see if we're just after a \n. */
+	if (!find_nl_seq(ntohl(th->seq), ct_ftp_info, dir)) {
+		/* Now if this ends in \n, update ftp info. */
+		DEBUGP("nf_conntrack_ftp_help: wrong seq pos %s(%u) or %s(%u)\n",
+		       ct_ftp_info->seq_aft_nl_num[dir] > 0 ? "" : "(UNSET)",
+		       ct_ftp_info->seq_aft_nl[dir][0],
+		       ct_ftp_info->seq_aft_nl_num[dir] > 1 ? "" : "(UNSET)",
+		       ct_ftp_info->seq_aft_nl[dir][1]);
+		ret = NF_ACCEPT;
+		goto out_update_nl;
+	}
+
+	/* Initialize IP/IPv6 addr to expected address (it's not mentioned
+	   in EPSV responses) */
+	cmd.l3num = ct->tuplehash[dir].tuple.src.l3num;
+	memcpy(cmd.u3.all, &ct->tuplehash[dir].tuple.src.u3.all,
+	       sizeof(cmd.u3.all));
+
+	for (i = 0; i < ARRAY_SIZE(search); i++) {
+		if (search[i].dir != dir) continue;
+
+		found = find_pattern(fb_ptr, datalen,
+				     search[i].pattern,
+				     search[i].plen,
+				     search[i].skip,
+				     search[i].term,
+				     &matchoff, &matchlen,
+				     &cmd,
+				     search[i].getnum);
+		if (found) break;
+	}
+	if (found == -1) {
+		/* We don't usually drop packets.  After all, this is
+		   connection tracking, not packet filtering.
+		   However, it is necessary for accurate tracking in
+		   this case. */
+		if (net_ratelimit())
+			printk("conntrack_ftp: partial %s %u+%u\n",
+			       search[i].pattern,
+			       ntohl(th->seq), datalen);
+		ret = NF_DROP;
+		goto out;
+	} else if (found == 0) { /* No match */
+		ret = NF_ACCEPT;
+		goto out_update_nl;
+	}
+
+	DEBUGP("conntrack_ftp: match `%.*s' (%u bytes at %u)\n",
+	       (int)matchlen, fb_ptr + matchoff,
+	       matchlen, ntohl(th->seq) + matchoff);
+
+	exp = nf_conntrack_expect_alloc(ct);
+	if (exp == NULL) {
+		ret = NF_DROP;
+		goto out;
+	}
+
+	/* We refer to the reverse direction ("!dir") tuples here,
+	 * because we're expecting something in the other direction.
+	 * Doesn't matter unless NAT is happening.  */
+	exp->tuple.dst.u3 = ct->tuplehash[!dir].tuple.dst.u3;
+
+	/* Update the ftp info */
+	if ((cmd.l3num == ct->tuplehash[dir].tuple.src.l3num) &&
+	    memcmp(&cmd.u3.all, &ct->tuplehash[dir].tuple.src.u3.all,
+		     sizeof(cmd.u3.all))) {
+		/* Enrico Scholz's passive FTP to partially RNAT'd ftp
+                   server: it really wants us to connect to a
+                   different IP address.  Simply don't record it for
+                   NAT. */
+		if (cmd.l3num == PF_INET) {
+			DEBUGP("conntrack_ftp: NOT RECORDING: %u.%u.%u.%u != %u.%u.%u.%u\n",
+			       NIPQUAD(cmd.u3.ip),
+			       NIPQUAD(ct->tuplehash[dir].tuple.src.u3.ip));
+		} else {
+			DEBUGP("conntrack_ftp: NOT RECORDING: %x:%x:%x:%x:%x:%x:%x:%x != %x:%x:%x:%x:%x:%x:%x:%x\n",
+			       NIP6(*((struct in6_addr *)cmd.u3.ip6)),
+			       NIP6(*((struct in6_addr *)ct->tuplehash[dir]
+							.tuple.src.u3.ip6)));
+		}
+
+		/* Thanks to Cristiano Lincoln Mattos
+		   <lincoln@cesar.org.br> for reporting this potential
+		   problem (DMZ machines opening holes to internal
+		   networks, or the packet filter itself). */
+		if (!loose) {
+			ret = NF_ACCEPT;
+			goto out_put_expect;
+		}
+		memcpy(&exp->tuple.dst.u3, &cmd.u3.all,
+		       sizeof(exp->tuple.dst.u3));
+	}
+
+	exp->tuple.src.u3 = ct->tuplehash[!dir].tuple.src.u3;
+	exp->tuple.src.l3num = cmd.l3num;
+	exp->tuple.src.u.tcp.port = 0;
+	exp->tuple.dst.u.tcp.port = cmd.u.tcp.port;
+	exp->tuple.dst.protonum = IPPROTO_TCP;
+
+	exp->mask = (struct nf_conntrack_tuple)
+		    { .src = { .l3num = 0xFFFF,
+			       .u = { .tcp = { 0 }},
+			     },
+		      .dst = { .protonum = 0xFF,
+			       .u = { .tcp = { 0xFFFF }},
+			     },
+		    };
+	if (cmd.l3num == PF_INET) {
+		exp->mask.src.u3.ip = 0xFFFFFFFF;
+		exp->mask.dst.u3.ip = 0xFFFFFFFF;
+	} else {
+		memset(exp->mask.src.u3.ip6, 0xFF,
+		       sizeof(exp->mask.src.u3.ip6));
+		memset(exp->mask.dst.u3.ip6, 0xFF,
+		       sizeof(exp->mask.dst.u3.ip6));
+	}
+
+	exp->expectfn = NULL;
+	exp->flags = 0;
+
+	/* Now, NAT might want to mangle the packet, and register the
+	 * (possibly changed) expectation itself. */
+	if (nf_nat_ftp_hook)
+		ret = nf_nat_ftp_hook(pskb, ctinfo, search[i].ftptype,
+				      matchoff, matchlen, exp, &seq);
+	else {
+		/* Can't expect this?  Best to drop packet now. */
+		if (nf_conntrack_expect_related(exp) != 0)
+			ret = NF_DROP;
+		else
+			ret = NF_ACCEPT;
+	}
+
+out_put_expect:
+	nf_conntrack_expect_put(exp);
+
+out_update_nl:
+	/* Now if this ends in \n, update ftp info.  Seq may have been
+	 * adjusted by NAT code. */
+	if (ends_in_nl)
+		update_nl_seq(seq, ct_ftp_info, dir, *pskb);
+out:
+	spin_unlock_bh(&nf_ftp_lock);
+	return ret;
+}
+
+static struct nf_conntrack_helper ftp[MAX_PORTS][2];
+static char ftp_names[MAX_PORTS][2][sizeof("ftp-65535")];
+
+/* don't make this __exit, since it's called from __init ! */
+static void fini(void)
+{
+	int i, j;
+	for (i = 0; i < ports_c; i++) {
+		for (j = 0; j < 2; j++) {
+			if (ftp[i][j].me == NULL)
+				continue;
+
+			DEBUGP("nf_ct_ftp: unregistering helper for pf: %d "
+			       "port: %d\n",
+				ftp[i][j].tuple.src.l3num, ports[i]);
+			nf_conntrack_helper_unregister(&ftp[i][j]);
+		}
+	}
+
+	kfree(ftp_buffer);
+}
+
+static int __init init(void)
+{
+	int i, j = -1, ret = 0;
+	char *tmpname;
+
+	ftp_buffer = kmalloc(65536, GFP_KERNEL);
+	if (!ftp_buffer)
+		return -ENOMEM;
+
+	if (ports_c == 0)
+		ports[ports_c++] = FTP_PORT;
+
+	/* FIXME should be configurable whether IPv4 and IPv6 FTP connections
+		 are tracked or not - YK */
+	for (i = 0; i < ports_c; i++) {
+		memset(&ftp[i], 0, sizeof(ftp[i]));
+
+		ftp[i][0].tuple.src.l3num = PF_INET;
+		ftp[i][1].tuple.src.l3num = PF_INET6;
+		for (j = 0; j < 2; j++) {
+			ftp[i][j].tuple.src.u.tcp.port = htons(ports[i]);
+			ftp[i][j].tuple.dst.protonum = IPPROTO_TCP;
+			ftp[i][j].mask.src.u.tcp.port = 0xFFFF;
+			ftp[i][j].mask.dst.protonum = 0xFF;
+			ftp[i][j].max_expected = 1;
+			ftp[i][j].timeout = 5 * 60;	/* 5 Minutes */
+			ftp[i][j].me = THIS_MODULE;
+			ftp[i][j].help = help;
+			tmpname = &ftp_names[i][j][0];
+			if (ports[i] == FTP_PORT)
+				sprintf(tmpname, "ftp");
+			else
+				sprintf(tmpname, "ftp-%d", ports[i]);
+			ftp[i][j].name = tmpname;
+
+			DEBUGP("nf_ct_ftp: registering helper for pf: %d "
+			       "port: %d\n",
+				ftp[i][j].tuple.src.l3num, ports[i]);
+			ret = nf_conntrack_helper_register(&ftp[i][j]);
+			if (ret) {
+				printk("nf_ct_ftp: failed to register helper "
+				       " for pf: %d port: %d\n",
+					ftp[i][j].tuple.src.l3num, ports[i]);
+				fini();
+				return ret;
+			}
+		}
+	}
+
+	return 0;
+}
+
+module_init(init);
+module_exit(fini);
diff --git a/net/netfilter/nf_conntrack_l3proto_generic.c b/net/netfilter/nf_conntrack_l3proto_generic.c
new file mode 100644
index 0000000..7de4f06
--- /dev/null
+++ b/net/netfilter/nf_conntrack_l3proto_generic.c
@@ -0,0 +1,98 @@
+/*
+ * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
+ *
+ * Based largely upon the original ip_conntrack code which
+ * had the following copyright information:
+ *
+ * (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Author:
+ *	Yasuyuki Kozakai @USAGI	<yasuyuki.kozakai@toshiba.co.jp>
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/ip.h>
+#include <linux/netfilter.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/icmp.h>
+#include <linux/sysctl.h>
+#include <net/ip.h>
+
+#include <linux/netfilter_ipv4.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_protocol.h>
+#include <net/netfilter/nf_conntrack_l3proto.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(format, args...)
+#endif
+
+DECLARE_PER_CPU(struct nf_conntrack_stat, nf_conntrack_stat);
+
+static int generic_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff,
+				struct nf_conntrack_tuple *tuple)
+{
+	memset(&tuple->src.u3, 0, sizeof(tuple->src.u3));
+	memset(&tuple->dst.u3, 0, sizeof(tuple->dst.u3));
+
+	return 1;
+}
+
+static int generic_invert_tuple(struct nf_conntrack_tuple *tuple,
+			   const struct nf_conntrack_tuple *orig)
+{
+	memset(&tuple->src.u3, 0, sizeof(tuple->src.u3));
+	memset(&tuple->dst.u3, 0, sizeof(tuple->dst.u3));
+
+	return 1;
+}
+
+static int generic_print_tuple(struct seq_file *s,
+			    const struct nf_conntrack_tuple *tuple)
+{
+	return 0;
+}
+
+static int generic_print_conntrack(struct seq_file *s,
+				const struct nf_conn *conntrack)
+{
+	return 0;
+}
+
+static int
+generic_prepare(struct sk_buff **pskb, unsigned int hooknum,
+		unsigned int *dataoff, u_int8_t *protonum)
+{
+	/* Never track !!! */
+	return -NF_ACCEPT;
+}
+
+static u_int32_t generic_get_features(const struct nf_conntrack_tuple *tuple)
+{
+	return NF_CT_F_BASIC;
+}
+
+struct nf_conntrack_l3proto nf_conntrack_generic_l3proto = {
+	.l3proto	 = PF_UNSPEC,
+	.name		 = "unknown",
+	.pkt_to_tuple	 = generic_pkt_to_tuple,
+	.invert_tuple	 = generic_invert_tuple,
+	.print_tuple	 = generic_print_tuple,
+	.print_conntrack = generic_print_conntrack,
+	.prepare	 = generic_prepare,
+	.get_features	 = generic_get_features,
+	.me		 = THIS_MODULE,
+};
diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c
new file mode 100644
index 0000000..36425f6
--- /dev/null
+++ b/net/netfilter/nf_conntrack_proto_generic.c
@@ -0,0 +1,85 @@
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ *	- enable working with L3 protocol independent connection tracking.
+ *
+ * Derived from net/ipv4/netfilter/ip_conntrack_proto_generic.c
+ */
+
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/netfilter.h>
+#include <net/netfilter/nf_conntrack_protocol.h>
+
+unsigned long nf_ct_generic_timeout = 600*HZ;
+
+static int generic_pkt_to_tuple(const struct sk_buff *skb,
+				unsigned int dataoff,
+				struct nf_conntrack_tuple *tuple)
+{
+	tuple->src.u.all = 0;
+	tuple->dst.u.all = 0;
+
+	return 1;
+}
+
+static int generic_invert_tuple(struct nf_conntrack_tuple *tuple,
+				const struct nf_conntrack_tuple *orig)
+{
+	tuple->src.u.all = 0;
+	tuple->dst.u.all = 0;
+
+	return 1;
+}
+
+/* Print out the per-protocol part of the tuple. */
+static int generic_print_tuple(struct seq_file *s,
+			       const struct nf_conntrack_tuple *tuple)
+{
+	return 0;
+}
+
+/* Print out the private part of the conntrack. */
+static int generic_print_conntrack(struct seq_file *s,
+				   const struct nf_conn *state)
+{
+	return 0;
+}
+
+/* Returns verdict for packet, or -1 for invalid. */
+static int packet(struct nf_conn *conntrack,
+		  const struct sk_buff *skb,
+		  unsigned int dataoff,
+		  enum ip_conntrack_info ctinfo,
+		  int pf,
+		  unsigned int hooknum)
+{
+	nf_ct_refresh_acct(conntrack, ctinfo, skb, nf_ct_generic_timeout);
+	return NF_ACCEPT;
+}
+
+/* Called when a new connection for this protocol found. */
+static int new(struct nf_conn *conntrack, const struct sk_buff *skb,
+	       unsigned int dataoff)
+{
+	return 1;
+}
+
+struct nf_conntrack_protocol nf_conntrack_generic_protocol =
+{
+	.l3proto		= PF_UNSPEC,
+	.proto			= 0,
+	.name			= "unknown",
+	.pkt_to_tuple		= generic_pkt_to_tuple,
+	.invert_tuple		= generic_invert_tuple,
+	.print_tuple		= generic_print_tuple,
+	.print_conntrack	= generic_print_conntrack,
+	.packet			= packet,
+	.new			= new,
+};
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
new file mode 100644
index 0000000..3a600f7
--- /dev/null
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -0,0 +1,670 @@
+/*
+ * Connection tracking protocol helper module for SCTP.
+ * 
+ * SCTP is defined in RFC 2960. References to various sections in this code 
+ * are to this RFC.
+ * 
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * 17 Oct 2004: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ *	- enable working with L3 protocol independent connection tracking.
+ *
+ * Derived from net/ipv4/ip_conntrack_sctp.c
+ */
+
+/*
+ * Added support for proc manipulation of timeouts.
+ */
+
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/netfilter.h>
+#include <linux/module.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/sctp.h>
+#include <linux/string.h>
+#include <linux/seq_file.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_protocol.h>
+
+#if 0
+#define DEBUGP(format, ...) printk(format, ## __VA_ARGS__)
+#else
+#define DEBUGP(format, args...)
+#endif
+
+/* Protects conntrack->proto.sctp */
+static DEFINE_RWLOCK(sctp_lock);
+
+/* FIXME: Examine ipfilter's timeouts and conntrack transitions more
+   closely.  They're more complex. --RR 
+
+   And so for me for SCTP :D -Kiran */
+
+static const char *sctp_conntrack_names[] = {
+	"NONE",
+	"CLOSED",
+	"COOKIE_WAIT",
+	"COOKIE_ECHOED",
+	"ESTABLISHED",
+	"SHUTDOWN_SENT",
+	"SHUTDOWN_RECD",
+	"SHUTDOWN_ACK_SENT",
+};
+
+#define SECS  * HZ
+#define MINS  * 60 SECS
+#define HOURS * 60 MINS
+#define DAYS  * 24 HOURS
+
+static unsigned long nf_ct_sctp_timeout_closed            =  10 SECS;
+static unsigned long nf_ct_sctp_timeout_cookie_wait       =   3 SECS;
+static unsigned long nf_ct_sctp_timeout_cookie_echoed     =   3 SECS;
+static unsigned long nf_ct_sctp_timeout_established       =   5 DAYS;
+static unsigned long nf_ct_sctp_timeout_shutdown_sent     = 300 SECS / 1000;
+static unsigned long nf_ct_sctp_timeout_shutdown_recd     = 300 SECS / 1000;
+static unsigned long nf_ct_sctp_timeout_shutdown_ack_sent =   3 SECS;
+
+static unsigned long * sctp_timeouts[]
+= { NULL,                                  /* SCTP_CONNTRACK_NONE  */
+    &nf_ct_sctp_timeout_closed,	           /* SCTP_CONNTRACK_CLOSED */
+    &nf_ct_sctp_timeout_cookie_wait,       /* SCTP_CONNTRACK_COOKIE_WAIT */
+    &nf_ct_sctp_timeout_cookie_echoed,     /* SCTP_CONNTRACK_COOKIE_ECHOED */
+    &nf_ct_sctp_timeout_established,       /* SCTP_CONNTRACK_ESTABLISHED */
+    &nf_ct_sctp_timeout_shutdown_sent,     /* SCTP_CONNTRACK_SHUTDOWN_SENT */
+    &nf_ct_sctp_timeout_shutdown_recd,     /* SCTP_CONNTRACK_SHUTDOWN_RECD */
+    &nf_ct_sctp_timeout_shutdown_ack_sent  /* SCTP_CONNTRACK_SHUTDOWN_ACK_SENT */
+ };
+
+#define sNO SCTP_CONNTRACK_NONE
+#define	sCL SCTP_CONNTRACK_CLOSED
+#define	sCW SCTP_CONNTRACK_COOKIE_WAIT
+#define	sCE SCTP_CONNTRACK_COOKIE_ECHOED
+#define	sES SCTP_CONNTRACK_ESTABLISHED
+#define	sSS SCTP_CONNTRACK_SHUTDOWN_SENT
+#define	sSR SCTP_CONNTRACK_SHUTDOWN_RECD
+#define	sSA SCTP_CONNTRACK_SHUTDOWN_ACK_SENT
+#define	sIV SCTP_CONNTRACK_MAX
+
+/* 
+	These are the descriptions of the states:
+
+NOTE: These state names are tantalizingly similar to the states of an 
+SCTP endpoint. But the interpretation of the states is a little different,
+considering that these are the states of the connection and not of an end 
+point. Please note the subtleties. -Kiran
+
+NONE              - Nothing so far.
+COOKIE WAIT       - We have seen an INIT chunk in the original direction, or
+                    an INIT_ACK chunk in the reply direction.
+COOKIE ECHOED     - We have seen a COOKIE_ECHO chunk in the original direction.
+ESTABLISHED       - We have seen a COOKIE_ACK in the reply direction.
+SHUTDOWN_SENT     - We have seen a SHUTDOWN chunk in the original direction.
+SHUTDOWN_RECD     - We have seen a SHUTDOWN chunk in the reply direction.
+SHUTDOWN_ACK_SENT - We have seen a SHUTDOWN_ACK chunk in the direction opposite
+                    to that of the SHUTDOWN chunk.
+CLOSED            - We have seen a SHUTDOWN_COMPLETE chunk in the direction of 
+                    the SHUTDOWN chunk. Connection is closed.
+*/
+
+/* TODO
+ - I have assumed that the first INIT is in the original direction. 
+ This messes things up when an INIT comes in the reply direction in CLOSED
+ state.
+ - Check the error type in the reply dir before transitioning from 
+cookie echoed to closed.
+ - Sec 5.2.4 of RFC 2960
+ - Multi Homing support.
+*/
+
+/* SCTP conntrack state transitions */
+static enum sctp_conntrack sctp_conntracks[2][9][SCTP_CONNTRACK_MAX] = {
+	{
+/*	ORIGINAL	*/
+/*                  sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA */
+/* init         */ {sCW, sCW, sCW, sCE, sES, sSS, sSR, sSA},
+/* init_ack     */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},
+/* abort        */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
+/* shutdown     */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA},
+/* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA},
+/* error        */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Can't have Stale cookie*/
+/* cookie_echo  */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA},/* 5.2.4 - Big TODO */
+/* cookie_ack   */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Can't come in orig dir */
+/* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL}
+	},
+	{
+/*	REPLY	*/
+/*                  sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA */
+/* init         */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* INIT in sCL Big TODO */
+/* init_ack     */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA},
+/* abort        */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
+/* shutdown     */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA},
+/* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA},
+/* error        */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA},
+/* cookie_echo  */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Can't come in reply dir */
+/* cookie_ack   */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA},
+/* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL}
+	}
+};
+
+static int sctp_pkt_to_tuple(const struct sk_buff *skb,
+			     unsigned int dataoff,
+			     struct nf_conntrack_tuple *tuple)
+{
+	sctp_sctphdr_t _hdr, *hp;
+
+	DEBUGP(__FUNCTION__);
+	DEBUGP("\n");
+
+	/* Actually only need first 8 bytes. */
+	hp = skb_header_pointer(skb, dataoff, 8, &_hdr);
+	if (hp == NULL)
+		return 0;
+
+	tuple->src.u.sctp.port = hp->source;
+	tuple->dst.u.sctp.port = hp->dest;
+	return 1;
+}
+
+static int sctp_invert_tuple(struct nf_conntrack_tuple *tuple,
+			     const struct nf_conntrack_tuple *orig)
+{
+	DEBUGP(__FUNCTION__);
+	DEBUGP("\n");
+
+	tuple->src.u.sctp.port = orig->dst.u.sctp.port;
+	tuple->dst.u.sctp.port = orig->src.u.sctp.port;
+	return 1;
+}
+
+/* Print out the per-protocol part of the tuple. */
+static int sctp_print_tuple(struct seq_file *s,
+			    const struct nf_conntrack_tuple *tuple)
+{
+	DEBUGP(__FUNCTION__);
+	DEBUGP("\n");
+
+	return seq_printf(s, "sport=%hu dport=%hu ",
+			  ntohs(tuple->src.u.sctp.port),
+			  ntohs(tuple->dst.u.sctp.port));
+}
+
+/* Print out the private part of the conntrack. */
+static int sctp_print_conntrack(struct seq_file *s,
+				const struct nf_conn *conntrack)
+{
+	enum sctp_conntrack state;
+
+	DEBUGP(__FUNCTION__);
+	DEBUGP("\n");
+
+	read_lock_bh(&sctp_lock);
+	state = conntrack->proto.sctp.state;
+	read_unlock_bh(&sctp_lock);
+
+	return seq_printf(s, "%s ", sctp_conntrack_names[state]);
+}
+
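+/* Walk every chunk in the packet, rounding each chunk length up to a
+ * multiple of four bytes as required by RFC 2960. */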
+#define for_each_sctp_chunk(skb, sch, _sch, offset, dataoff, count)	\
+for (offset = dataoff + sizeof(sctp_sctphdr_t), count = 0;		\
+	offset < skb->len &&						\
+	(sch = skb_header_pointer(skb, offset, sizeof(_sch), &_sch));	\
+	offset += (htons(sch->length) + 3) & ~3, count++)
+
+/* Some validity checks to make sure the chunks are fine */
+static int do_basic_checks(struct nf_conn *conntrack,
+			   const struct sk_buff *skb,
+			   unsigned int dataoff,
+			   char *map)
+{
+	u_int32_t offset, count;
+	sctp_chunkhdr_t _sch, *sch;
+	int flag;
+
+	DEBUGP(__FUNCTION__);
+	DEBUGP("\n");
+
+	flag = 0;
+
+	for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) {
+		DEBUGP("Chunk Num: %d  Type: %d\n", count, sch->type);
+
+		if (sch->type == SCTP_CID_INIT 
+			|| sch->type == SCTP_CID_INIT_ACK
+			|| sch->type == SCTP_CID_SHUTDOWN_COMPLETE) {
+			flag = 1;
+		}
+
+		/* Cookie Ack/Echo chunks not the first OR 
+		   Init / Init Ack / Shutdown compl chunks not the only chunks */
+		if ((sch->type == SCTP_CID_COOKIE_ACK 
+			|| sch->type == SCTP_CID_COOKIE_ECHO
+			|| flag)
+		     && count !=0 ) {
+			DEBUGP("Basic checks failed\n");
+			return 1;
+		}
+
+		if (map) {
+			set_bit(sch->type, (void *)map);
+		}
+	}
+
+	DEBUGP("Basic checks passed\n");
+	return 0;
+}
+
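+/* Map the current state and chunk type to the next state via the transition
+ * table; chunk types that don't affect state return the current state. */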
+static int new_state(enum ip_conntrack_dir dir,
+		     enum sctp_conntrack cur_state,
+		     int chunk_type)
+{
+	int i;
+
+	DEBUGP(__FUNCTION__);
+	DEBUGP("\n");
+
+	DEBUGP("Chunk type: %d\n", chunk_type);
+
+	switch (chunk_type) {
+		case SCTP_CID_INIT: 
+			DEBUGP("SCTP_CID_INIT\n");
+			i = 0; break;
+		case SCTP_CID_INIT_ACK: 
+			DEBUGP("SCTP_CID_INIT_ACK\n");
+			i = 1; break;
+		case SCTP_CID_ABORT: 
+			DEBUGP("SCTP_CID_ABORT\n");
+			i = 2; break;
+		case SCTP_CID_SHUTDOWN: 
+			DEBUGP("SCTP_CID_SHUTDOWN\n");
+			i = 3; break;
+		case SCTP_CID_SHUTDOWN_ACK: 
+			DEBUGP("SCTP_CID_SHUTDOWN_ACK\n");
+			i = 4; break;
+		case SCTP_CID_ERROR: 
+			DEBUGP("SCTP_CID_ERROR\n");
+			i = 5; break;
+		case SCTP_CID_COOKIE_ECHO: 
+			DEBUGP("SCTP_CID_COOKIE_ECHO\n");
+			i = 6; break;
+		case SCTP_CID_COOKIE_ACK: 
+			DEBUGP("SCTP_CID_COOKIE_ACK\n");
+			i = 7; break;
+		case SCTP_CID_SHUTDOWN_COMPLETE: 
+			DEBUGP("SCTP_CID_SHUTDOWN_COMPLETE\n");
+			i = 8; break;
+		default:
+			/* Other chunks like DATA, SACK, HEARTBEAT and
+			its ACK do not cause a change in state */
+			DEBUGP("Unknown chunk type, Will stay in %s\n", 
+						sctp_conntrack_names[cur_state]);
+			return cur_state;
+	}
+
+	DEBUGP("dir: %d   cur_state: %s  chunk_type: %d  new_state: %s\n", 
+			dir, sctp_conntrack_names[cur_state], chunk_type,
+			sctp_conntrack_names[sctp_conntracks[dir][i][cur_state]]);
+
+	return sctp_conntracks[dir][i][cur_state];
+}
+
+/* Returns verdict for packet, or -1 for invalid. */
+static int sctp_packet(struct nf_conn *conntrack,
+		       const struct sk_buff *skb,
+		       unsigned int dataoff,
+		       enum ip_conntrack_info ctinfo,
+		       int pf,
+		       unsigned int hooknum)
+{
+	enum sctp_conntrack newconntrack, oldsctpstate;
+	sctp_sctphdr_t _sctph, *sh;
+	sctp_chunkhdr_t _sch, *sch;
+	u_int32_t offset, count;
+	char map[256 / sizeof (char)] = {0};
+
+	DEBUGP(__FUNCTION__);
+	DEBUGP("\n");
+
+	sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph);
+	if (sh == NULL)
+		return -1;
+
+	if (do_basic_checks(conntrack, skb, dataoff, map) != 0)
+		return -1;
+
+	/* Check the verification tag (Sec 8.5) */
+	if (!test_bit(SCTP_CID_INIT, (void *)map)
+		&& !test_bit(SCTP_CID_SHUTDOWN_COMPLETE, (void *)map)
+		&& !test_bit(SCTP_CID_COOKIE_ECHO, (void *)map)
+		&& !test_bit(SCTP_CID_ABORT, (void *)map)
+		&& !test_bit(SCTP_CID_SHUTDOWN_ACK, (void *)map)
+		&& (sh->vtag != conntrack->proto.sctp.vtag[CTINFO2DIR(ctinfo)])) {
+		DEBUGP("Verification tag check failed\n");
+		return -1;
+	}
+
+	oldsctpstate = newconntrack = SCTP_CONNTRACK_MAX;
+	for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) {
+		write_lock_bh(&sctp_lock);
+
+		/* Special cases of Verification tag check (Sec 8.5.1) */
+		if (sch->type == SCTP_CID_INIT) {
+			/* Sec 8.5.1 (A) */
+			if (sh->vtag != 0) {
+				write_unlock_bh(&sctp_lock);
+				return -1;
+			}
+		} else if (sch->type == SCTP_CID_ABORT) {
+			/* Sec 8.5.1 (B) */
+			if (!(sh->vtag == conntrack->proto.sctp.vtag[CTINFO2DIR(ctinfo)])
+				&& !(sh->vtag == conntrack->proto.sctp.vtag
+							[1 - CTINFO2DIR(ctinfo)])) {
+				write_unlock_bh(&sctp_lock);
+				return -1;
+			}
+		} else if (sch->type == SCTP_CID_SHUTDOWN_COMPLETE) {
+			/* Sec 8.5.1 (C) */
+			if (!(sh->vtag == conntrack->proto.sctp.vtag[CTINFO2DIR(ctinfo)])
+				&& !(sh->vtag == conntrack->proto.sctp.vtag
+							[1 - CTINFO2DIR(ctinfo)] 
+					&& (sch->flags & 1))) {
+				write_unlock_bh(&sctp_lock);
+				return -1;
+			}
+		} else if (sch->type == SCTP_CID_COOKIE_ECHO) {
+			/* Sec 8.5.1 (D) */
+			if (!(sh->vtag == conntrack->proto.sctp.vtag[CTINFO2DIR(ctinfo)])) {
+				write_unlock_bh(&sctp_lock);
+				return -1;
+			}
+		}
+
+		oldsctpstate = conntrack->proto.sctp.state;
+		newconntrack = new_state(CTINFO2DIR(ctinfo), oldsctpstate, sch->type);
+
+		/* Invalid */
+		if (newconntrack == SCTP_CONNTRACK_MAX) {
+			DEBUGP("nf_conntrack_sctp: Invalid dir=%i ctype=%u conntrack=%u\n",
+			       CTINFO2DIR(ctinfo), sch->type, oldsctpstate);
+			write_unlock_bh(&sctp_lock);
+			return -1;
+		}
+
+		/* If it is an INIT or an INIT ACK note down the vtag */
+		if (sch->type == SCTP_CID_INIT 
+			|| sch->type == SCTP_CID_INIT_ACK) {
+			sctp_inithdr_t _inithdr, *ih;
+
+			ih = skb_header_pointer(skb, offset + sizeof(sctp_chunkhdr_t),
+			                        sizeof(_inithdr), &_inithdr);
+			if (ih == NULL) {
+				write_unlock_bh(&sctp_lock);
+				return -1;
+			}
+			DEBUGP("Setting vtag %x for dir %d\n", 
+					ih->init_tag, !CTINFO2DIR(ctinfo));
+			conntrack->proto.sctp.vtag[!CTINFO2DIR(ctinfo)] = ih->init_tag;
+		}
+
+		conntrack->proto.sctp.state = newconntrack;
+		if (oldsctpstate != newconntrack)
+			nf_conntrack_event_cache(IPCT_PROTOINFO, skb);
+		write_unlock_bh(&sctp_lock);
+	}
+
+	nf_ct_refresh_acct(conntrack, ctinfo, skb, *sctp_timeouts[newconntrack]);
+
+	if (oldsctpstate == SCTP_CONNTRACK_COOKIE_ECHOED
+		&& CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY
+		&& newconntrack == SCTP_CONNTRACK_ESTABLISHED) {
+		DEBUGP("Setting assured bit\n");
+		set_bit(IPS_ASSURED_BIT, &conntrack->status);
+		nf_conntrack_event_cache(IPCT_STATUS, skb);
+	}
+
+	return NF_ACCEPT;
+}
+
+/* Called when a new connection for this protocol found. */
+static int sctp_new(struct nf_conn *conntrack, const struct sk_buff *skb,
+		    unsigned int dataoff)
+{
+	enum sctp_conntrack newconntrack;
+	sctp_sctphdr_t _sctph, *sh;
+	sctp_chunkhdr_t _sch, *sch;
+	u_int32_t offset, count;
+	char map[256 / sizeof (char)] = {0};
+
+	DEBUGP(__FUNCTION__);
+	DEBUGP("\n");
+
+	sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph);
+	if (sh == NULL)
+		return 0;
+
+	if (do_basic_checks(conntrack, skb, dataoff, map) != 0)
+		return 0;
+
+	/* If an OOTB packet has any of these chunks discard (Sec 8.4) */
+	if ((test_bit (SCTP_CID_ABORT, (void *)map))
+		|| (test_bit (SCTP_CID_SHUTDOWN_COMPLETE, (void *)map))
+		|| (test_bit (SCTP_CID_COOKIE_ACK, (void *)map))) {
+		return 0;
+	}
+
+	newconntrack = SCTP_CONNTRACK_MAX;
+	for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) {
+		/* Don't need lock here: this conntrack not in circulation yet */
+		newconntrack = new_state(IP_CT_DIR_ORIGINAL, 
+					 SCTP_CONNTRACK_NONE, sch->type);
+
+		/* Invalid: delete conntrack */
+		if (newconntrack == SCTP_CONNTRACK_MAX) {
+			DEBUGP("nf_conntrack_sctp: invalid new deleting.\n");
+			return 0;
+		}
+
+		/* Copy the vtag into the state info */
+		if (sch->type == SCTP_CID_INIT) {
+			if (sh->vtag == 0) {
+				sctp_inithdr_t _inithdr, *ih;
+
+				ih = skb_header_pointer(skb, offset + sizeof(sctp_chunkhdr_t),
+				                        sizeof(_inithdr), &_inithdr);
+				if (ih == NULL)
+					return 0;
+
+				DEBUGP("Setting vtag %x for new conn\n", 
+					ih->init_tag);
+
+				conntrack->proto.sctp.vtag[IP_CT_DIR_REPLY] = 
+								ih->init_tag;
+			} else {
+				/* Sec 8.5.1 (A) */
+				return 0;
+			}
+		}
+		/* If it is a shutdown ack OOTB packet, we expect a return
+		   shutdown complete, otherwise an ABORT Sec 8.4 (5) and (8) */
+		else {
+			DEBUGP("Setting vtag %x for new conn OOTB\n", 
+				sh->vtag);
+			conntrack->proto.sctp.vtag[IP_CT_DIR_REPLY] = sh->vtag;
+		}
+
+		conntrack->proto.sctp.state = newconntrack;
+	}
+
+	return 1;
+}
+
+struct nf_conntrack_protocol nf_conntrack_protocol_sctp4 = { 
+	.l3proto	 = PF_INET,
+	.proto 		 = IPPROTO_SCTP, 
+	.name 		 = "sctp",
+	.pkt_to_tuple 	 = sctp_pkt_to_tuple, 
+	.invert_tuple 	 = sctp_invert_tuple, 
+	.print_tuple 	 = sctp_print_tuple, 
+	.print_conntrack = sctp_print_conntrack,
+	.packet 	 = sctp_packet, 
+	.new 		 = sctp_new, 
+	.destroy 	 = NULL, 
+	.me 		 = THIS_MODULE 
+};
+
+struct nf_conntrack_protocol nf_conntrack_protocol_sctp6 = { 
+	.l3proto	 = PF_INET6,
+	.proto 		 = IPPROTO_SCTP, 
+	.name 		 = "sctp",
+	.pkt_to_tuple 	 = sctp_pkt_to_tuple, 
+	.invert_tuple 	 = sctp_invert_tuple, 
+	.print_tuple 	 = sctp_print_tuple, 
+	.print_conntrack = sctp_print_conntrack,
+	.packet 	 = sctp_packet, 
+	.new 		 = sctp_new, 
+	.destroy 	 = NULL, 
+	.me 		 = THIS_MODULE 
+};
+
+#ifdef CONFIG_SYSCTL
+static ctl_table nf_ct_sysctl_table[] = {
+	{
+		.ctl_name	= NET_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED,
+		.procname	= "nf_conntrack_sctp_timeout_closed",
+		.data		= &nf_ct_sctp_timeout_closed,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT,
+		.procname	= "nf_conntrack_sctp_timeout_cookie_wait",
+		.data		= &nf_ct_sctp_timeout_cookie_wait,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED,
+		.procname	= "nf_conntrack_sctp_timeout_cookie_echoed",
+		.data		= &nf_ct_sctp_timeout_cookie_echoed,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= NET_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED,
+		.procname	= "nf_conntrack_sctp_timeout_established",
+		.data		= &nf_ct_sctp_timeout_established,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT,
+		.procname	= "nf_conntrack_sctp_timeout_shutdown_sent",
+		.data		= &nf_ct_sctp_timeout_shutdown_sent,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD,
+		.procname	= "nf_conntrack_sctp_timeout_shutdown_recd",
+		.data		= &nf_ct_sctp_timeout_shutdown_recd,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT,
+		.procname	= "nf_conntrack_sctp_timeout_shutdown_ack_sent",
+		.data		= &nf_ct_sctp_timeout_shutdown_ack_sent,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{ .ctl_name = 0 }
+};
+
+static ctl_table nf_ct_netfilter_table[] = {
+	{
+		.ctl_name	= NET_NETFILTER,
+		.procname	= "netfilter",
+		.mode		= 0555,
+		.child		= nf_ct_sysctl_table,
+	},
+	{ .ctl_name = 0 }
+};
+
+static ctl_table nf_ct_net_table[] = {
+	{
+		.ctl_name	= CTL_NET,
+		.procname	= "net",
+		.mode		= 0555, 
+		.child		= nf_ct_netfilter_table,
+	},
+	{ .ctl_name = 0 }
+};
+
+static struct ctl_table_header *nf_ct_sysctl_header;
+#endif
+
+int __init init(void)
+{
+	int ret;
+
+	ret = nf_conntrack_protocol_register(&nf_conntrack_protocol_sctp4);
+	if (ret) {
+		printk("nf_conntrack_proto_sctp4: protocol register failed\n");
+		goto out;
+	}
+	ret = nf_conntrack_protocol_register(&nf_conntrack_protocol_sctp6);
+	if (ret) {
+		printk("nf_conntrack_proto_sctp6: protocol register failed\n");
+		goto cleanup_sctp4;
+	}
+
+#ifdef CONFIG_SYSCTL
+	nf_ct_sysctl_header = register_sysctl_table(nf_ct_net_table, 0);
+	if (nf_ct_sysctl_header == NULL) {
+		printk("nf_conntrack_proto_sctp: can't register to sysctl.\n");
+		goto cleanup;
+	}
+#endif
+
+	return ret;
+
+#ifdef CONFIG_SYSCTL
+ cleanup:
+	nf_conntrack_protocol_unregister(&nf_conntrack_protocol_sctp6);
+#endif
+ cleanup_sctp4:
+	nf_conntrack_protocol_unregister(&nf_conntrack_protocol_sctp4);
+ out:
+	DEBUGP("SCTP conntrack module loading %s\n", 
+					ret ? "failed": "succeeded");
+	return ret;
+}
+
+void __exit fini(void)
+{
+	nf_conntrack_protocol_unregister(&nf_conntrack_protocol_sctp6);
+	nf_conntrack_protocol_unregister(&nf_conntrack_protocol_sctp4);
+#ifdef CONFIG_SYSCTL
+ 	unregister_sysctl_table(nf_ct_sysctl_header);
+#endif
+	DEBUGP("SCTP conntrack module unloaded\n");
+}
+
+module_init(init);
+module_exit(fini);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kiran Kumar Immidi");
+MODULE_DESCRIPTION("Netfilter connection tracking protocol helper for SCTP");
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
new file mode 100644
index 0000000..83d90dd
--- /dev/null
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -0,0 +1,1162 @@
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>:
+ *	- Real stateful connection tracking
+ *	- Modified state transitions table
+ *	- Window scaling support added
+ *	- SACK support added
+ *
+ * Willy Tarreau:
+ *	- State table bugfixes
+ *	- More robust state changes
+ *	- Tuning timer parameters
+ *
+ * 27 Oct 2004: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ *	- generalized Layer 3 protocol part.
+ *
+ * Derived from net/ipv4/netfilter/ip_conntrack_proto_tcp.c
+ *
+ * version 2.2
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/netfilter.h>
+#include <linux/module.h>
+#include <linux/in.h>
+#include <linux/tcp.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/ipv6.h>
+#include <net/ip6_checksum.h>
+
+#include <net/tcp.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_ipv6.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_protocol.h>
+
+#if 0
+#define DEBUGP printk
+#define DEBUGP_VARS
+#else
+#define DEBUGP(format, args...)
+#endif
+
+/* Protects conntrack->proto.tcp */
+static DEFINE_RWLOCK(tcp_lock);
+
+/* "Be conservative in what you do, 
+    be liberal in what you accept from others." 
+    If it's non-zero, we mark only out of window RST segments as INVALID. */
+int nf_ct_tcp_be_liberal = 0;
+
+/* When connection is picked up from the middle, how many packets are required
+   to pass in each direction when we assume we are in sync - if any side uses
+   window scaling, we lost the game. 
+   If it is set to zero, we disable picking up already established 
+   connections. */
+int nf_ct_tcp_loose = 3;
+
+/* Max number of the retransmitted packets without receiving an (acceptable) 
+   ACK from the destination. If this number is reached, a shorter timer 
+   will be started. */
+int nf_ct_tcp_max_retrans = 3;
+
+  /* FIXME: Examine ipfilter's timeouts and conntrack transitions more
+     closely.  They're more complex. --RR */
+
+static const char *tcp_conntrack_names[] = {
+	"NONE",
+	"SYN_SENT",
+	"SYN_RECV",
+	"ESTABLISHED",
+	"FIN_WAIT",
+	"CLOSE_WAIT",
+	"LAST_ACK",
+	"TIME_WAIT",
+	"CLOSE",
+	"LISTEN"
+};
+  
+#define SECS * HZ
+#define MINS * 60 SECS
+#define HOURS * 60 MINS
+#define DAYS * 24 HOURS
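+
+/* Note: these are object-like macros, so "2 MINS" below expands to
+ * "2 * 60 * HZ" and "5 DAYS" to "5 * 24 * 60 * 60 * HZ" - every
+ * timeout is a plain jiffy count. */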
+
+unsigned long nf_ct_tcp_timeout_syn_sent =      2 MINS;
+unsigned long nf_ct_tcp_timeout_syn_recv =     60 SECS;
+unsigned long nf_ct_tcp_timeout_established =   5 DAYS;
+unsigned long nf_ct_tcp_timeout_fin_wait =      2 MINS;
+unsigned long nf_ct_tcp_timeout_close_wait =   60 SECS;
+unsigned long nf_ct_tcp_timeout_last_ack =     30 SECS;
+unsigned long nf_ct_tcp_timeout_time_wait =     2 MINS;
+unsigned long nf_ct_tcp_timeout_close =        10 SECS;
+
+/* RFC1122 says the R2 limit should be at least 100 seconds.
+   Linux uses 15 packets as limit, which corresponds 
+   to ~13-30min depending on RTO. */
+unsigned long nf_ct_tcp_timeout_max_retrans =     5 MINS;
+ 
+static unsigned long * tcp_timeouts[]
+= { NULL,                              /* TCP_CONNTRACK_NONE */
+    &nf_ct_tcp_timeout_syn_sent,       /* TCP_CONNTRACK_SYN_SENT, */
+    &nf_ct_tcp_timeout_syn_recv,       /* TCP_CONNTRACK_SYN_RECV, */
+    &nf_ct_tcp_timeout_established,    /* TCP_CONNTRACK_ESTABLISHED, */
+    &nf_ct_tcp_timeout_fin_wait,       /* TCP_CONNTRACK_FIN_WAIT, */
+    &nf_ct_tcp_timeout_close_wait,     /* TCP_CONNTRACK_CLOSE_WAIT, */
+    &nf_ct_tcp_timeout_last_ack,       /* TCP_CONNTRACK_LAST_ACK, */
+    &nf_ct_tcp_timeout_time_wait,      /* TCP_CONNTRACK_TIME_WAIT, */
+    &nf_ct_tcp_timeout_close,          /* TCP_CONNTRACK_CLOSE, */
+    NULL,                              /* TCP_CONNTRACK_LISTEN */
+ };
+ 
+#define sNO TCP_CONNTRACK_NONE
+#define sSS TCP_CONNTRACK_SYN_SENT
+#define sSR TCP_CONNTRACK_SYN_RECV
+#define sES TCP_CONNTRACK_ESTABLISHED
+#define sFW TCP_CONNTRACK_FIN_WAIT
+#define sCW TCP_CONNTRACK_CLOSE_WAIT
+#define sLA TCP_CONNTRACK_LAST_ACK
+#define sTW TCP_CONNTRACK_TIME_WAIT
+#define sCL TCP_CONNTRACK_CLOSE
+#define sLI TCP_CONNTRACK_LISTEN
+#define sIV TCP_CONNTRACK_MAX
+#define sIG TCP_CONNTRACK_IGNORE
+
+/* What TCP flags are set from RST/SYN/FIN/ACK. */
+enum tcp_bit_set {
+	TCP_SYN_SET,
+	TCP_SYNACK_SET,
+	TCP_FIN_SET,
+	TCP_ACK_SET,
+	TCP_RST_SET,
+	TCP_NONE_SET,
+};
+  
+/*
+ * The TCP state transition table needs a few words...
+ *
+ * We are the man in the middle. All the packets go through us
+ * but might get lost in transit to the destination.
+ * It is assumed that the destinations can't receive segments 
+ * we haven't seen.
+ *
+ * The checked segment is in window, but our windows are *not*
+ * equivalent with the ones of the sender/receiver. We always
+ * try to guess the state of the current sender.
+ *
+ * The meaning of the states are:
+ *
+ * NONE:	initial state
+ * SYN_SENT:	SYN-only packet seen 
+ * SYN_RECV:	SYN-ACK packet seen
+ * ESTABLISHED:	ACK packet seen
+ * FIN_WAIT:	FIN packet seen
+ * CLOSE_WAIT:	ACK seen (after FIN) 
+ * LAST_ACK:	FIN seen (after FIN)
+ * TIME_WAIT:	last ACK seen
+ * CLOSE:	closed connection
+ *
+ * LISTEN state is not used.
+ *
+ * Packets marked as IGNORED (sIG):
+ *	if they may be either invalid or valid 
+ *	and the receiver may send back a connection 
+ *	closing RST or a SYN/ACK.
+ *
+ * Packets marked as INVALID (sIV):
+ *	if they are invalid
+ *	or we do not support the request (simultaneous open)
+ */
+static enum tcp_conntrack tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
+	{
+/* ORIGINAL */
+/* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI	*/
+/*syn*/	   { sSS, sSS, sIG, sIG, sIG, sIG, sIG, sSS, sSS, sIV },
+/*
+ *	sNO -> sSS	Initialize a new connection
+ *	sSS -> sSS	Retransmitted SYN
+ *	sSR -> sIG	Late retransmitted SYN?
+ *	sES -> sIG	Error: SYNs in window outside the SYN_SENT state
+ *			are errors. Receiver will reply with RST 
+ *			and close the connection.
+ *			Or we are not in sync and hold a dead connection.
+ *	sFW -> sIG
+ *	sCW -> sIG
+ *	sLA -> sIG
+ *	sTW -> sSS	Reopened connection (RFC 1122).
+ *	sCL -> sSS
+ */
+/* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI	*/
+/*synack*/ { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV },
+/*
+ * A SYN/ACK from the client is always invalid:
+ *	- either it tries to set up a simultaneous open, which is 
+ *	  not supported;
+ *	- or the firewall has just been inserted between the two hosts
+ *	  during the session set-up. The SYN will be retransmitted 
+ *	  by the true client (or it'll time out).
+ */
+/* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI	*/
+/*fin*/    { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV },
+/*
+ *	sNO -> sIV	Too late and no reason to do anything...
+ *	sSS -> sIV	Client might not send FIN in this state:
+ *			we enforce waiting for a SYN/ACK reply first.
+ *	sSR -> sFW	Close started.
+ *	sES -> sFW
+ *	sFW -> sLA	FIN seen in both directions, waiting for
+ *			the last ACK. 
+ *			Might be a retransmitted FIN as well...
+ *	sCW -> sLA
+ *	sLA -> sLA	Retransmitted FIN. Remain in the same state.
+ *	sTW -> sTW
+ *	sCL -> sCL
+ */
+/* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI	*/
+/*ack*/	   { sES, sIV, sES, sES, sCW, sCW, sTW, sTW, sCL, sIV },
+/*
+ *	sNO -> sES	Assumed.
+ *	sSS -> sIV	ACK is invalid: we haven't seen a SYN/ACK yet.
+ *	sSR -> sES	Established state is reached.
+ *	sES -> sES	:-)
+ *	sFW -> sCW	Normal close request answered by ACK.
+ *	sCW -> sCW
+ *	sLA -> sTW	Last ACK detected.
+ *	sTW -> sTW	Retransmitted last ACK. Remain in the same state.
+ *	sCL -> sCL
+ */
+/* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI	*/
+/*rst*/    { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV },
+/*none*/   { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }
+	},
+	{
+/* REPLY */
+/* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI	*/
+/*syn*/	   { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV },
+/*
+ *	sNO -> sIV	Never reached.
+ *	sSS -> sIV	Simultaneous open, not supported
+ *	sSR -> sIV	Simultaneous open, not supported.
+ *	sES -> sIV	Server may not initiate a connection.
+ *	sFW -> sIV
+ *	sCW -> sIV
+ *	sLA -> sIV
+ *	sTW -> sIV	Reopened connection, but server may not do it.
+ *	sCL -> sIV
+ */
+/* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI	*/
+/*synack*/ { sIV, sSR, sSR, sIG, sIG, sIG, sIG, sIG, sIG, sIV },
+/*
+ *	sSS -> sSR	Standard open.
+ *	sSR -> sSR	Retransmitted SYN/ACK.
+ *	sES -> sIG	Late retransmitted SYN/ACK?
+ *	sFW -> sIG	Might be SYN/ACK answering ignored SYN
+ *	sCW -> sIG
+ *	sLA -> sIG
+ *	sTW -> sIG
+ *	sCL -> sIG
+ */
+/* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI	*/
+/*fin*/    { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV },
+/*
+ *	sSS -> sIV	Server might not send FIN in this state.
+ *	sSR -> sFW	Close started.
+ *	sES -> sFW
+ *	sFW -> sLA	FIN seen in both directions.
+ *	sCW -> sLA
+ *	sLA -> sLA	Retransmitted FIN.
+ *	sTW -> sTW
+ *	sCL -> sCL
+ */
+/* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI	*/
+/*ack*/	   { sIV, sIV, sSR, sES, sCW, sCW, sTW, sTW, sCL, sIV },
+/*
+ *	sSS -> sIV	Might be a half-open connection.
+ *	sSR -> sSR	Might answer late resent SYN.
+ *	sES -> sES	:-)
+ *	sFW -> sCW	Normal close request answered by ACK.
+ *	sCW -> sCW
+ *	sLA -> sTW	Last ACK detected.
+ *	sTW -> sTW	Retransmitted last ACK.
+ *	sCL -> sCL
+ */
+/* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI	*/
+/*rst*/    { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV },
+/*none*/   { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }
+  	}
+};
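+
+/* The table is indexed as tcp_conntracks[dir][index][old_state]: dir is
+ * IP_CT_DIR_ORIGINAL or IP_CT_DIR_REPLY, index is derived from the packet
+ * flags by get_conntrack_index() below, and old_state is the current
+ * conntrack state.  E.g. tcp_conntracks[IP_CT_DIR_REPLY][TCP_SYNACK_SET][sSS]
+ * is sSR: a SYN/ACK answering the client's SYN moves us to SYN_RECV. */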
+
+static int tcp_pkt_to_tuple(const struct sk_buff *skb,
+			    unsigned int dataoff,
+			    struct nf_conntrack_tuple *tuple)
+{
+	struct tcphdr _hdr, *hp;
+
+	/* Actually only need first 8 bytes. */
+	hp = skb_header_pointer(skb, dataoff, 8, &_hdr);
+	if (hp == NULL)
+		return 0;
+
+	tuple->src.u.tcp.port = hp->source;
+	tuple->dst.u.tcp.port = hp->dest;
+
+	return 1;
+}
+
+static int tcp_invert_tuple(struct nf_conntrack_tuple *tuple,
+			    const struct nf_conntrack_tuple *orig)
+{
+	tuple->src.u.tcp.port = orig->dst.u.tcp.port;
+	tuple->dst.u.tcp.port = orig->src.u.tcp.port;
+	return 1;
+}
+
+/* Print out the per-protocol part of the tuple. */
+static int tcp_print_tuple(struct seq_file *s,
+			   const struct nf_conntrack_tuple *tuple)
+{
+	return seq_printf(s, "sport=%hu dport=%hu ",
+			  ntohs(tuple->src.u.tcp.port),
+			  ntohs(tuple->dst.u.tcp.port));
+}
+
+/* Print out the private part of the conntrack. */
+static int tcp_print_conntrack(struct seq_file *s,
+			       const struct nf_conn *conntrack)
+{
+	enum tcp_conntrack state;
+
+	read_lock_bh(&tcp_lock);
+	state = conntrack->proto.tcp.state;
+	read_unlock_bh(&tcp_lock);
+
+	return seq_printf(s, "%s ", tcp_conntrack_names[state]);
+}
+
+static unsigned int get_conntrack_index(const struct tcphdr *tcph)
+{
+	if (tcph->rst) return TCP_RST_SET;
+	else if (tcph->syn) return (tcph->ack ? TCP_SYNACK_SET : TCP_SYN_SET);
+	else if (tcph->fin) return TCP_FIN_SET;
+	else if (tcph->ack) return TCP_ACK_SET;
+	else return TCP_NONE_SET;
+}
+
+/* TCP connection tracking based on 'Real Stateful TCP Packet Filtering
+   in IP Filter' by Guido van Rooij.
+   
+   http://www.nluug.nl/events/sane2000/papers.html
+   http://www.iae.nl/users/guido/papers/tcp_filtering.ps.gz
+   
+   The boundaries and the conditions are changed according to RFC793:
+   the packet must intersect the window (i.e. segments may be
+   after the right or before the left edge) and thus receivers may ACK
+   segments after the right edge of the window.
+
+   	td_maxend = max(sack + max(win,1)) seen in reply packets
+	td_maxwin = max(max(win, 1)) + (sack - ack) seen in sent packets
+	td_maxwin += seq + len - sender.td_maxend
+			if seq + len > sender.td_maxend
+	td_end    = max(seq + len) seen in sent packets
+   
+   I.   Upper bound for valid data:	seq <= sender.td_maxend
+   II.  Lower bound for valid data:	seq + len >= sender.td_end - receiver.td_maxwin
+   III.	Upper bound for valid ack:      sack <= receiver.td_end
+   IV.	Lower bound for valid ack:	ack >= receiver.td_end - MAXACKWINDOW
+
+   where sack is the highest right edge of sack block found in the packet.
+
+   The upper bound limit for a valid ack is not ignored -
+   we don't have to deal with fragments.
+*/
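+
+/* The four bounds above are applied verbatim in tcp_in_window() below,
+ * using the wrap-safe before()/after() helpers from <net/tcp.h> and
+ * MAXACKWINDOW() as the allowed ack slack. */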
+
+static inline __u32 segment_seq_plus_len(__u32 seq,
+					 size_t len,
+					 unsigned int dataoff,
+					 struct tcphdr *tcph)
+{
+	/* XXX Should I use payload length field in IP/IPv6 header ?
+	 * - YK */
+	return (seq + len - dataoff - tcph->doff*4
+		+ (tcph->syn ? 1 : 0) + (tcph->fin ? 1 : 0));
+}
+  
+/* Fixme: what about big packets? */
+#define MAXACKWINCONST			66000
+#define MAXACKWINDOW(sender)						\
+	((sender)->td_maxwin > MAXACKWINCONST ? (sender)->td_maxwin	\
+					      : MAXACKWINCONST)
+  
+/*
+ * Simplified tcp_parse_options routine from tcp_input.c
+ */
+static void tcp_options(const struct sk_buff *skb,
+			unsigned int dataoff,
+			struct tcphdr *tcph, 
+			struct ip_ct_tcp_state *state)
+{
+	unsigned char buff[(15 * 4) - sizeof(struct tcphdr)];
+	unsigned char *ptr;
+	int length = (tcph->doff*4) - sizeof(struct tcphdr);
+
+	if (!length)
+		return;
+
+	ptr = skb_header_pointer(skb, dataoff + sizeof(struct tcphdr),
+				 length, buff);
+	BUG_ON(ptr == NULL);
+
+	state->td_scale = 
+	state->flags = 0;
+
+	while (length > 0) {
+		int opcode=*ptr++;
+		int opsize;
+
+		switch (opcode) {
+		case TCPOPT_EOL:
+			return;
+		case TCPOPT_NOP:	/* Ref: RFC 793 section 3.1 */
+			length--;
+			continue;
+		default:
+			opsize=*ptr++;
+			if (opsize < 2) /* "silly options" */
+				return;
+			if (opsize > length)
+				break;	/* don't parse partial options */
+
+			if (opcode == TCPOPT_SACK_PERM 
+			    && opsize == TCPOLEN_SACK_PERM)
+				state->flags |= IP_CT_TCP_FLAG_SACK_PERM;
+			else if (opcode == TCPOPT_WINDOW
+				 && opsize == TCPOLEN_WINDOW) {
+				state->td_scale = *(u_int8_t *)ptr;
+
+				if (state->td_scale > 14) {
+					/* See RFC1323 */
+					state->td_scale = 14;
+				}
+				state->flags |=
+					IP_CT_TCP_FLAG_WINDOW_SCALE;
+			}
+			ptr += opsize - 2;
+			length -= opsize;
+		}
+	}
+}
+
+static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
+		     struct tcphdr *tcph, __u32 *sack)
+{
+        unsigned char buff[(15 * 4) - sizeof(struct tcphdr)];
+	unsigned char *ptr;
+	int length = (tcph->doff*4) - sizeof(struct tcphdr);
+	__u32 tmp;
+
+	if (!length)
+		return;
+
+	ptr = skb_header_pointer(skb, dataoff + sizeof(struct tcphdr),
+				 length, buff);
+	BUG_ON(ptr == NULL);
+
+	/* Fast path for timestamp-only option */
+	if (length == TCPOLEN_TSTAMP_ALIGNED*4
+	    && *(__u32 *)ptr ==
+	        __constant_ntohl((TCPOPT_NOP << 24) 
+	        		 | (TCPOPT_NOP << 16)
+	        		 | (TCPOPT_TIMESTAMP << 8)
+	        		 | TCPOLEN_TIMESTAMP))
+		return;
+
+	while (length > 0) {
+		int opcode = *ptr++;
+		int opsize, i;
+
+		switch (opcode) {
+		case TCPOPT_EOL:
+			return;
+		case TCPOPT_NOP:	/* Ref: RFC 793 section 3.1 */
+			length--;
+			continue;
+		default:
+			opsize = *ptr++;
+			if (opsize < 2) /* "silly options" */
+				return;
+			if (opsize > length)
+				break;	/* don't parse partial options */
+
+			if (opcode == TCPOPT_SACK 
+			    && opsize >= (TCPOLEN_SACK_BASE 
+			    		  + TCPOLEN_SACK_PERBLOCK)
+			    && !((opsize - TCPOLEN_SACK_BASE) 
+			    	 % TCPOLEN_SACK_PERBLOCK)) {
+			    	for (i = 0;
+			    	     i < (opsize - TCPOLEN_SACK_BASE);
+			    	     i += TCPOLEN_SACK_PERBLOCK) {
+					memcpy(&tmp, (__u32 *)(ptr + i) + 1,
+					       sizeof(__u32));
+					tmp = ntohl(tmp);
+
+					if (after(tmp, *sack))
+						*sack = tmp;
+				}
+				return;
+			}
+			ptr += opsize - 2;
+			length -= opsize;
+		}
+	}
+}
+
+static int tcp_in_window(struct ip_ct_tcp *state, 
+                         enum ip_conntrack_dir dir,
+                         unsigned int index,
+                         const struct sk_buff *skb,
+			 unsigned int dataoff,
+                         struct tcphdr *tcph,
+			 int pf)
+{
+	struct ip_ct_tcp_state *sender = &state->seen[dir];
+	struct ip_ct_tcp_state *receiver = &state->seen[!dir];
+	__u32 seq, ack, sack, end, win, swin;
+	int res;
+
+	/*
+	 * Get the required data from the packet.
+	 */
+	seq = ntohl(tcph->seq);
+	ack = sack = ntohl(tcph->ack_seq);
+	win = ntohs(tcph->window);
+	end = segment_seq_plus_len(seq, skb->len, dataoff, tcph);
+
+	if (receiver->flags & IP_CT_TCP_FLAG_SACK_PERM)
+		tcp_sack(skb, dataoff, tcph, &sack);
+
+	DEBUGP("tcp_in_window: START\n");
+	DEBUGP("tcp_in_window: src=%u.%u.%u.%u:%hu dst=%u.%u.%u.%u:%hu "
+	       "seq=%u ack=%u sack=%u win=%u end=%u\n",
+		NIPQUAD(iph->saddr), ntohs(tcph->source), 
+		NIPQUAD(iph->daddr), ntohs(tcph->dest),
+		seq, ack, sack, win, end);
+	DEBUGP("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i "
+	       "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
+		sender->td_end, sender->td_maxend, sender->td_maxwin,
+		sender->td_scale, 
+		receiver->td_end, receiver->td_maxend, receiver->td_maxwin, 
+		receiver->td_scale);
+
+	if (sender->td_end == 0) {
+		/*
+		 * Initialize sender data.
+		 */
+		if (tcph->syn && tcph->ack) {
+			/*
+			 * Outgoing SYN-ACK in reply to a SYN.
+			 */
+			sender->td_end = 
+			sender->td_maxend = end;
+			sender->td_maxwin = (win == 0 ? 1 : win);
+
+			tcp_options(skb, dataoff, tcph, sender);
+			/* 
+			 * RFC 1323:
+			 * Both sides must send the Window Scale option
+			 * to enable window scaling in either direction.
+			 */
+			if (!(sender->flags & IP_CT_TCP_FLAG_WINDOW_SCALE
+			      && receiver->flags & IP_CT_TCP_FLAG_WINDOW_SCALE))
+				sender->td_scale = 
+				receiver->td_scale = 0;
+		} else {
+			/*
+			 * We are in the middle of a connection,
+			 * its history is lost for us.
+			 * Let's try to use the data from the packet.
+		 	 */
+			sender->td_end = end;
+			sender->td_maxwin = (win == 0 ? 1 : win);
+			sender->td_maxend = end + sender->td_maxwin;
+		}
+	} else if (((state->state == TCP_CONNTRACK_SYN_SENT
+		     && dir == IP_CT_DIR_ORIGINAL)
+		   || (state->state == TCP_CONNTRACK_SYN_RECV
+		     && dir == IP_CT_DIR_REPLY))
+		   && after(end, sender->td_end)) {
+		/*
+		 * RFC 793: "if a TCP is reinitialized ... then it need
+		 * not wait at all; it must only be sure to use sequence 
+		 * numbers larger than those recently used."
+		 */
+		sender->td_end =
+		sender->td_maxend = end;
+		sender->td_maxwin = (win == 0 ? 1 : win);
+
+		tcp_options(skb, dataoff, tcph, sender);
+	}
+
+	if (!(tcph->ack)) {
+		/*
+		 * If there is no ACK, just pretend it was set and OK.
+		 */
+		ack = sack = receiver->td_end;
+	} else if (((tcp_flag_word(tcph) & (TCP_FLAG_ACK|TCP_FLAG_RST)) == 
+		    (TCP_FLAG_ACK|TCP_FLAG_RST)) 
+		   && (ack == 0)) {
+		/*
+		 * Broken TCP stacks may set ACK in RST packets as well,
+		 * with a zero ack value.
+		 */
+		ack = sack = receiver->td_end;
+	}
+
+	if (seq == end
+	    && (!tcph->rst
+		|| (seq == 0 && state->state == TCP_CONNTRACK_SYN_SENT)))
+		/*
+		 * Packet contains no data: we assume it is valid
+		 * and check the ack value only.
+		 * However RST segments are always validated by their
+		 * SEQ number, except when seq == 0 (reset sent answering
+		 * a SYN).
+		 */
+		seq = end = sender->td_end;
+
+	DEBUGP("tcp_in_window: src=%u.%u.%u.%u:%hu dst=%u.%u.%u.%u:%hu "
+	       "seq=%u ack=%u sack =%u win=%u end=%u\n",
+		NIPQUAD(iph->saddr), ntohs(tcph->source),
+		NIPQUAD(iph->daddr), ntohs(tcph->dest),
+		seq, ack, sack, win, end);
+	DEBUGP("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i "
+	       "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
+		sender->td_end, sender->td_maxend, sender->td_maxwin,
+		sender->td_scale, 
+		receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
+		receiver->td_scale);
+
+	DEBUGP("tcp_in_window: I=%i II=%i III=%i IV=%i\n",
+		before(seq, sender->td_maxend + 1),
+		after(end, sender->td_end - receiver->td_maxwin - 1),
+	    	before(sack, receiver->td_end + 1),
+	    	after(ack, receiver->td_end - MAXACKWINDOW(sender)));
+
+	if (sender->loose || receiver->loose ||
+	    (before(seq, sender->td_maxend + 1) &&
+	     after(end, sender->td_end - receiver->td_maxwin - 1) &&
+	     before(sack, receiver->td_end + 1) &&
+	     after(ack, receiver->td_end - MAXACKWINDOW(sender)))) {
+	    	/*
+		 * Take into account window scaling (RFC 1323).
+		 */
+		if (!tcph->syn)
+			win <<= sender->td_scale;
+
+		/*
+		 * Update sender data.
+		 */
+		swin = win + (sack - ack);
+		if (sender->td_maxwin < swin)
+			sender->td_maxwin = swin;
+		if (after(end, sender->td_end))
+			sender->td_end = end;
+		/*
+		 * Update receiver data.
+		 */
+		if (after(end, sender->td_maxend))
+			receiver->td_maxwin += end - sender->td_maxend;
+		if (after(sack + win, receiver->td_maxend - 1)) {
+			receiver->td_maxend = sack + win;
+			if (win == 0)
+				receiver->td_maxend++;
+		}
+
+		/* 
+		 * Check retransmissions.
+		 */
+		if (index == TCP_ACK_SET) {
+			if (state->last_dir == dir
+			    && state->last_seq == seq
+			    && state->last_ack == ack
+			    && state->last_end == end)
+				state->retrans++;
+			else {
+				state->last_dir = dir;
+				state->last_seq = seq;
+				state->last_ack = ack;
+				state->last_end = end;
+				state->retrans = 0;
+			}
+		}
+		/*
+		 * Close the window of disabled window tracking :-)
+		 */
+		if (sender->loose)
+			sender->loose--;
+
+		res = 1;
+	} else {
+		if (LOG_INVALID(IPPROTO_TCP))
+			nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
+			"nf_ct_tcp: %s ",
+			before(seq, sender->td_maxend + 1) ?
+			after(end, sender->td_end - receiver->td_maxwin - 1) ?
+			before(sack, receiver->td_end + 1) ?
+			after(ack, receiver->td_end - MAXACKWINDOW(sender)) ? "BUG"
+			: "ACK is under the lower bound (possible overly delayed ACK)"
+			: "ACK is over the upper bound (ACKed data not seen yet)"
+			: "SEQ is under the lower bound (already ACKed data retransmitted)"
+			: "SEQ is over the upper bound (over the window of the receiver)");
+
+		res = nf_ct_tcp_be_liberal;
+  	}
+  
+	DEBUGP("tcp_in_window: res=%i sender end=%u maxend=%u maxwin=%u "
+	       "receiver end=%u maxend=%u maxwin=%u\n",
+		res, sender->td_end, sender->td_maxend, sender->td_maxwin, 
+		receiver->td_end, receiver->td_maxend, receiver->td_maxwin);
+
+	return res;
+}
+
+#ifdef CONFIG_IP_NF_NAT_NEEDED
+/* Update sender->td_end after NAT successfully mangled the packet */
+/* Caller must linearize skb at tcp header. */
+void nf_conntrack_tcp_update(struct sk_buff *skb,
+			     unsigned int dataoff,
+			     struct nf_conn *conntrack, 
+			     int dir)
+{
+	struct tcphdr *tcph = (void *)skb->data + dataoff;
+	__u32 end;
+#ifdef DEBUGP_VARS
+	struct ip_ct_tcp_state *sender = &conntrack->proto.tcp.seen[dir];
+	struct ip_ct_tcp_state *receiver = &conntrack->proto.tcp.seen[!dir];
+#endif
+
+	end = segment_seq_plus_len(ntohl(tcph->seq), skb->len, dataoff, tcph);
+
+	write_lock_bh(&tcp_lock);
+	/*
+	 * We only have to worry about the ack in the reply packet...
+	 */
+	if (after(end, conntrack->proto.tcp.seen[dir].td_end))
+		conntrack->proto.tcp.seen[dir].td_end = end;
+	conntrack->proto.tcp.last_end = end;
+	write_unlock_bh(&tcp_lock);
+	DEBUGP("tcp_update: sender end=%u maxend=%u maxwin=%u scale=%i "
+	       "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
+		sender->td_end, sender->td_maxend, sender->td_maxwin,
+		sender->td_scale, 
+		receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
+		receiver->td_scale);
+}
+ 
+#endif
+
+#define	TH_FIN	0x01
+#define	TH_SYN	0x02
+#define	TH_RST	0x04
+#define	TH_PUSH	0x08
+#define	TH_ACK	0x10
+#define	TH_URG	0x20
+#define	TH_ECE	0x40
+#define	TH_CWR	0x80
+
+/* table of valid flag combinations - ECE and CWR are always valid */
+static u8 tcp_valid_flags[(TH_FIN|TH_SYN|TH_RST|TH_PUSH|TH_ACK|TH_URG) + 1] =
+{
+	[TH_SYN]			= 1,
+	[TH_SYN|TH_ACK]			= 1,
+	[TH_SYN|TH_ACK|TH_PUSH]		= 1,
+	[TH_RST]			= 1,
+	[TH_RST|TH_ACK]			= 1,
+	[TH_RST|TH_ACK|TH_PUSH]		= 1,
+	[TH_FIN|TH_ACK]			= 1,
+	[TH_ACK]			= 1,
+	[TH_ACK|TH_PUSH]		= 1,
+	[TH_ACK|TH_URG]			= 1,
+	[TH_ACK|TH_URG|TH_PUSH]		= 1,
+	[TH_FIN|TH_ACK|TH_PUSH]		= 1,
+	[TH_FIN|TH_ACK|TH_URG]		= 1,
+	[TH_FIN|TH_ACK|TH_URG|TH_PUSH]	= 1,
+};
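+
+/* tcp_error() below indexes this table with the raw flags byte (offset 13
+ * of the TCP header) after masking off ECE/CWR; combinations that index a
+ * zero entry - e.g. SYN|FIN or a bare FIN - are treated as invalid. */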
+
+/* Protect conntrack against broken packets. Code taken from ipt_unclean.c.  */
+static int tcp_error(struct sk_buff *skb,
+		     unsigned int dataoff,
+		     enum ip_conntrack_info *ctinfo,
+		     int pf,
+		     unsigned int hooknum,
+		     int(*csum)(const struct sk_buff *,unsigned int))
+{
+	struct tcphdr _tcph, *th;
+	unsigned int tcplen = skb->len - dataoff;
+	u_int8_t tcpflags;
+
+	/* Smaller than the minimal TCP header? */
+	th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph);
+	if (th == NULL) {
+		if (LOG_INVALID(IPPROTO_TCP))
+			nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
+				"nf_ct_tcp: short packet ");
+		return -NF_ACCEPT;
+  	}
+  
+	/* Not whole TCP header or malformed packet */
+	if (th->doff*4 < sizeof(struct tcphdr) || tcplen < th->doff*4) {
+		if (LOG_INVALID(IPPROTO_TCP))
+			nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
+				"nf_ct_tcp: truncated/malformed packet ");
+		return -NF_ACCEPT;
+	}
+  
+	/* Checksum invalid? Ignore.
+	 * We skip checking packets on the outgoing path
+	 * because the semantics of CHECKSUM_HW are different there
+	 * and moreover root might send raw packets.
+	 */
+	/* FIXME: Source route IP option packets --RR */
+	if (((pf == PF_INET && hooknum == NF_IP_PRE_ROUTING) ||
+	     (pf == PF_INET6 && hooknum  == NF_IP6_PRE_ROUTING))
+	    && skb->ip_summed != CHECKSUM_UNNECESSARY
+	    && csum(skb, dataoff)) {
+		if (LOG_INVALID(IPPROTO_TCP))
+			nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
+				  "nf_ct_tcp: bad TCP checksum ");
+		return -NF_ACCEPT;
+	}
+
+	/* Check TCP flags. */
+	tcpflags = (((u_int8_t *)th)[13] & ~(TH_ECE|TH_CWR));
+	if (!tcp_valid_flags[tcpflags]) {
+		if (LOG_INVALID(IPPROTO_TCP))
+			nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
+				  "nf_ct_tcp: invalid TCP flag combination ");
+		return -NF_ACCEPT;
+	}
+
+	return NF_ACCEPT;
+}
+
+static int csum4(const struct sk_buff *skb, unsigned int dataoff)
+{
+	return csum_tcpudp_magic(skb->nh.iph->saddr, skb->nh.iph->daddr,
+				 skb->len - dataoff, IPPROTO_TCP,
+			         skb->ip_summed == CHECKSUM_HW ? skb->csum
+			      	 : skb_checksum(skb, dataoff,
+						skb->len - dataoff, 0));
+}
+
+static int csum6(const struct sk_buff *skb, unsigned int dataoff)
+{
+	return csum_ipv6_magic(&skb->nh.ipv6h->saddr, &skb->nh.ipv6h->daddr,
+			       skb->len - dataoff, IPPROTO_TCP,
+			       skb->ip_summed == CHECKSUM_HW ? skb->csum
+			       : skb_checksum(skb, dataoff, skb->len - dataoff,
+					      0));
+}
+
+static int tcp_error4(struct sk_buff *skb,
+		      unsigned int dataoff,
+		      enum ip_conntrack_info *ctinfo,
+		      int pf,
+		      unsigned int hooknum)
+{
+	return tcp_error(skb, dataoff, ctinfo, pf, hooknum, csum4);
+}
+
+static int tcp_error6(struct sk_buff *skb,
+		      unsigned int dataoff,
+		      enum ip_conntrack_info *ctinfo,
+		      int pf,
+		      unsigned int hooknum)
+{
+	return tcp_error(skb, dataoff, ctinfo, pf, hooknum, csum6);
+}
+
+/* Returns verdict for packet, or -1 for invalid. */
+static int tcp_packet(struct nf_conn *conntrack,
+		      const struct sk_buff *skb,
+		      unsigned int dataoff,
+		      enum ip_conntrack_info ctinfo,
+		      int pf,
+		      unsigned int hooknum)
+{
+	enum tcp_conntrack new_state, old_state;
+	enum ip_conntrack_dir dir;
+	struct tcphdr *th, _tcph;
+	unsigned long timeout;
+	unsigned int index;
+
+	th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph);
+	BUG_ON(th == NULL);
+
+	write_lock_bh(&tcp_lock);
+	old_state = conntrack->proto.tcp.state;
+	dir = CTINFO2DIR(ctinfo);
+	index = get_conntrack_index(th);
+	new_state = tcp_conntracks[dir][index][old_state];
+
+	switch (new_state) {
+	case TCP_CONNTRACK_IGNORE:
+		/* Either SYN in ORIGINAL
+		 * or SYN/ACK in REPLY. */
+		if (index == TCP_SYNACK_SET
+		    && conntrack->proto.tcp.last_index == TCP_SYN_SET
+		    && conntrack->proto.tcp.last_dir != dir
+		    && ntohl(th->ack_seq) ==
+		    	     conntrack->proto.tcp.last_end) {
+			/* This SYN/ACK acknowledges a SYN that we earlier 
+			 * ignored as invalid. This means that the client and
+			 * the server are both in sync, while the firewall is
+			 * not. We kill this session and block the SYN/ACK so
+			 * that the client cannot but retransmit its SYN and 
+			 * thus initiate a clean new session.
+			 */
+		    	write_unlock_bh(&tcp_lock);
+			if (LOG_INVALID(IPPROTO_TCP))
+				nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
+					  "nf_ct_tcp: killing out of sync session ");
+		    	if (del_timer(&conntrack->timeout))
+		    		conntrack->timeout.function((unsigned long)
+		    					    conntrack);
+		    	return -NF_DROP;
+		}
+		conntrack->proto.tcp.last_index = index;
+		conntrack->proto.tcp.last_dir = dir;
+		conntrack->proto.tcp.last_seq = ntohl(th->seq);
+		conntrack->proto.tcp.last_end =
+		    segment_seq_plus_len(ntohl(th->seq), skb->len, dataoff, th);
+
+		write_unlock_bh(&tcp_lock);
+		if (LOG_INVALID(IPPROTO_TCP))
+			nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
+				  "nf_ct_tcp: invalid packet ignored ");
+		return NF_ACCEPT;
+	case TCP_CONNTRACK_MAX:
+		/* Invalid packet */
+		DEBUGP("nf_ct_tcp: Invalid dir=%i index=%u ostate=%u\n",
+		       dir, get_conntrack_index(th),
+		       old_state);
+		write_unlock_bh(&tcp_lock);
+		if (LOG_INVALID(IPPROTO_TCP))
+			nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
+				  "nf_ct_tcp: invalid state ");
+		return -NF_ACCEPT;
+	case TCP_CONNTRACK_SYN_SENT:
+		if (old_state < TCP_CONNTRACK_TIME_WAIT)
+			break;
+		if ((conntrack->proto.tcp.seen[dir].flags &
+			IP_CT_TCP_FLAG_CLOSE_INIT)
+		    || after(ntohl(th->seq),
+			     conntrack->proto.tcp.seen[dir].td_end)) {
+		    	/* Attempt to reopen a closed connection.
+		    	* Delete this connection and look up again. */
+		    	write_unlock_bh(&tcp_lock);
+		    	if (del_timer(&conntrack->timeout))
+		    		conntrack->timeout.function((unsigned long)
+		    					    conntrack);
+		    	return -NF_REPEAT;
+		}
+	case TCP_CONNTRACK_CLOSE:
+		if (index == TCP_RST_SET
+		    && test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status)
+		    && conntrack->proto.tcp.last_index == TCP_SYN_SET
+		    && ntohl(th->ack_seq) == conntrack->proto.tcp.last_end) {
+			/* RST sent to an invalid SYN we had let through.
+			 * The SYN was in window then, so tear down the connection.
+			 * We skip window checking, because packet might ACK
+			 * segments we ignored in the SYN. */
+			goto in_window;
+		}
+		/* Just fall through */
+	default:
+		/* Keep compilers happy. */
+		break;
+	}
+
+	if (!tcp_in_window(&conntrack->proto.tcp, dir, index,
+			   skb, dataoff, th, pf)) {
+		write_unlock_bh(&tcp_lock);
+		return -NF_ACCEPT;
+	}
+     in_window:
+	/* From now on we have got in-window packets */
+	conntrack->proto.tcp.last_index = index;
+
+	DEBUGP("tcp_conntracks: src=%u.%u.%u.%u:%hu dst=%u.%u.%u.%u:%hu "
+	       "syn=%i ack=%i fin=%i rst=%i old=%i new=%i\n",
+		NIPQUAD(iph->saddr), ntohs(th->source),
+		NIPQUAD(iph->daddr), ntohs(th->dest),
+		(th->syn ? 1 : 0), (th->ack ? 1 : 0),
+		(th->fin ? 1 : 0), (th->rst ? 1 : 0),
+		old_state, new_state);
+
+	conntrack->proto.tcp.state = new_state;
+	if (old_state != new_state
+	    && (new_state == TCP_CONNTRACK_FIN_WAIT
+		|| new_state == TCP_CONNTRACK_CLOSE))
+		conntrack->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT;
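+	/* Too many retransmissions without an ACK: fall back to the shorter
+	 * max_retrans timeout instead of the per-state one. */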
+	timeout = conntrack->proto.tcp.retrans >= nf_ct_tcp_max_retrans
+		  && *tcp_timeouts[new_state] > nf_ct_tcp_timeout_max_retrans
+		  ? nf_ct_tcp_timeout_max_retrans : *tcp_timeouts[new_state];
+	write_unlock_bh(&tcp_lock);
+
+	nf_conntrack_event_cache(IPCT_PROTOINFO_VOLATILE, skb);
+	if (new_state != old_state)
+		nf_conntrack_event_cache(IPCT_PROTOINFO, skb);
+
+	if (!test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status)) {
+		/* If the only reply is a RST, we can consider ourselves not to
+		   have an established connection: this is a fairly common
+		   problem case, so we can delete the conntrack
+		   immediately.  --RR */
+		if (th->rst) {
+			if (del_timer(&conntrack->timeout))
+				conntrack->timeout.function((unsigned long)
+							    conntrack);
+			return NF_ACCEPT;
+		}
+	} else if (!test_bit(IPS_ASSURED_BIT, &conntrack->status)
+		   && (old_state == TCP_CONNTRACK_SYN_RECV
+		       || old_state == TCP_CONNTRACK_ESTABLISHED)
+		   && new_state == TCP_CONNTRACK_ESTABLISHED) {
+		/* Set ASSURED if we see a valid ack in ESTABLISHED
+		   after SYN_RECV or a valid answer for a picked up 
+		   connection. */
+		set_bit(IPS_ASSURED_BIT, &conntrack->status);
+		nf_conntrack_event_cache(IPCT_STATUS, skb);
+	}
+	nf_ct_refresh_acct(conntrack, ctinfo, skb, timeout);
+
+	return NF_ACCEPT;
+}
+ 
+/* Called when a new connection for this protocol found. */
+static int tcp_new(struct nf_conn *conntrack,
+		   const struct sk_buff *skb,
+		   unsigned int dataoff)
+{
+	enum tcp_conntrack new_state;
+	struct tcphdr *th, _tcph;
+#ifdef DEBUGP_VARS
+	struct ip_ct_tcp_state *sender = &conntrack->proto.tcp.seen[0];
+	struct ip_ct_tcp_state *receiver = &conntrack->proto.tcp.seen[1];
+#endif
+
+	th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph);
+	BUG_ON(th == NULL);
+
+	/* Don't need lock here: this conntrack not in circulation yet */
+	new_state
+		= tcp_conntracks[0][get_conntrack_index(th)]
+		[TCP_CONNTRACK_NONE];
+
+	/* Invalid: delete conntrack */
+	if (new_state >= TCP_CONNTRACK_MAX) {
+		DEBUGP("nf_ct_tcp: invalid new packet, deleting.\n");
+		return 0;
+	}
+
+	if (new_state == TCP_CONNTRACK_SYN_SENT) {
+		/* SYN packet */
+		conntrack->proto.tcp.seen[0].td_end =
+			segment_seq_plus_len(ntohl(th->seq), skb->len,
+					     dataoff, th);
+		conntrack->proto.tcp.seen[0].td_maxwin = ntohs(th->window);
+		if (conntrack->proto.tcp.seen[0].td_maxwin == 0)
+			conntrack->proto.tcp.seen[0].td_maxwin = 1;
+		conntrack->proto.tcp.seen[0].td_maxend =
+			conntrack->proto.tcp.seen[0].td_end;
+
+		tcp_options(skb, dataoff, th, &conntrack->proto.tcp.seen[0]);
+		conntrack->proto.tcp.seen[1].flags = 0;
+		conntrack->proto.tcp.seen[0].loose = 
+		conntrack->proto.tcp.seen[1].loose = 0;
+	} else if (nf_ct_tcp_loose == 0) {
+		/* Don't try to pick up connections. */
+		return 0;
+	} else {
+		/*
+		 * We are in the middle of a connection,
+		 * its history is lost for us.
+		 * Let's try to use the data from the packet.
+		 */
+		conntrack->proto.tcp.seen[0].td_end =
+			segment_seq_plus_len(ntohl(th->seq), skb->len,
+					     dataoff, th);
+		conntrack->proto.tcp.seen[0].td_maxwin = ntohs(th->window);
+		if (conntrack->proto.tcp.seen[0].td_maxwin == 0)
+			conntrack->proto.tcp.seen[0].td_maxwin = 1;
+		conntrack->proto.tcp.seen[0].td_maxend =
+			conntrack->proto.tcp.seen[0].td_end + 
+			conntrack->proto.tcp.seen[0].td_maxwin;
+		conntrack->proto.tcp.seen[0].td_scale = 0;
+
+		/* We assume SACK. Should we assume window scaling too? */
+		conntrack->proto.tcp.seen[0].flags =
+		conntrack->proto.tcp.seen[1].flags = IP_CT_TCP_FLAG_SACK_PERM;
+		conntrack->proto.tcp.seen[0].loose = 
+		conntrack->proto.tcp.seen[1].loose = nf_ct_tcp_loose;
+	}
+    
+	conntrack->proto.tcp.seen[1].td_end = 0;
+	conntrack->proto.tcp.seen[1].td_maxend = 0;
+	conntrack->proto.tcp.seen[1].td_maxwin = 1;
+	conntrack->proto.tcp.seen[1].td_scale = 0;      
+
+	/* tcp_packet will set them */
+	conntrack->proto.tcp.state = TCP_CONNTRACK_NONE;
+	conntrack->proto.tcp.last_index = TCP_NONE_SET;
+	 
+	DEBUGP("tcp_new: sender end=%u maxend=%u maxwin=%u scale=%i "
+	       "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
+		sender->td_end, sender->td_maxend, sender->td_maxwin,
+		sender->td_scale, 
+		receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
+		receiver->td_scale);
+	return 1;
+}
+  
+struct nf_conntrack_protocol nf_conntrack_protocol_tcp4 =
+{
+	.l3proto		= PF_INET,
+	.proto 			= IPPROTO_TCP,
+	.name 			= "tcp",
+	.pkt_to_tuple 		= tcp_pkt_to_tuple,
+	.invert_tuple 		= tcp_invert_tuple,
+	.print_tuple 		= tcp_print_tuple,
+	.print_conntrack 	= tcp_print_conntrack,
+	.packet 		= tcp_packet,
+	.new 			= tcp_new,
+	.error			= tcp_error4,
+};
+
+struct nf_conntrack_protocol nf_conntrack_protocol_tcp6 =
+{
+	.l3proto		= PF_INET6,
+	.proto 			= IPPROTO_TCP,
+	.name 			= "tcp",
+	.pkt_to_tuple 		= tcp_pkt_to_tuple,
+	.invert_tuple 		= tcp_invert_tuple,
+	.print_tuple 		= tcp_print_tuple,
+	.print_conntrack 	= tcp_print_conntrack,
+	.packet 		= tcp_packet,
+	.new 			= tcp_new,
+	.error			= tcp_error6,
+};
+
+EXPORT_SYMBOL(nf_conntrack_protocol_tcp4);
+EXPORT_SYMBOL(nf_conntrack_protocol_tcp6);
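
For reference, the in-window test at the heart of tcp_in_window() above can be
restated as a small standalone program. This is a minimal sketch, assuming plain
32-bit wraparound sequence arithmetic: the struct fields mirror the ip_ct_tcp_state
bookkeeping described before tcp_in_window(), and before()/after() are re-implemented
here because <net/tcp.h> is kernel-only.

#include <stdint.h>
#include <stdio.h>

/* Simplified per-direction tracking data, mirroring struct ip_ct_tcp_state. */
struct tcp_dir_state {
	uint32_t td_end;	/* max(seq + len) seen from this side */
	uint32_t td_maxend;	/* max(sack + max(win, 1)) seen from the other side */
	uint32_t td_maxwin;	/* largest window seen from this side */
};

/* Wraparound-safe sequence comparisons, equivalent to the kernel helpers. */
static int seq_before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }
static int seq_after(uint32_t a, uint32_t b)  { return (int32_t)(b - a) < 0; }

#define MAXACKWINCONST 66000
#define MAXACKWINDOW(s) \
	((s)->td_maxwin > MAXACKWINCONST ? (s)->td_maxwin : MAXACKWINCONST)

/* Bounds I-IV from the comment above tcp_in_window(). */
static int segment_in_window(const struct tcp_dir_state *sender,
			     const struct tcp_dir_state *receiver,
			     uint32_t seq, uint32_t ack,
			     uint32_t sack, uint32_t end)
{
	return seq_before(seq, sender->td_maxend + 1) &&			/* I   */
	       seq_after(end, sender->td_end - receiver->td_maxwin - 1) &&	/* II  */
	       seq_before(sack, receiver->td_end + 1) &&			/* III */
	       seq_after(ack, receiver->td_end - MAXACKWINDOW(sender));	/* IV  */
}

int main(void)
{
	struct tcp_dir_state snd = { .td_end = 2000, .td_maxend = 60000, .td_maxwin = 5840 };
	struct tcp_dir_state rcv = { .td_end = 5000, .td_maxend = 9000,  .td_maxwin = 5840 };

	/* A segment continuing right where the sender left off and ACKing the
	 * receiver's current edge: expected to be accepted. */
	printf("in window: %d\n",
	       segment_in_window(&snd, &rcv, 2000, 5000, 5000, 3000));
	return 0;
}
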
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
new file mode 100644
index 0000000..3cae7ce
--- /dev/null
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -0,0 +1,216 @@
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ *	- enable working with Layer 3 protocol independent connection tracking.
+ *
+ * Derived from net/ipv4/netfilter/ip_conntrack_proto_udp.c
+ */
+
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/module.h>
+#include <linux/netfilter.h>
+#include <linux/udp.h>
+#include <linux/seq_file.h>
+#include <linux/skbuff.h>
+#include <linux/ipv6.h>
+#include <net/ip6_checksum.h>
+#include <net/checksum.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_ipv6.h>
+#include <net/netfilter/nf_conntrack_protocol.h>
+
+unsigned long nf_ct_udp_timeout = 30*HZ;
+unsigned long nf_ct_udp_timeout_stream = 180*HZ;
+
+static int udp_pkt_to_tuple(const struct sk_buff *skb,
+			     unsigned int dataoff,
+			     struct nf_conntrack_tuple *tuple)
+{
+	struct udphdr _hdr, *hp;
+
+	/* Actually only need first 8 bytes. */
+	hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
+	if (hp == NULL)
+		return 0;
+
+	tuple->src.u.udp.port = hp->source;
+	tuple->dst.u.udp.port = hp->dest;
+
+	return 1;
+}
+
+static int udp_invert_tuple(struct nf_conntrack_tuple *tuple,
+			    const struct nf_conntrack_tuple *orig)
+{
+	tuple->src.u.udp.port = orig->dst.u.udp.port;
+	tuple->dst.u.udp.port = orig->src.u.udp.port;
+	return 1;
+}
+
+/* Print out the per-protocol part of the tuple. */
+static int udp_print_tuple(struct seq_file *s,
+			   const struct nf_conntrack_tuple *tuple)
+{
+	return seq_printf(s, "sport=%hu dport=%hu ",
+			  ntohs(tuple->src.u.udp.port),
+			  ntohs(tuple->dst.u.udp.port));
+}
+
+/* Print out the private part of the conntrack. */
+static int udp_print_conntrack(struct seq_file *s,
+			       const struct nf_conn *conntrack)
+{
+	return 0;
+}
+
+/* Returns verdict for packet, and may modify conntrack type */
+static int udp_packet(struct nf_conn *conntrack,
+		      const struct sk_buff *skb,
+		      unsigned int dataoff,
+		      enum ip_conntrack_info ctinfo,
+		      int pf,
+		      unsigned int hooknum)
+{
+	/* If we've seen traffic both ways, this is some kind of UDP
+	   stream.  Extend timeout. */
+	if (test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status)) {
+		nf_ct_refresh_acct(conntrack, ctinfo, skb,
+				   nf_ct_udp_timeout_stream);
+		/* Also, more likely to be important, and not a probe */
+		if (!test_and_set_bit(IPS_ASSURED_BIT, &conntrack->status))
+			nf_conntrack_event_cache(IPCT_STATUS, skb);
+	} else
+		nf_ct_refresh_acct(conntrack, ctinfo, skb, nf_ct_udp_timeout);
+
+	return NF_ACCEPT;
+}
+
+/* Called when a new connection for this protocol found. */
+static int udp_new(struct nf_conn *conntrack, const struct sk_buff *skb,
+		   unsigned int dataoff)
+{
+	return 1;
+}
+
+static int udp_error(struct sk_buff *skb, unsigned int dataoff,
+		     enum ip_conntrack_info *ctinfo,
+		     int pf,
+		     unsigned int hooknum,
+		     int (*csum)(const struct sk_buff *, unsigned int))
+{
+	unsigned int udplen = skb->len - dataoff;
+	struct udphdr _hdr, *hdr;
+
+	/* Header is too small? */
+	hdr = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
+	if (hdr == NULL) {
+		if (LOG_INVALID(IPPROTO_UDP))
+			nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
+				      "nf_ct_udp: short packet ");
+		return -NF_ACCEPT;
+	}
+
+	/* Truncated/malformed packets */
+	if (ntohs(hdr->len) > udplen || ntohs(hdr->len) < sizeof(*hdr)) {
+		if (LOG_INVALID(IPPROTO_UDP))
+			nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
+				"nf_ct_udp: truncated/malformed packet ");
+		return -NF_ACCEPT;
+	}
+
+	/* Packet with no checksum */
+	if (!hdr->check)
+		return NF_ACCEPT;
+
+	/* Checksum invalid? Ignore.
+	 * We skip checking packets on the outgoing path
+	 * because the semantics of CHECKSUM_HW are different there
+	 * and moreover root might send raw packets.
+	 * FIXME: Source route IP option packets --RR */
+	if (((pf == PF_INET && hooknum == NF_IP_PRE_ROUTING) ||
+	     (pf == PF_INET6 && hooknum == NF_IP6_PRE_ROUTING))
+	    && skb->ip_summed != CHECKSUM_UNNECESSARY
+	    && csum(skb, dataoff)) {
+		if (LOG_INVALID(IPPROTO_UDP))
+			nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
+				"nf_ct_udp: bad UDP checksum ");
+		return -NF_ACCEPT;
+	}
+
+	return NF_ACCEPT;
+}
+
+static int csum4(const struct sk_buff *skb, unsigned int dataoff)
+{
+	return csum_tcpudp_magic(skb->nh.iph->saddr, skb->nh.iph->daddr,
+				 skb->len - dataoff, IPPROTO_UDP,
+				 skb->ip_summed == CHECKSUM_HW ? skb->csum
+				 : skb_checksum(skb, dataoff,
+						skb->len - dataoff, 0));
+}
+
+static int csum6(const struct sk_buff *skb, unsigned int dataoff)
+{
+	return csum_ipv6_magic(&skb->nh.ipv6h->saddr, &skb->nh.ipv6h->daddr,
+			       skb->len - dataoff, IPPROTO_UDP,
+			       skb->ip_summed == CHECKSUM_HW ? skb->csum
+			       : skb_checksum(skb, dataoff, skb->len - dataoff,
+					      0));
+}
+
+static int udp_error4(struct sk_buff *skb,
+		      unsigned int dataoff,
+		      enum ip_conntrack_info *ctinfo,
+		      int pf,
+		      unsigned int hooknum)
+{
+	return udp_error(skb, dataoff, ctinfo, pf, hooknum, csum4);
+}
+
+static int udp_error6(struct sk_buff *skb,
+		      unsigned int dataoff,
+		      enum ip_conntrack_info *ctinfo,
+		      int pf,
+		      unsigned int hooknum)
+{
+	return udp_error(skb, dataoff, ctinfo, pf, hooknum, csum6);
+}
+
+struct nf_conntrack_protocol nf_conntrack_protocol_udp4 =
+{
+	.l3proto		= PF_INET,
+	.proto			= IPPROTO_UDP,
+	.name			= "udp",
+	.pkt_to_tuple		= udp_pkt_to_tuple,
+	.invert_tuple		= udp_invert_tuple,
+	.print_tuple		= udp_print_tuple,
+	.print_conntrack	= udp_print_conntrack,
+	.packet			= udp_packet,
+	.new			= udp_new,
+	.error			= udp_error4,
+};
+
+struct nf_conntrack_protocol nf_conntrack_protocol_udp6 =
+{
+	.l3proto		= PF_INET6,
+	.proto			= IPPROTO_UDP,
+	.name			= "udp",
+	.pkt_to_tuple		= udp_pkt_to_tuple,
+	.invert_tuple		= udp_invert_tuple,
+	.print_tuple		= udp_print_tuple,
+	.print_conntrack	= udp_print_conntrack,
+	.packet			= udp_packet,
+	.new			= udp_new,
+	.error			= udp_error6,
+};
+
+EXPORT_SYMBOL(nf_conntrack_protocol_udp4);
+EXPORT_SYMBOL(nf_conntrack_protocol_udp6);
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
new file mode 100644
index 0000000..45224db
--- /dev/null
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -0,0 +1,869 @@
+/* This file contains all the functions required for the standalone
+   nf_conntrack module.
+
+   These are not required by the compatibility layer.
+*/
+
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ *	- generalize L3 protocol dependent part.
+ *
+ * Derived from net/ipv4/netfilter/ip_conntrack_standalone.c
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/netfilter.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/percpu.h>
+#include <linux/netdevice.h>
+#ifdef CONFIG_SYSCTL
+#include <linux/sysctl.h>
+#endif
+
+#define ASSERT_READ_LOCK(x)
+#define ASSERT_WRITE_LOCK(x)
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_l3proto.h>
+#include <net/netfilter/nf_conntrack_protocol.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <linux/netfilter_ipv4/listhelp.h>
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(format, args...)
+#endif
+
+MODULE_LICENSE("GPL");
+
+extern atomic_t nf_conntrack_count;
+DECLARE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat);
+
+static int kill_l3proto(struct nf_conn *i, void *data)
+{
+	return (i->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num == 
+			((struct nf_conntrack_l3proto *)data)->l3proto);
+}
+
+static int kill_proto(struct nf_conn *i, void *data)
+{
+	struct nf_conntrack_protocol *proto;
+	proto = (struct nf_conntrack_protocol *)data;
+	return (i->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum == 
+			proto->proto) &&
+	       (i->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num ==
+			proto->l3proto);
+}
+
+#ifdef CONFIG_PROC_FS
+static int
+print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
+	    struct nf_conntrack_l3proto *l3proto,
+	    struct nf_conntrack_protocol *proto)
+{
+	return l3proto->print_tuple(s, tuple) || proto->print_tuple(s, tuple);
+}
+
+#ifdef CONFIG_NF_CT_ACCT
+static unsigned int
+seq_print_counters(struct seq_file *s,
+		   const struct ip_conntrack_counter *counter)
+{
+	return seq_printf(s, "packets=%llu bytes=%llu ",
+			  (unsigned long long)counter->packets,
+			  (unsigned long long)counter->bytes);
+}
+#else
+#define seq_print_counters(x, y)	0
+#endif
+
+struct ct_iter_state {
+	unsigned int bucket;
+};
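+
+/* seq_file iterator for the conntrack table: walk the hash buckets in
+ * order, remembering the current bucket in ct_iter_state across calls. */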
+
+static struct list_head *ct_get_first(struct seq_file *seq)
+{
+	struct ct_iter_state *st = seq->private;
+
+	for (st->bucket = 0;
+	     st->bucket < nf_conntrack_htable_size;
+	     st->bucket++) {
+		if (!list_empty(&nf_conntrack_hash[st->bucket]))
+			return nf_conntrack_hash[st->bucket].next;
+	}
+	return NULL;
+}
+
+static struct list_head *ct_get_next(struct seq_file *seq, struct list_head *head)
+{
+	struct ct_iter_state *st = seq->private;
+
+	head = head->next;
+	while (head == &nf_conntrack_hash[st->bucket]) {
+		if (++st->bucket >= nf_conntrack_htable_size)
+			return NULL;
+		head = nf_conntrack_hash[st->bucket].next;
+	}
+	return head;
+}
+
+static struct list_head *ct_get_idx(struct seq_file *seq, loff_t pos)
+{
+	struct list_head *head = ct_get_first(seq);
+
+	if (head)
+		while (pos && (head = ct_get_next(seq, head)))
+			pos--;
+	return pos ? NULL : head;
+}
+
+static void *ct_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	read_lock_bh(&nf_conntrack_lock);
+	return ct_get_idx(seq, *pos);
+}
+
+static void *ct_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+	(*pos)++;
+	return ct_get_next(s, v);
+}
+
+static void ct_seq_stop(struct seq_file *s, void *v)
+{
+	read_unlock_bh(&nf_conntrack_lock);
+}
+
+/* return 0 on success, -ENOSPC in case of error */
+static int ct_seq_show(struct seq_file *s, void *v)
+{
+	const struct nf_conntrack_tuple_hash *hash = v;
+	const struct nf_conn *conntrack = nf_ct_tuplehash_to_ctrack(hash);
+	struct nf_conntrack_l3proto *l3proto;
+	struct nf_conntrack_protocol *proto;
+
+	ASSERT_READ_LOCK(&nf_conntrack_lock);
+	NF_CT_ASSERT(conntrack);
+
+	/* we only want to print DIR_ORIGINAL */
+	if (NF_CT_DIRECTION(hash))
+		return 0;
+
+	l3proto = nf_ct_find_l3proto(conntrack->tuplehash[IP_CT_DIR_ORIGINAL]
+				     .tuple.src.l3num);
+
+	NF_CT_ASSERT(l3proto);
+	proto = nf_ct_find_proto(conntrack->tuplehash[IP_CT_DIR_ORIGINAL]
+				 .tuple.src.l3num,
+				 conntrack->tuplehash[IP_CT_DIR_ORIGINAL]
+				 .tuple.dst.protonum);
+	NF_CT_ASSERT(proto);
+
+	if (seq_printf(s, "%-8s %u %-8s %u %ld ",
+		       l3proto->name,
+		       conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num,
+		       proto->name,
+		       conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum,
+		       timer_pending(&conntrack->timeout)
+		       ? (long)(conntrack->timeout.expires - jiffies)/HZ : 0) != 0)
+		return -ENOSPC;
+
+	if (l3proto->print_conntrack(s, conntrack))
+		return -ENOSPC;
+
+	if (proto->print_conntrack(s, conntrack))
+		return -ENOSPC;
+
+	if (print_tuple(s, &conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+			l3proto, proto))
+		return -ENOSPC;
+
+	if (seq_print_counters(s, &conntrack->counters[IP_CT_DIR_ORIGINAL]))
+		return -ENOSPC;
+
+	if (!(test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status)))
+		if (seq_printf(s, "[UNREPLIED] "))
+			return -ENOSPC;
+
+	if (print_tuple(s, &conntrack->tuplehash[IP_CT_DIR_REPLY].tuple,
+			l3proto, proto))
+		return -ENOSPC;
+
+	if (seq_print_counters(s, &conntrack->counters[IP_CT_DIR_REPLY]))
+		return -ENOSPC;
+
+	if (test_bit(IPS_ASSURED_BIT, &conntrack->status))
+		if (seq_printf(s, "[ASSURED] "))
+			return -ENOSPC;
+
+#if defined(CONFIG_NF_CONNTRACK_MARK)
+	if (seq_printf(s, "mark=%u ", conntrack->mark))
+		return -ENOSPC;
+#endif
+
+	if (seq_printf(s, "use=%u\n", atomic_read(&conntrack->ct_general.use)))
+		return -ENOSPC;
+	
+	return 0;
+}
+
+static struct seq_operations ct_seq_ops = {
+	.start = ct_seq_start,
+	.next  = ct_seq_next,
+	.stop  = ct_seq_stop,
+	.show  = ct_seq_show
+};
+
+static int ct_open(struct inode *inode, struct file *file)
+{
+	struct seq_file *seq;
+	struct ct_iter_state *st;
+	int ret;
+
+	st = kmalloc(sizeof(struct ct_iter_state), GFP_KERNEL);
+	if (st == NULL)
+		return -ENOMEM;
+	ret = seq_open(file, &ct_seq_ops);
+	if (ret)
+		goto out_free;
+	seq          = file->private_data;
+	seq->private = st;
+	memset(st, 0, sizeof(struct ct_iter_state));
+	return ret;
+out_free:
+	kfree(st);
+	return ret;
+}
+
+static struct file_operations ct_file_ops = {
+	.owner   = THIS_MODULE,
+	.open    = ct_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release_private,
+};
+
+/* expects */
+static void *exp_seq_start(struct seq_file *s, loff_t *pos)
+{
+	struct list_head *e = &nf_conntrack_expect_list;
+	loff_t i;
+
+	/* The seq_file API calls stop even if start fails,
+	 * so we must take the lock here since stop unlocks it */
+	read_lock_bh(&nf_conntrack_lock);
+
+	if (list_empty(e))
+		return NULL;
+
+	for (i = 0; i <= *pos; i++) {
+		e = e->next;
+		if (e == &nf_conntrack_expect_list)
+			return NULL;
+	}
+	return e;
+}
+
+static void *exp_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+	struct list_head *e = v;
+
+	++*pos;
+	e = e->next;
+
+	if (e == &nf_conntrack_expect_list)
+		return NULL;
+
+	return e;
+}
+
+static void exp_seq_stop(struct seq_file *s, void *v)
+{
+	read_unlock_bh(&nf_conntrack_lock);
+}
+
+static int exp_seq_show(struct seq_file *s, void *v)
+{
+	struct nf_conntrack_expect *expect = v;
+
+	if (expect->timeout.function)
+		seq_printf(s, "%ld ", timer_pending(&expect->timeout)
+			   ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
+	else
+		seq_printf(s, "- ");
+	seq_printf(s, "l3proto = %u proto=%u ",
+		   expect->tuple.src.l3num,
+		   expect->tuple.dst.protonum);
+	print_tuple(s, &expect->tuple,
+		    nf_ct_find_l3proto(expect->tuple.src.l3num),
+		    nf_ct_find_proto(expect->tuple.src.l3num,
+				     expect->tuple.dst.protonum));
+	return seq_putc(s, '\n');
+}
+
+static struct seq_operations exp_seq_ops = {
+	.start = exp_seq_start,
+	.next = exp_seq_next,
+	.stop = exp_seq_stop,
+	.show = exp_seq_show
+};
+
+static int exp_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &exp_seq_ops);
+}
+
+static struct file_operations exp_file_ops = {
+	.owner   = THIS_MODULE,
+	.open    = exp_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release
+};
+
+static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	int cpu;
+
+	if (*pos == 0)
+		return SEQ_START_TOKEN;
+
+	for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
+		if (!cpu_possible(cpu))
+			continue;
+		*pos = cpu + 1;
+		return &per_cpu(nf_conntrack_stat, cpu);
+	}
+
+	return NULL;
+}
+
+static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	int cpu;
+
+	for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
+		if (!cpu_possible(cpu))
+			continue;
+		*pos = cpu + 1;
+		return &per_cpu(nf_conntrack_stat, cpu);
+	}
+
+	return NULL;
+}
+
+static void ct_cpu_seq_stop(struct seq_file *seq, void *v)
+{
+}
+
+static int ct_cpu_seq_show(struct seq_file *seq, void *v)
+{
+	unsigned int nr_conntracks = atomic_read(&nf_conntrack_count);
+	struct ip_conntrack_stat *st = v;
+
+	if (v == SEQ_START_TOKEN) {
+		seq_printf(seq, "entries  searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error  expect_new expect_create expect_delete\n");
+		return 0;
+	}
+
+	seq_printf(seq, "%08x  %08x %08x %08x %08x %08x %08x %08x "
+			"%08x %08x %08x %08x %08x  %08x %08x %08x \n",
+		   nr_conntracks,
+		   st->searched,
+		   st->found,
+		   st->new,
+		   st->invalid,
+		   st->ignore,
+		   st->delete,
+		   st->delete_list,
+		   st->insert,
+		   st->insert_failed,
+		   st->drop,
+		   st->early_drop,
+		   st->error,
+
+		   st->expect_new,
+		   st->expect_create,
+		   st->expect_delete
+		);
+	return 0;
+}
+
+static struct seq_operations ct_cpu_seq_ops = {
+	.start	= ct_cpu_seq_start,
+	.next	= ct_cpu_seq_next,
+	.stop	= ct_cpu_seq_stop,
+	.show	= ct_cpu_seq_show,
+};
+
+static int ct_cpu_seq_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &ct_cpu_seq_ops);
+}
+
+static struct file_operations ct_cpu_seq_fops = {
+	.owner	 = THIS_MODULE,
+	.open	 = ct_cpu_seq_open,
+	.read	 = seq_read,
+	.llseek	 = seq_lseek,
+	.release = seq_release_private,
+};
+#endif /* CONFIG_PROC_FS */
+
+/* Sysctl support */
+
+#ifdef CONFIG_SYSCTL
+
+/* From nf_conntrack_core.c */
+extern int nf_conntrack_max;
+extern unsigned int nf_conntrack_htable_size;
+
+/* From nf_conntrack_proto_tcp.c */
+extern unsigned long nf_ct_tcp_timeout_syn_sent;
+extern unsigned long nf_ct_tcp_timeout_syn_recv;
+extern unsigned long nf_ct_tcp_timeout_established;
+extern unsigned long nf_ct_tcp_timeout_fin_wait;
+extern unsigned long nf_ct_tcp_timeout_close_wait;
+extern unsigned long nf_ct_tcp_timeout_last_ack;
+extern unsigned long nf_ct_tcp_timeout_time_wait;
+extern unsigned long nf_ct_tcp_timeout_close;
+extern unsigned long nf_ct_tcp_timeout_max_retrans;
+extern int nf_ct_tcp_loose;
+extern int nf_ct_tcp_be_liberal;
+extern int nf_ct_tcp_max_retrans;
+
+/* From nf_conntrack_proto_udp.c */
+extern unsigned long nf_ct_udp_timeout;
+extern unsigned long nf_ct_udp_timeout_stream;
+
+/* From nf_conntrack_proto_generic.c */
+extern unsigned long nf_ct_generic_timeout;
+
+/* Log invalid packets of a given protocol */
+static int log_invalid_proto_min = 0;
+static int log_invalid_proto_max = 255;
+
+static struct ctl_table_header *nf_ct_sysctl_header;
+
+static ctl_table nf_ct_sysctl_table[] = {
+	{
+		.ctl_name	= NET_NF_CONNTRACK_MAX,
+		.procname	= "nf_conntrack_max",
+		.data		= &nf_conntrack_max,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
+	{
+		.ctl_name	= NET_NF_CONNTRACK_COUNT,
+		.procname	= "nf_conntrack_count",
+		.data		= &nf_conntrack_count,
+		.maxlen		= sizeof(int),
+		.mode		= 0444,
+		.proc_handler	= &proc_dointvec,
+	},
+	{
+		.ctl_name       = NET_NF_CONNTRACK_BUCKETS,
+		.procname       = "nf_conntrack_buckets",
+		.data           = &nf_conntrack_htable_size,
+		.maxlen         = sizeof(unsigned int),
+		.mode           = 0444,
+		.proc_handler   = &proc_dointvec,
+	},
+	{
+		.ctl_name	= NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT,
+		.procname	= "nf_conntrack_tcp_timeout_syn_sent",
+		.data		= &nf_ct_tcp_timeout_syn_sent,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV,
+		.procname	= "nf_conntrack_tcp_timeout_syn_recv",
+		.data		= &nf_ct_tcp_timeout_syn_recv,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= NET_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED,
+		.procname	= "nf_conntrack_tcp_timeout_established",
+		.data		= &nf_ct_tcp_timeout_established,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= NET_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT,
+		.procname	= "nf_conntrack_tcp_timeout_fin_wait",
+		.data		= &nf_ct_tcp_timeout_fin_wait,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT,
+		.procname	= "nf_conntrack_tcp_timeout_close_wait",
+		.data		= &nf_ct_tcp_timeout_close_wait,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= NET_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK,
+		.procname	= "nf_conntrack_tcp_timeout_last_ack",
+		.data		= &nf_ct_tcp_timeout_last_ack,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= NET_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT,
+		.procname	= "nf_conntrack_tcp_timeout_time_wait",
+		.data		= &nf_ct_tcp_timeout_time_wait,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE,
+		.procname	= "nf_conntrack_tcp_timeout_close",
+		.data		= &nf_ct_tcp_timeout_close,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= NET_NF_CONNTRACK_UDP_TIMEOUT,
+		.procname	= "nf_conntrack_udp_timeout",
+		.data		= &nf_ct_udp_timeout,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= NET_NF_CONNTRACK_UDP_TIMEOUT_STREAM,
+		.procname	= "nf_conntrack_udp_timeout_stream",
+		.data		= &nf_ct_udp_timeout_stream,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= NET_NF_CONNTRACK_GENERIC_TIMEOUT,
+		.procname	= "nf_conntrack_generic_timeout",
+		.data		= &nf_ct_generic_timeout,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= NET_NF_CONNTRACK_LOG_INVALID,
+		.procname	= "nf_conntrack_log_invalid",
+		.data		= &nf_ct_log_invalid,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.strategy	= &sysctl_intvec,
+		.extra1		= &log_invalid_proto_min,
+		.extra2		= &log_invalid_proto_max,
+	},
+	{
+		.ctl_name	= NET_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS,
+		.procname	= "nf_conntrack_tcp_timeout_max_retrans",
+		.data		= &nf_ct_tcp_timeout_max_retrans,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= NET_NF_CONNTRACK_TCP_LOOSE,
+		.procname	= "nf_conntrack_tcp_loose",
+		.data		= &nf_ct_tcp_loose,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
+	{
+		.ctl_name	= NET_NF_CONNTRACK_TCP_BE_LIBERAL,
+		.procname       = "nf_conntrack_tcp_be_liberal",
+		.data           = &nf_ct_tcp_be_liberal,
+		.maxlen         = sizeof(unsigned int),
+		.mode           = 0644,
+		.proc_handler   = &proc_dointvec,
+	},
+	{
+		.ctl_name	= NET_NF_CONNTRACK_TCP_MAX_RETRANS,
+		.procname	= "nf_conntrack_tcp_max_retrans",
+		.data		= &nf_ct_tcp_max_retrans,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
+
+	{ .ctl_name = 0 }
+};
+
+#define NET_NF_CONNTRACK_MAX 2089
+
+static ctl_table nf_ct_netfilter_table[] = {
+	{
+		.ctl_name	= NET_NETFILTER,
+		.procname	= "netfilter",
+		.mode		= 0555,
+		.child		= nf_ct_sysctl_table,
+	},
+	{
+		.ctl_name	= NET_NF_CONNTRACK_MAX,
+		.procname	= "nf_conntrack_max",
+		.data		= &nf_conntrack_max,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
+	{ .ctl_name = 0 }
+};
+
+static ctl_table nf_ct_net_table[] = {
+	{
+		.ctl_name	= CTL_NET,
+		.procname	= "net",
+		.mode		= 0555,
+		.child		= nf_ct_netfilter_table,
+	},
+	{ .ctl_name = 0 }
+};
+EXPORT_SYMBOL(nf_ct_log_invalid);
+#endif /* CONFIG_SYSCTL */
+
+static int init_or_cleanup(int init)
+{
+#ifdef CONFIG_PROC_FS
+	struct proc_dir_entry *proc, *proc_exp, *proc_stat;
+#endif
+	int ret = 0;
+
+	if (!init) goto cleanup;
+
+	ret = nf_conntrack_init();
+	if (ret < 0)
+		goto cleanup_nothing;
+
+#ifdef CONFIG_PROC_FS
+	proc = proc_net_fops_create("nf_conntrack", 0440, &ct_file_ops);
+	if (!proc) goto cleanup_init;
+
+	proc_exp = proc_net_fops_create("nf_conntrack_expect", 0440,
+					&exp_file_ops);
+	if (!proc_exp) goto cleanup_proc;
+
+	proc_stat = create_proc_entry("nf_conntrack", S_IRUGO, proc_net_stat);
+	if (!proc_stat)
+		goto cleanup_proc_exp;
+
+	proc_stat->proc_fops = &ct_cpu_seq_fops;
+	proc_stat->owner = THIS_MODULE;
+#endif
+#ifdef CONFIG_SYSCTL
+	nf_ct_sysctl_header = register_sysctl_table(nf_ct_net_table, 0);
+	if (nf_ct_sysctl_header == NULL) {
+		printk("nf_conntrack: can't register to sysctl.\n");
+		ret = -ENOMEM;
+		goto cleanup_proc_stat;
+	}
+#endif
+
+	return ret;
+
+ cleanup:
+#ifdef CONFIG_SYSCTL
+ 	unregister_sysctl_table(nf_ct_sysctl_header);
+ cleanup_proc_stat:
+#endif
+#ifdef CONFIG_PROC_FS
+	proc_net_remove("nf_conntrack_stat");
+ cleanup_proc_exp:
+	proc_net_remove("nf_conntrack_expect");
+ cleanup_proc:
+	proc_net_remove("nf_conntrack");
+ cleanup_init:
+#endif /* CONFIG_PROC_FS */
+	nf_conntrack_cleanup();
+ cleanup_nothing:
+	return ret;
+}
+
+int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto)
+{
+	int ret = 0;
+
+	write_lock_bh(&nf_conntrack_lock);
+	if (nf_ct_l3protos[proto->l3proto] != &nf_conntrack_generic_l3proto) {
+		ret = -EBUSY;
+		goto out;
+	}
+	nf_ct_l3protos[proto->l3proto] = proto;
+out:
+	write_unlock_bh(&nf_conntrack_lock);
+
+	return ret;
+}
+
+void nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto)
+{
+	write_lock_bh(&nf_conntrack_lock);
+	nf_ct_l3protos[proto->l3proto] = &nf_conntrack_generic_l3proto;
+	write_unlock_bh(&nf_conntrack_lock);
+	
+	/* Somebody could be still looking at the proto in bh. */
+	synchronize_net();
+
+	/* Remove all conntrack entries for this protocol */
+	nf_ct_iterate_cleanup(kill_l3proto, proto);
+}
+
+/* FIXME: Allow NULL functions and sub in pointers to generic for
+   them. --RR */
+int nf_conntrack_protocol_register(struct nf_conntrack_protocol *proto)
+{
+	int ret = 0;
+
+retry:
+	write_lock_bh(&nf_conntrack_lock);
+	if (nf_ct_protos[proto->l3proto]) {
+		if (nf_ct_protos[proto->l3proto][proto->proto]
+				!= &nf_conntrack_generic_protocol) {
+			ret = -EBUSY;
+			goto out_unlock;
+		}
+	} else {
+		/* l3proto may be loaded later. */
+		struct nf_conntrack_protocol **proto_array;
+		int i;
+
+		write_unlock_bh(&nf_conntrack_lock);
+
+		proto_array = (struct nf_conntrack_protocol **)
+				kmalloc(MAX_NF_CT_PROTO *
+					 sizeof(struct nf_conntrack_protocol *),
+					GFP_KERNEL);
+		if (proto_array == NULL) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		for (i = 0; i < MAX_NF_CT_PROTO; i++)
+			proto_array[i] = &nf_conntrack_generic_protocol;
+
+		write_lock_bh(&nf_conntrack_lock);
+		if (nf_ct_protos[proto->l3proto]) {
+			/* bad timing, but no problem */
+			write_unlock_bh(&nf_conntrack_lock);
+			kfree(proto_array);
+		} else {
+			nf_ct_protos[proto->l3proto] = proto_array;
+			write_unlock_bh(&nf_conntrack_lock);
+		}
+
+		/*
+		 * Just once because array is never freed until unloading
+		 * nf_conntrack.ko
+		 */
+		goto retry;
+	}
+
+	nf_ct_protos[proto->l3proto][proto->proto] = proto;
+
+out_unlock:
+	write_unlock_bh(&nf_conntrack_lock);
+out:
+	return ret;
+}
+
+void nf_conntrack_protocol_unregister(struct nf_conntrack_protocol *proto)
+{
+	write_lock_bh(&nf_conntrack_lock);
+	nf_ct_protos[proto->l3proto][proto->proto]
+		= &nf_conntrack_generic_protocol;
+	write_unlock_bh(&nf_conntrack_lock);
+	
+	/* Somebody could be still looking at the proto in bh. */
+	synchronize_net();
+
+	/* Remove all conntrack entries for this protocol */
+	nf_ct_iterate_cleanup(kill_proto, proto);
+}
+
+static int __init init(void)
+{
+	return init_or_cleanup(1);
+}
+
+static void __exit fini(void)
+{
+	init_or_cleanup(0);
+}
+
+module_init(init);
+module_exit(fini);
+
+/* Some modules need us, but don't depend directly on any symbol.
+   They should call this. */
+void need_nf_conntrack(void)
+{
+}
+
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+EXPORT_SYMBOL_GPL(nf_conntrack_chain);
+EXPORT_SYMBOL_GPL(nf_conntrack_expect_chain);
+EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier);
+EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
+EXPORT_SYMBOL_GPL(__nf_ct_event_cache_init);
+EXPORT_PER_CPU_SYMBOL_GPL(nf_conntrack_ecache);
+EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);
+#endif
+EXPORT_SYMBOL(nf_conntrack_l3proto_register);
+EXPORT_SYMBOL(nf_conntrack_l3proto_unregister);
+EXPORT_SYMBOL(nf_conntrack_protocol_register);
+EXPORT_SYMBOL(nf_conntrack_protocol_unregister);
+EXPORT_SYMBOL(nf_ct_invert_tuplepr);
+EXPORT_SYMBOL(nf_conntrack_alter_reply);
+EXPORT_SYMBOL(nf_conntrack_destroyed);
+EXPORT_SYMBOL(need_nf_conntrack);
+EXPORT_SYMBOL(nf_conntrack_helper_register);
+EXPORT_SYMBOL(nf_conntrack_helper_unregister);
+EXPORT_SYMBOL(nf_ct_iterate_cleanup);
+EXPORT_SYMBOL(__nf_ct_refresh_acct);
+EXPORT_SYMBOL(nf_ct_protos);
+EXPORT_SYMBOL(nf_ct_find_proto);
+EXPORT_SYMBOL(nf_ct_l3protos);
+EXPORT_SYMBOL(nf_conntrack_expect_alloc);
+EXPORT_SYMBOL(nf_conntrack_expect_put);
+EXPORT_SYMBOL(nf_conntrack_expect_related);
+EXPORT_SYMBOL(nf_conntrack_unexpect_related);
+EXPORT_SYMBOL(nf_conntrack_tuple_taken);
+EXPORT_SYMBOL(nf_conntrack_htable_size);
+EXPORT_SYMBOL(nf_conntrack_lock);
+EXPORT_SYMBOL(nf_conntrack_hash);
+EXPORT_SYMBOL(nf_conntrack_untracked);
+EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
+#ifdef CONFIG_IP_NF_NAT_NEEDED
+EXPORT_SYMBOL(nf_conntrack_tcp_update);
+#endif
+EXPORT_SYMBOL(__nf_conntrack_confirm);
+EXPORT_SYMBOL(nf_ct_get_tuple);
+EXPORT_SYMBOL(nf_ct_invert_tuple);
+EXPORT_SYMBOL(nf_conntrack_in);
+EXPORT_SYMBOL(__nf_conntrack_attach);
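The registration interface above is symmetric: an address-family tracker goes through nf_conntrack_l3proto_register()/_unregister(), a transport tracker through nf_conntrack_protocol_register()/_unregister(), and both register calls return -EBUSY when the slot is not the generic placeholder. A minimal sketch of the expected module init/exit pairing follows; my_l3proto, my_proto and the function names are hypothetical, the tracker definitions themselves (with all their callbacks) are assumed to be fully populated elsewhere, and the usual nf_conntrack headers are assumed to be included.

	/* Sketch only: registration/unregistration pattern for a hypothetical
	 * conntrack tracker module.  The structures are assumed to be fully
	 * populated elsewhere; unregister reverses registration in LIFO order. */
	extern struct nf_conntrack_l3proto my_l3proto;		/* hypothetical */
	extern struct nf_conntrack_protocol my_proto;		/* hypothetical */

	static int __init my_ct_init(void)
	{
		int err;

		need_nf_conntrack();	/* express the dependency explicitly */

		err = nf_conntrack_l3proto_register(&my_l3proto);
		if (err < 0)
			return err;

		err = nf_conntrack_protocol_register(&my_proto);
		if (err < 0)
			nf_conntrack_l3proto_unregister(&my_l3proto);
		return err;
	}

	static void __exit my_ct_exit(void)
	{
		nf_conntrack_protocol_unregister(&my_proto);
		nf_conntrack_l3proto_unregister(&my_l3proto);
	}

	module_init(my_ct_init);
	module_exit(my_ct_exit);

Unregistration already calls synchronize_net() and nf_ct_iterate_cleanup() as shown above, so a module's exit path only has to undo its registrations; existing entries for the protocol are flushed on its behalf.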
diff --git a/net/netlink/Makefile b/net/netlink/Makefile
index 39d9c2d..e3589c2 100644
--- a/net/netlink/Makefile
+++ b/net/netlink/Makefile
@@ -2,4 +2,4 @@
 # Makefile for the netlink driver.
 #
 
-obj-y  				:= af_netlink.o
+obj-y  				:= af_netlink.o attr.o genetlink.o
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 5ca2835..8c38ee6 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -58,6 +58,7 @@
 
 #include <net/sock.h>
 #include <net/scm.h>
+#include <net/netlink.h>
 
 #define Nprintk(a...)
 #define NLGRPSZ(x)	(ALIGN(x, sizeof(unsigned long) * 8) / 8)
@@ -427,7 +428,8 @@
 
 	spin_lock(&nlk->cb_lock);
 	if (nlk->cb) {
-		nlk->cb->done(nlk->cb);
+		if (nlk->cb->done)
+			nlk->cb->done(nlk->cb);
 		netlink_destroy_callback(nlk->cb);
 		nlk->cb = NULL;
 	}
@@ -1322,7 +1324,8 @@
 	skb_queue_tail(&sk->sk_receive_queue, skb);
 	sk->sk_data_ready(sk, skb->len);
 
-	cb->done(cb);
+	if (cb->done)
+		cb->done(cb);
 	nlk->cb = NULL;
 	spin_unlock(&nlk->cb_lock);
 
@@ -1409,6 +1412,94 @@
 	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
 }
 
+static int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
+						     struct nlmsghdr *, int *))
+{
+	unsigned int total_len;
+	struct nlmsghdr *nlh;
+	int err;
+
+	while (skb->len >= nlmsg_total_size(0)) {
+		nlh = (struct nlmsghdr *) skb->data;
+
+		if (skb->len < nlh->nlmsg_len)
+			return 0;
+
+		total_len = min(NLMSG_ALIGN(nlh->nlmsg_len), skb->len);
+
+		if (cb(skb, nlh, &err) < 0) {
+			/* Not an error, but we have to interrupt processing
+			 * here. Note that in this case we do not pull the
+			 * message from the skb; it will be processed later.
+			 */
+			if (err == 0)
+				return -1;
+			netlink_ack(skb, nlh, err);
+		} else if (nlh->nlmsg_flags & NLM_F_ACK)
+			netlink_ack(skb, nlh, 0);
+
+		skb_pull(skb, total_len);
+	}
+
+	return 0;
+}
+
+/**
+ * netlink_run_queue - Process netlink receive queue.
+ * @sk: Netlink socket containing the queue
+ * @qlen: Place to store queue length upon entry
+ * @cb: Callback function invoked for each netlink message found
+ *
+ * Processes as much as there was in the queue upon entry and invokes
+ * a callback function for each netlink message found. The callback
+ * function may refuse a message by returning a negative error code
+ * while setting the error pointer to 0; in that case this function
+ * returns with a qlen != 0.
+ *
+ * qlen must be initialized to 0 before the initial entry; afterwards
+ * the function may be called repeatedly until qlen reaches 0.
+ */
+void netlink_run_queue(struct sock *sk, unsigned int *qlen,
+		       int (*cb)(struct sk_buff *, struct nlmsghdr *, int *))
+{
+	struct sk_buff *skb;
+
+	if (!*qlen || *qlen > skb_queue_len(&sk->sk_receive_queue))
+		*qlen = skb_queue_len(&sk->sk_receive_queue);
+
+	for (; *qlen; (*qlen)--) {
+		skb = skb_dequeue(&sk->sk_receive_queue);
+		if (netlink_rcv_skb(skb, cb)) {
+			if (skb->len)
+				skb_queue_head(&sk->sk_receive_queue, skb);
+			else {
+				kfree_skb(skb);
+				(*qlen)--;
+			}
+			break;
+		}
+
+		kfree_skb(skb);
+	}
+}
+
+/**
+ * netlink_queue_skip - Skip netlink message while processing queue.
+ * @nlh: Netlink message to be skipped
+ * @skb: Socket buffer containing the netlink messages.
+ *
+ * Pulls the given netlink message off the socket buffer so the next
+ * call to netlink_run_queue() will not reconsider the message.
+ */
+void netlink_queue_skip(struct nlmsghdr *nlh, struct sk_buff *skb)
+{
+	int msglen = NLMSG_ALIGN(nlh->nlmsg_len);
+
+	if (msglen > skb->len)
+		msglen = skb->len;
+
+	skb_pull(skb, msglen);
+}
 
 #ifdef CONFIG_PROC_FS
 struct nl_seq_iter {
@@ -1657,6 +1748,8 @@
 core_initcall(netlink_proto_init);
 
 EXPORT_SYMBOL(netlink_ack);
+EXPORT_SYMBOL(netlink_run_queue);
+EXPORT_SYMBOL(netlink_queue_skip);
 EXPORT_SYMBOL(netlink_broadcast);
 EXPORT_SYMBOL(netlink_dump_start);
 EXPORT_SYMBOL(netlink_kernel_create);
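netlink_run_queue() and netlink_queue_skip() are consumed by the genetlink receive path added below and by the xfrm_user conversion at the end of this patch; the pattern is always the same: the kernel socket's data-input callback drains the queue under the subsystem's own lock, and the per-message callback reports failures through the errp pointer. A minimal sketch under those assumptions, with my_sem, my_rcv_msg and my_netlink_rcv as hypothetical names:

	/* Sketch of a netlink_run_queue() user; mirrors genl_rcv() and
	 * xfrm_netlink_rcv() elsewhere in this patch. */
	static DECLARE_MUTEX(my_sem);

	static int my_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp)
	{
		/* Handle one message.  Return a negative value with *errp set
		 * to nack the message, or a negative value with *errp == 0 to
		 * stop processing and leave the message queued. */
		return 0;
	}

	static void my_netlink_rcv(struct sock *sk, int len)
	{
		unsigned int qlen = 0;

		do {
			down(&my_sem);
			netlink_run_queue(sk, &qlen, &my_rcv_msg);
			up(&my_sem);
		} while (qlen);
	}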
diff --git a/net/netlink/attr.c b/net/netlink/attr.c
new file mode 100644
index 0000000..fffef4a
--- /dev/null
+++ b/net/netlink/attr.c
@@ -0,0 +1,328 @@
+/*
+ * NETLINK      Netlink attributes
+ *
+ * 		Authors:	Thomas Graf <tgraf@suug.ch>
+ * 				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/jiffies.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <net/netlink.h>
+
+static u16 nla_attr_minlen[NLA_TYPE_MAX+1] __read_mostly = {
+	[NLA_U8]	= sizeof(u8),
+	[NLA_U16]	= sizeof(u16),
+	[NLA_U32]	= sizeof(u32),
+	[NLA_U64]	= sizeof(u64),
+	[NLA_STRING]	= 1,
+	[NLA_NESTED]	= NLA_HDRLEN,
+};
+
+static int validate_nla(struct nlattr *nla, int maxtype,
+			struct nla_policy *policy)
+{
+	struct nla_policy *pt;
+	int minlen = 0;
+
+	if (nla->nla_type <= 0 || nla->nla_type > maxtype)
+		return 0;
+
+	pt = &policy[nla->nla_type];
+
+	BUG_ON(pt->type > NLA_TYPE_MAX);
+
+	if (pt->minlen)
+		minlen = pt->minlen;
+	else if (pt->type != NLA_UNSPEC)
+		minlen = nla_attr_minlen[pt->type];
+
+	if (pt->type == NLA_FLAG && nla_len(nla) > 0)
+		return -ERANGE;
+
+	if (nla_len(nla) < minlen)
+		return -ERANGE;
+
+	return 0;
+}
+
+/**
+ * nla_validate - Validate a stream of attributes
+ * @head: head of attribute stream
+ * @len: length of attribute stream
+ * @maxtype: maximum attribute type to be expected
+ * @policy: validation policy
+ *
+ * Validates all attributes in the specified attribute stream against the
+ * specified policy. Attributes with a type exceeding maxtype will be
+ * ignored. See the documentation of struct nla_policy for more details.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int nla_validate(struct nlattr *head, int len, int maxtype,
+		 struct nla_policy *policy)
+{
+	struct nlattr *nla;
+	int rem, err;
+
+	nla_for_each_attr(nla, head, len, rem) {
+		err = validate_nla(nla, maxtype, policy);
+		if (err < 0)
+			goto errout;
+	}
+
+	err = 0;
+errout:
+	return err;
+}
+
+/**
+ * nla_parse - Parse a stream of attributes into a tb buffer
+ * @tb: destination array with maxtype+1 elements
+ * @maxtype: maximum attribute type to be expected
+ * @head: head of attribute stream
+ * @len: length of attribute stream
+ * @policy: validation policy
+ *
+ * Parses a stream of attributes and stores a pointer to each attribute in
+ * the tb array accessible via the attribute type. Attributes with a type
+ * exceeding maxtype will be silently ignored for backwards compatibility
+ * reasons. policy may be set to NULL if no validation is required.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head, int len,
+	      struct nla_policy *policy)
+{
+	struct nlattr *nla;
+	int rem, err;
+
+	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
+
+	nla_for_each_attr(nla, head, len, rem) {
+		u16 type = nla->nla_type;
+
+		if (type > 0 && type <= maxtype) {
+			if (policy) {
+				err = validate_nla(nla, maxtype, policy);
+				if (err < 0)
+					goto errout;
+			}
+
+			tb[type] = nla;
+		}
+	}
+
+	if (unlikely(rem > 0))
+		printk(KERN_WARNING "netlink: %d bytes leftover after parsing "
+		       "attributes.\n", rem);
+
+	err = 0;
+errout:
+	return err;
+}
+
+/**
+ * nla_find - Find a specific attribute in a stream of attributes
+ * @head: head of attribute stream
+ * @len: length of attribute stream
+ * @attrtype: type of attribute to look for
+ *
+ * Returns the first attribute in the stream matching the specified type.
+ */
+struct nlattr *nla_find(struct nlattr *head, int len, int attrtype)
+{
+	struct nlattr *nla;
+	int rem;
+
+	nla_for_each_attr(nla, head, len, rem)
+		if (nla->nla_type == attrtype)
+			return nla;
+
+	return NULL;
+}
+
+/**
+ * nla_strlcpy - Copy string attribute payload into a sized buffer
+ * @dst: where to copy the string to
+ * @nla: attribute to copy the string from
+ * @dstsize: size of destination buffer
+ *
+ * Copies at most dstsize - 1 bytes into the destination buffer.
+ * The result is always a valid NUL-terminated string. Unlike
+ * strlcpy the destination buffer is always padded out.
+ *
+ * Returns the length of the source string.
+ */
+size_t nla_strlcpy(char *dst, const struct nlattr *nla, size_t dstsize)
+{
+	size_t srclen = nla_len(nla);
+	char *src = nla_data(nla);
+
+	if (srclen > 0 && src[srclen - 1] == '\0')
+		srclen--;
+
+	if (dstsize > 0) {
+		size_t len = (srclen >= dstsize) ? dstsize - 1 : srclen;
+
+		memset(dst, 0, dstsize);
+		memcpy(dst, src, len);
+	}
+
+	return srclen;
+}
+
+/**
+ * nla_memcpy - Copy a netlink attribute into another memory area
+ * @dest: where to copy to
+ * @src: netlink attribute to copy from
+ * @count: size of the destination area
+ *
+ * Note: The number of bytes copied is limited by the length of
+ *       the attribute's payload.
+ *
+ * Returns the number of bytes copied.
+ */
+int nla_memcpy(void *dest, struct nlattr *src, int count)
+{
+	int minlen = min_t(int, count, nla_len(src));
+
+	memcpy(dest, nla_data(src), minlen);
+
+	return minlen;
+}
+
+/**
+ * nla_memcmp - Compare an attribute with sized memory area
+ * @nla: netlink attribute
+ * @data: memory area
+ * @size: size of memory area
+ */
+int nla_memcmp(const struct nlattr *nla, const void *data,
+			     size_t size)
+{
+	int d = nla_len(nla) - size;
+
+	if (d == 0)
+		d = memcmp(nla_data(nla), data, size);
+
+	return d;
+}
+
+/**
+ * nla_strcmp - Compare a string attribute against a string
+ * @nla: netlink string attribute
+ * @str: another string
+ */
+int nla_strcmp(const struct nlattr *nla, const char *str)
+{
+	int len = strlen(str) + 1;
+	int d = nla_len(nla) - len;
+
+	if (d == 0)
+		d = memcmp(nla_data(nla), str, len);
+
+	return d;
+}
+
+/**
+ * __nla_reserve - reserve room for attribute on the skb
+ * @skb: socket buffer to reserve room on
+ * @attrtype: attribute type
+ * @attrlen: length of attribute payload
+ *
+ * Adds a netlink attribute header to a socket buffer and reserves
+ * room for the payload but does not copy it.
+ *
+ * The caller is responsible for ensuring that the skb provides enough
+ * tailroom for the attribute header and payload.
+ */
+struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
+{
+	struct nlattr *nla;
+
+	nla = (struct nlattr *) skb_put(skb, nla_total_size(attrlen));
+	nla->nla_type = attrtype;
+	nla->nla_len = nla_attr_size(attrlen);
+
+	memset((unsigned char *) nla + nla->nla_len, 0, nla_padlen(attrlen));
+
+	return nla;
+}
+
+/**
+ * nla_reserve - reserve room for attribute on the skb
+ * @skb: socket buffer to reserve room on
+ * @attrtype: attribute type
+ * @attrlen: length of attribute payload
+ *
+ * Adds a netlink attribute header to a socket buffer and reserves
+ * room for the payload but does not copy it.
+ *
+ * Returns NULL if the tailroom of the skb is insufficient to store
+ * the attribute header and payload.
+ */
+struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
+{
+	if (unlikely(skb_tailroom(skb) < nla_total_size(attrlen)))
+		return NULL;
+
+	return __nla_reserve(skb, attrtype, attrlen);
+}
+
+/**
+ * __nla_put - Add a netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @attrlen: length of attribute payload
+ * @data: head of attribute payload
+ *
+ * The caller is responsible for ensuring that the skb provides enough
+ * tailroom for the attribute header and payload.
+ */
+void __nla_put(struct sk_buff *skb, int attrtype, int attrlen,
+			     const void *data)
+{
+	struct nlattr *nla;
+
+	nla = __nla_reserve(skb, attrtype, attrlen);
+	memcpy(nla_data(nla), data, attrlen);
+}
+
+
+/**
+ * nla_put - Add a netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @attrlen: length of attribute payload
+ * @data: head of attribute payload
+ *
+ * Returns -1 if the tailroom of the skb is insufficient to store
+ * the attribute header and payload.
+ */
+int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data)
+{
+	if (unlikely(skb_tailroom(skb) < nla_total_size(attrlen)))
+		return -1;
+
+	__nla_put(skb, attrtype, attrlen, data);
+	return 0;
+}
+
+
+EXPORT_SYMBOL(nla_validate);
+EXPORT_SYMBOL(nla_parse);
+EXPORT_SYMBOL(nla_find);
+EXPORT_SYMBOL(nla_strlcpy);
+EXPORT_SYMBOL(__nla_reserve);
+EXPORT_SYMBOL(nla_reserve);
+EXPORT_SYMBOL(__nla_put);
+EXPORT_SYMBOL(nla_put);
+EXPORT_SYMBOL(nla_memcpy);
+EXPORT_SYMBOL(nla_memcmp);
+EXPORT_SYMBOL(nla_strcmp);
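Taken together, the attr.c helpers cover both directions: nla_put()/nla_reserve() append attributes to an outgoing skb, and nla_parse() validates an incoming stream against a policy and indexes it by type. A small sketch under the headers used by attr.c itself; the MY_ATTR_* enum, my_policy and the two functions are hypothetical.

	/* Sketch only; assumes <linux/kernel.h> and <net/netlink.h>. */
	enum {
		MY_ATTR_UNSPEC,
		MY_ATTR_PORT,		/* u16 */
		MY_ATTR_IFNAME,		/* NUL-terminated string */
		__MY_ATTR_MAX,
	};
	#define MY_ATTR_MAX (__MY_ATTR_MAX - 1)

	static struct nla_policy my_policy[MY_ATTR_MAX + 1] = {
		[MY_ATTR_PORT]	 = { .type = NLA_U16 },
		[MY_ATTR_IFNAME] = { .type = NLA_STRING },
	};

	/* Sender: append attributes, failing cleanly when tailroom runs out. */
	static int my_fill(struct sk_buff *skb)
	{
		u16 port = 8080;

		if (nla_put(skb, MY_ATTR_PORT, sizeof(port), &port) < 0)
			return -EMSGSIZE;
		if (nla_put(skb, MY_ATTR_IFNAME, sizeof("eth0"), "eth0") < 0)
			return -EMSGSIZE;
		return 0;
	}

	/* Receiver: validate and index the attribute stream in one pass. */
	static int my_parse(struct nlattr *head, int len)
	{
		struct nlattr *tb[MY_ATTR_MAX + 1];
		char ifname[16];
		int err;

		err = nla_parse(tb, MY_ATTR_MAX, head, len, my_policy);
		if (err < 0)
			return err;

		if (tb[MY_ATTR_PORT])
			printk(KERN_DEBUG "port=%u\n",
			       (unsigned int) nla_get_u16(tb[MY_ATTR_PORT]));
		if (tb[MY_ATTR_IFNAME])
			nla_strlcpy(ifname, tb[MY_ATTR_IFNAME], sizeof(ifname));
		return 0;
	}

Since nla_parse() zeroes the whole tb array before filling it, absent attributes show up as NULL pointers, which is why the receiver only needs the two NULL checks above.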
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
new file mode 100644
index 0000000..287cfcc
--- /dev/null
+++ b/net/netlink/genetlink.c
@@ -0,0 +1,579 @@
+/*
+ * NETLINK      Generic Netlink Family
+ *
+ * 		Authors:	Jamal Hadi Salim
+ * 				Thomas Graf <tgraf@suug.ch>
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/string.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+#include <net/genetlink.h>
+
+struct sock *genl_sock = NULL;
+
+static DECLARE_MUTEX(genl_sem); /* serialization of message processing */
+
+static void genl_lock(void)
+{
+	down(&genl_sem);
+}
+
+static int genl_trylock(void)
+{
+	return down_trylock(&genl_sem);
+}
+
+static void genl_unlock(void)
+{
+	up(&genl_sem);
+
+	if (genl_sock && genl_sock->sk_receive_queue.qlen)
+		genl_sock->sk_data_ready(genl_sock, 0);
+}
+
+#define GENL_FAM_TAB_SIZE	16
+#define GENL_FAM_TAB_MASK	(GENL_FAM_TAB_SIZE - 1)
+
+static struct list_head family_ht[GENL_FAM_TAB_SIZE];
+
+static int genl_ctrl_event(int event, void *data);
+
+static inline unsigned int genl_family_hash(unsigned int id)
+{
+	return id & GENL_FAM_TAB_MASK;
+}
+
+static inline struct list_head *genl_family_chain(unsigned int id)
+{
+	return &family_ht[genl_family_hash(id)];
+}
+
+static struct genl_family *genl_family_find_byid(unsigned int id)
+{
+	struct genl_family *f;
+
+	list_for_each_entry(f, genl_family_chain(id), family_list)
+		if (f->id == id)
+			return f;
+
+	return NULL;
+}
+
+static struct genl_family *genl_family_find_byname(char *name)
+{
+	struct genl_family *f;
+	int i;
+
+	for (i = 0; i < GENL_FAM_TAB_SIZE; i++)
+		list_for_each_entry(f, genl_family_chain(i), family_list)
+			if (strcmp(f->name, name) == 0)
+				return f;
+
+	return NULL;
+}
+
+static struct genl_ops *genl_get_cmd(u8 cmd, struct genl_family *family)
+{
+	struct genl_ops *ops;
+
+	list_for_each_entry(ops, &family->ops_list, ops_list)
+		if (ops->cmd == cmd)
+			return ops;
+
+	return NULL;
+}
+
+/* Of course we are going to have problems once we hit
+ * 2^16 alive types, but that can only happen by year 2K
+ */
+static inline u16 genl_generate_id(void)
+{
+	static u16 id_gen_idx;
+	int overflowed = 0;
+
+	do {
+		if (id_gen_idx == 0)
+			id_gen_idx = GENL_MIN_ID;
+
+		if (++id_gen_idx > GENL_MAX_ID) {
+			if (!overflowed) {
+				overflowed = 1;
+				id_gen_idx = 0;
+				continue;
+			} else
+				return 0;
+		}
+
+	} while (genl_family_find_byid(id_gen_idx));
+
+	return id_gen_idx;
+}
+
+/**
+ * genl_register_ops - register generic netlink operations
+ * @family: generic netlink family
+ * @ops: operations to be registered
+ *
+ * Registers the specified operations and assigns them to the specified
+ * family. Either a doit or dumpit callback must be specified or the
+ * operation will fail. Only one operation structure per command
+ * identifier may be registered.
+ *
+ * See include/net/genetlink.h for more documentation on the operations
+ * structure.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int genl_register_ops(struct genl_family *family, struct genl_ops *ops)
+{
+	int err = -EINVAL;
+
+	if (ops->dumpit == NULL && ops->doit == NULL)
+		goto errout;
+
+	if (genl_get_cmd(ops->cmd, family)) {
+		err = -EEXIST;
+		goto errout;
+	}
+
+	genl_lock();
+	list_add_tail(&ops->ops_list, &family->ops_list);
+	genl_unlock();
+
+	genl_ctrl_event(CTRL_CMD_NEWOPS, ops);
+	err = 0;
+errout:
+	return err;
+}
+
+/**
+ * genl_unregister_ops - unregister generic netlink operations
+ * @family: generic netlink family
+ * @ops: operations to be unregistered
+ *
+ * Unregisters the specified operations and unassigns them from the
+ * specified family. The call blocks until any message currently being
+ * processed has finished, and message processing does not resume until
+ * the unregistration has completed.
+ *
+ * Note: It is not necessary to unregister all operations before
+ *       unregistering the family, unregistering the family will cause
+ *       all assigned operations to be unregistered automatically.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int genl_unregister_ops(struct genl_family *family, struct genl_ops *ops)
+{
+	struct genl_ops *rc;
+
+	genl_lock();
+	list_for_each_entry(rc, &family->ops_list, ops_list) {
+		if (rc == ops) {
+			list_del(&ops->ops_list);
+			genl_unlock();
+			genl_ctrl_event(CTRL_CMD_DELOPS, ops);
+			return 0;
+		}
+	}
+	genl_unlock();
+
+	return -ENOENT;
+}
+
+/**
+ * genl_register_family - register a generic netlink family
+ * @family: generic netlink family
+ *
+ * Registers the specified family after validating it. Only one family
+ * may be registered with a given family name or identifier. The family
+ * id may equal GENL_ID_GENERATE, causing a unique id to be automatically
+ * generated and assigned.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int genl_register_family(struct genl_family *family)
+{
+	int err = -EINVAL;
+
+	if (family->id && family->id < GENL_MIN_ID)
+		goto errout;
+
+	if (family->id > GENL_MAX_ID)
+		goto errout;
+
+	INIT_LIST_HEAD(&family->ops_list);
+
+	genl_lock();
+
+	if (genl_family_find_byname(family->name)) {
+		err = -EEXIST;
+		goto errout_locked;
+	}
+
+	if (genl_family_find_byid(family->id)) {
+		err = -EEXIST;
+		goto errout_locked;
+	}
+
+	if (!try_module_get(family->owner)) {
+		err = -EBUSY;
+		goto errout_locked;
+	}
+
+	if (family->id == GENL_ID_GENERATE) {
+		u16 newid = genl_generate_id();
+
+		if (!newid) {
+			err = -ENOMEM;
+			goto errout_locked;
+		}
+
+		family->id = newid;
+	}
+
+	if (family->maxattr) {
+		family->attrbuf = kmalloc((family->maxattr+1) *
+					sizeof(struct nlattr *), GFP_KERNEL);
+		if (family->attrbuf == NULL) {
+			err = -ENOMEM;
+			goto errout_locked;
+		}
+	} else
+		family->attrbuf = NULL;
+
+	list_add_tail(&family->family_list, genl_family_chain(family->id));
+	genl_unlock();
+
+	genl_ctrl_event(CTRL_CMD_NEWFAMILY, family);
+
+	return 0;
+
+errout_locked:
+	genl_unlock();
+errout:
+	return err;
+}
+
+/**
+ * genl_unregister_family - unregister generic netlink family
+ * @family: generic netlink family
+ *
+ * Unregisters the specified family.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int genl_unregister_family(struct genl_family *family)
+{
+	struct genl_family *rc;
+
+	genl_lock();
+
+	list_for_each_entry(rc, genl_family_chain(family->id), family_list) {
+		if (family->id != rc->id || strcmp(rc->name, family->name))
+			continue;
+
+		list_del(&rc->family_list);
+		INIT_LIST_HEAD(&family->ops_list);
+		genl_unlock();
+
+		module_put(family->owner);
+		kfree(family->attrbuf);
+		genl_ctrl_event(CTRL_CMD_DELFAMILY, family);
+		return 0;
+	}
+
+	genl_unlock();
+
+	return -ENOENT;
+}
+
+static inline int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
+			       int *errp)
+{
+	struct genl_ops *ops;
+	struct genl_family *family;
+	struct genl_info info;
+	struct genlmsghdr *hdr = nlmsg_data(nlh);
+	int hdrlen, err = -EINVAL;
+
+	if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
+		goto ignore;
+
+	if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
+		goto ignore;
+
+	family = genl_family_find_byid(nlh->nlmsg_type);
+	if (family == NULL) {
+		err = -ENOENT;
+		goto errout;
+	}
+
+	hdrlen = GENL_HDRLEN + family->hdrsize;
+	if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
+		goto errout;
+
+	ops = genl_get_cmd(hdr->cmd, family);
+	if (ops == NULL) {
+		err = -EOPNOTSUPP;
+		goto errout;
+	}
+
+	if ((ops->flags & GENL_ADMIN_PERM) && security_netlink_recv(skb)) {
+		err = -EPERM;
+		goto errout;
+	}
+
+	if (nlh->nlmsg_flags & NLM_F_DUMP) {
+		if (ops->dumpit == NULL) {
+			err = -EOPNOTSUPP;
+			goto errout;
+		}
+
+		*errp = err = netlink_dump_start(genl_sock, skb, nlh,
+						 ops->dumpit, NULL);
+		if (err == 0)
+			skb_pull(skb, min(NLMSG_ALIGN(nlh->nlmsg_len),
+					  skb->len));
+		return -1;
+	}
+
+	if (ops->doit == NULL) {
+		err = -EOPNOTSUPP;
+		goto errout;
+	}
+
+	if (family->attrbuf) {
+		err = nlmsg_parse(nlh, hdrlen, family->attrbuf, family->maxattr,
+				  ops->policy);
+		if (err < 0)
+			goto errout;
+	}
+
+	info.snd_seq = nlh->nlmsg_seq;
+	info.snd_pid = NETLINK_CB(skb).pid;
+	info.nlhdr = nlh;
+	info.genlhdr = nlmsg_data(nlh);
+	info.userhdr = nlmsg_data(nlh) + GENL_HDRLEN;
+	info.attrs = family->attrbuf;
+
+	*errp = err = ops->doit(skb, &info);
+	return err;
+
+ignore:
+	return 0;
+
+errout:
+	*errp = err;
+	return -1;
+}
+
+static void genl_rcv(struct sock *sk, int len)
+{
+	unsigned int qlen = 0;
+
+	do {
+		if (genl_trylock())
+			return;
+		netlink_run_queue(sk, &qlen, &genl_rcv_msg);
+		genl_unlock();
+	} while (qlen && genl_sock && genl_sock->sk_receive_queue.qlen);
+}
+
+/**************************************************************************
+ * Controller
+ **************************************************************************/
+
+static int ctrl_fill_info(struct genl_family *family, u32 pid, u32 seq,
+			  u32 flags, struct sk_buff *skb, u8 cmd)
+{
+	void *hdr;
+
+	hdr = genlmsg_put(skb, pid, seq, GENL_ID_CTRL, 0, flags, cmd,
+			  family->version);
+	if (hdr == NULL)
+		return -1;
+
+	NLA_PUT_STRING(skb, CTRL_ATTR_FAMILY_NAME, family->name);
+	NLA_PUT_U16(skb, CTRL_ATTR_FAMILY_ID, family->id);
+
+	return genlmsg_end(skb, hdr);
+
+nla_put_failure:
+	return genlmsg_cancel(skb, hdr);
+}
+
+static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
+{
+
+	int i, n = 0;
+	struct genl_family *rt;
+	int chains_to_skip = cb->args[0];
+	int fams_to_skip = cb->args[1];
+
+	for (i = 0; i < GENL_FAM_TAB_SIZE; i++) {
+		if (i < chains_to_skip)
+			continue;
+		n = 0;
+		list_for_each_entry(rt, genl_family_chain(i), family_list) {
+			if (++n < fams_to_skip)
+				continue;
+			if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).pid,
+					   cb->nlh->nlmsg_seq, NLM_F_MULTI,
+					   skb, CTRL_CMD_NEWFAMILY) < 0)
+				goto errout;
+		}
+
+		fams_to_skip = 0;
+	}
+
+errout:
+	cb->args[0] = i;
+	cb->args[1] = n;
+
+	return skb->len;
+}
+
+static struct sk_buff *ctrl_build_msg(struct genl_family *family, u32 pid,
+				      int seq, int cmd)
+{
+	struct sk_buff *skb;
+	int err;
+
+	skb = nlmsg_new(NLMSG_GOODSIZE);
+	if (skb == NULL)
+		return ERR_PTR(-ENOBUFS);
+
+	err = ctrl_fill_info(family, pid, seq, 0, skb, cmd);
+	if (err < 0) {
+		nlmsg_free(skb);
+		return ERR_PTR(err);
+	}
+
+	return skb;
+}
+
+static struct nla_policy ctrl_policy[CTRL_ATTR_MAX+1] __read_mostly = {
+	[CTRL_ATTR_FAMILY_ID]	= { .type = NLA_U16 },
+	[CTRL_ATTR_FAMILY_NAME]	= { .type = NLA_STRING },
+};
+
+static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
+{
+	struct sk_buff *msg;
+	struct genl_family *res = NULL;
+	int err = -EINVAL;
+
+	if (info->attrs[CTRL_ATTR_FAMILY_ID]) {
+		u16 id = nla_get_u16(info->attrs[CTRL_ATTR_FAMILY_ID]);
+		res = genl_family_find_byid(id);
+	}
+
+	if (info->attrs[CTRL_ATTR_FAMILY_NAME]) {
+		char name[GENL_NAMSIZ];
+
+		if (nla_strlcpy(name, info->attrs[CTRL_ATTR_FAMILY_NAME],
+				GENL_NAMSIZ) >= GENL_NAMSIZ)
+			goto errout;
+
+		res = genl_family_find_byname(name);
+	}
+
+	if (res == NULL) {
+		err = -ENOENT;
+		goto errout;
+	}
+
+	msg = ctrl_build_msg(res, info->snd_pid, info->snd_seq,
+			     CTRL_CMD_NEWFAMILY);
+	if (IS_ERR(msg)) {
+		err = PTR_ERR(msg);
+		goto errout;
+	}
+
+	err = genlmsg_unicast(msg, info->snd_pid);
+errout:
+	return err;
+}
+
+static int genl_ctrl_event(int event, void *data)
+{
+	struct sk_buff *msg;
+
+	if (genl_sock == NULL)
+		return 0;
+
+	switch (event) {
+	case CTRL_CMD_NEWFAMILY:
+	case CTRL_CMD_DELFAMILY:
+		msg = ctrl_build_msg(data, 0, 0, event);
+		if (IS_ERR(msg))
+			return PTR_ERR(msg);
+
+		genlmsg_multicast(msg, 0, GENL_ID_CTRL);
+		break;
+	}
+
+	return 0;
+}
+
+static struct genl_ops genl_ctrl_ops = {
+	.cmd		= CTRL_CMD_GETFAMILY,
+	.doit		= ctrl_getfamily,
+	.dumpit		= ctrl_dumpfamily,
+	.policy		= ctrl_policy,
+};
+
+static struct genl_family genl_ctrl = {
+	.id = GENL_ID_CTRL,
+	.name = "nlctrl",
+	.version = 0x1,
+	.maxattr = CTRL_ATTR_MAX,
+	.owner = THIS_MODULE,
+};
+
+static int __init genl_init(void)
+{
+	int i, err;
+
+	for (i = 0; i < GENL_FAM_TAB_SIZE; i++)
+		INIT_LIST_HEAD(&family_ht[i]);
+
+	err = genl_register_family(&genl_ctrl);
+	if (err < 0)
+		goto errout;
+
+	err = genl_register_ops(&genl_ctrl, &genl_ctrl_ops);
+	if (err < 0)
+		goto errout_register;
+
+	netlink_set_nonroot(NETLINK_GENERIC, NL_NONROOT_RECV);
+	genl_sock = netlink_kernel_create(NETLINK_GENERIC, GENL_MAX_ID,
+					  genl_rcv, THIS_MODULE);
+	if (genl_sock == NULL) {
+		panic("GENL: Cannot initialize generic netlink\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+
+errout_register:
+	genl_unregister_family(&genl_ctrl);
+errout:
+	panic("GENL: Cannot register controller: %d\n", err);
+	return err;
+}
+
+subsys_initcall(genl_init);
+
+EXPORT_SYMBOL(genl_sock);
+EXPORT_SYMBOL(genl_register_ops);
+EXPORT_SYMBOL(genl_unregister_ops);
+EXPORT_SYMBOL(genl_register_family);
+EXPORT_SYMBOL(genl_unregister_family);
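A client of the generic netlink layer mirrors what the controller does for itself: fill in a struct genl_family (optionally with GENL_ID_GENERATE, and a maxattr/policy pair so that parsed attributes arrive in info->attrs), register it, then attach one struct genl_ops per command. A minimal sketch with hypothetical names (my_family, my_ops, my_doit, MY_CMD_*, MY_ATTR_*):

	/* Sketch of a generic netlink user module; names are illustrative. */
	enum { MY_CMD_UNSPEC, MY_CMD_ECHO, __MY_CMD_MAX };
	enum { MY_ATTR_UNSPEC, MY_ATTR_MSG, __MY_ATTR_MAX };
	#define MY_ATTR_MAX (__MY_ATTR_MAX - 1)

	static struct nla_policy my_policy[MY_ATTR_MAX + 1] = {
		[MY_ATTR_MSG] = { .type = NLA_STRING },
	};

	static int my_doit(struct sk_buff *skb, struct genl_info *info)
	{
		/* Attributes were already parsed against my_policy. */
		if (info->attrs[MY_ATTR_MSG])
			printk(KERN_DEBUG "echo request from pid %u\n",
			       info->snd_pid);
		return 0;
	}

	static struct genl_ops my_ops = {
		.cmd	= MY_CMD_ECHO,
		.doit	= my_doit,
		.policy	= my_policy,
	};

	static struct genl_family my_family = {
		.id	 = GENL_ID_GENERATE,	/* pick a free id automatically */
		.name	 = "my_family",
		.version = 1,
		.maxattr = MY_ATTR_MAX,
		.owner	 = THIS_MODULE,
	};

	static int __init my_genl_init(void)
	{
		int err = genl_register_family(&my_family);

		if (err)
			return err;
		err = genl_register_ops(&my_family, &my_ops);
		if (err)
			genl_unregister_family(&my_family);
		return err;
	}

	static void __exit my_genl_exit(void)
	{
		genl_unregister_family(&my_family);
	}

	module_init(my_genl_init);
	module_exit(my_genl_exit);

As noted in genl_unregister_ops(), operations need not be unregistered individually before the family; unregistering the family is enough, which keeps the exit path to a single call.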
diff --git a/net/rxrpc/transport.c b/net/rxrpc/transport.c
index 122c086e..dbe6105 100644
--- a/net/rxrpc/transport.c
+++ b/net/rxrpc/transport.c
@@ -23,6 +23,7 @@
 #include <linux/in.h>
 #include <linux/in6.h>
 #include <linux/icmp.h>
+#include <linux/skbuff.h>
 #include <net/sock.h>
 #include <net/ip.h>
 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
@@ -475,15 +476,11 @@
 
 		/* we'll probably need to checksum it (didn't call
 		 * sock_recvmsg) */
-		if (pkt->ip_summed != CHECKSUM_UNNECESSARY) {
-			if ((unsigned short)
-			    csum_fold(skb_checksum(pkt, 0, pkt->len,
-						   pkt->csum))) {
-				kfree_skb(pkt);
-				rxrpc_krxiod_queue_transport(trans);
-				_leave(" CSUM failed");
-				return;
-			}
+		if (skb_checksum_complete(pkt)) {
+			kfree_skb(pkt);
+			rxrpc_krxiod_queue_transport(trans);
+			_leave(" CSUM failed");
+			return;
 		}
 
 		addr = pkt->nh.iph->saddr;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 702ede3..61c3abe 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -55,6 +55,7 @@
 static void	call_bind_status(struct rpc_task *task);
 static void	call_transmit(struct rpc_task *task);
 static void	call_status(struct rpc_task *task);
+static void	call_transmit_status(struct rpc_task *task);
 static void	call_refresh(struct rpc_task *task);
 static void	call_refreshresult(struct rpc_task *task);
 static void	call_timeout(struct rpc_task *task);
@@ -672,6 +673,18 @@
 	rpc_exit(task, -ERESTARTSYS);
 }
 
+static inline int
+rpc_task_need_encode(struct rpc_task *task)
+{
+	return task->tk_rqstp->rq_snd_buf.len == 0;
+}
+
+static inline void
+rpc_task_force_reencode(struct rpc_task *task)
+{
+	task->tk_rqstp->rq_snd_buf.len = 0;
+}
+
 /*
  * 3.	Encode arguments of an RPC call
  */
@@ -867,12 +880,14 @@
 	if (task->tk_status != 0)
 		return;
 	/* Encode here so that rpcsec_gss can use correct sequence number. */
-	if (task->tk_rqstp->rq_bytes_sent == 0) {
+	if (rpc_task_need_encode(task)) {
+		task->tk_rqstp->rq_bytes_sent = 0;
 		call_encode(task);
 		/* Did the encode result in an error condition? */
 		if (task->tk_status != 0)
 			goto out_nosend;
 	}
+	task->tk_action = call_transmit_status;
 	xprt_transmit(task);
 	if (task->tk_status < 0)
 		return;
@@ -884,6 +899,7 @@
 out_nosend:
 	/* release socket write lock before attempting to handle error */
 	xprt_abort_transmit(task);
+	rpc_task_force_reencode(task);
 }
 
 /*
@@ -915,7 +931,6 @@
 		break;
 	case -ECONNREFUSED:
 	case -ENOTCONN:
-		req->rq_bytes_sent = 0;
 		if (clnt->cl_autobind)
 			clnt->cl_port = 0;
 		task->tk_action = call_bind;
@@ -937,7 +952,18 @@
 }
 
 /*
- * 6a.	Handle RPC timeout
+ * 6a.	Handle transmission errors.
+ */
+static void
+call_transmit_status(struct rpc_task *task)
+{
+	if (task->tk_status != -EAGAIN)
+		rpc_task_force_reencode(task);
+	call_status(task);
+}
+
+/*
+ * 6b.	Handle RPC timeout
  * 	We do not release the request slot, so we keep using the
  *	same XID for all retransmits.
  */
diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c
index 8f97e90..eb330d4 100644
--- a/net/sunrpc/socklib.c
+++ b/net/sunrpc/socklib.c
@@ -6,6 +6,9 @@
  * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
  */
 
+#include <linux/compiler.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
 #include <linux/types.h>
 #include <linux/pagemap.h>
 #include <linux/udp.h>
@@ -165,6 +168,8 @@
 		return -1;
 	if ((unsigned short)csum_fold(desc.csum))
 		return -1;
+	if (unlikely(skb->ip_summed == CHECKSUM_HW))
+		netdev_rx_csum_fault(skb->dev);
 	return 0;
 no_checksum:
 	if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_bits) < 0)
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index f16e7cd..e50e7cf 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -623,12 +623,9 @@
 		/* we can use it in-place */
 		rqstp->rq_arg.head[0].iov_base = skb->data + sizeof(struct udphdr);
 		rqstp->rq_arg.head[0].iov_len = len;
-		if (skb->ip_summed != CHECKSUM_UNNECESSARY) {
-			if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum))) {
-				skb_free_datagram(svsk->sk_sk, skb);
-				return 0;
-			}
-			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		if (skb_checksum_complete(skb)) {
+			skb_free_datagram(svsk->sk_sk, skb);
+			return 0;
 		}
 		rqstp->rq_skbuff = skb;
 	}
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index c35336a..0cdd9a0 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -18,7 +18,6 @@
 #include <linux/string.h>
 #include <linux/net.h>
 #include <linux/skbuff.h>
-#include <linux/netlink.h>
 #include <linux/rtnetlink.h>
 #include <linux/pfkeyv2.h>
 #include <linux/ipsec.h>
@@ -26,6 +25,7 @@
 #include <linux/security.h>
 #include <net/sock.h>
 #include <net/xfrm.h>
+#include <net/netlink.h>
 #include <asm/uaccess.h>
 
 static struct sock *xfrm_nl;
@@ -948,11 +948,6 @@
 	[XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy  },
 };
 
-static int xfrm_done(struct netlink_callback *cb)
-{
-	return 0;
-}
-
 static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp)
 {
 	struct rtattr *xfrma[XFRMA_MAX];
@@ -984,20 +979,15 @@
 	if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
 	     type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
 	    (nlh->nlmsg_flags & NLM_F_DUMP)) {
-		u32 rlen;
-
 		if (link->dump == NULL)
 			goto err_einval;
 
 		if ((*errp = netlink_dump_start(xfrm_nl, skb, nlh,
-						link->dump,
-						xfrm_done)) != 0) {
+						link->dump, NULL)) != 0) {
 			return -1;
 		}
-		rlen = NLMSG_ALIGN(nlh->nlmsg_len);
-		if (rlen > skb->len)
-			rlen = skb->len;
-		skb_pull(skb, rlen);
+
+		netlink_queue_skip(nlh, skb);
 		return -1;
 	}
 
@@ -1032,60 +1022,13 @@
 	return -1;
 }
 
-static int xfrm_user_rcv_skb(struct sk_buff *skb)
-{
-	int err;
-	struct nlmsghdr *nlh;
-
-	while (skb->len >= NLMSG_SPACE(0)) {
-		u32 rlen;
-
-		nlh = (struct nlmsghdr *) skb->data;
-		if (nlh->nlmsg_len < sizeof(*nlh) ||
-		    skb->len < nlh->nlmsg_len)
-			return 0;
-		rlen = NLMSG_ALIGN(nlh->nlmsg_len);
-		if (rlen > skb->len)
-			rlen = skb->len;
-		if (xfrm_user_rcv_msg(skb, nlh, &err) < 0) {
-			if (err == 0)
-				return -1;
-			netlink_ack(skb, nlh, err);
-		} else if (nlh->nlmsg_flags & NLM_F_ACK)
-			netlink_ack(skb, nlh, 0);
-		skb_pull(skb, rlen);
-	}
-
-	return 0;
-}
-
 static void xfrm_netlink_rcv(struct sock *sk, int len)
 {
-	unsigned int qlen = skb_queue_len(&sk->sk_receive_queue);
+	unsigned int qlen = 0;
 
 	do {
-		struct sk_buff *skb;
-
 		down(&xfrm_cfg_sem);
-
-		if (qlen > skb_queue_len(&sk->sk_receive_queue))
-			qlen = skb_queue_len(&sk->sk_receive_queue);
-
-		for (; qlen; qlen--) {
-			skb = skb_dequeue(&sk->sk_receive_queue);
-			if (xfrm_user_rcv_skb(skb)) {
-				if (skb->len)
-					skb_queue_head(&sk->sk_receive_queue,
-						       skb);
-				else {
-					kfree_skb(skb);
-					qlen--;
-				}
-				break;
-			}
-			kfree_skb(skb);
-		}
-
+		netlink_run_queue(sk, &qlen, &xfrm_user_rcv_msg);
 		up(&xfrm_cfg_sem);
 
 	} while (qlen);