Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/khilman/linux-davinci into devel
diff --git a/Documentation/ABI/testing/sysfs-kernel-slab b/Documentation/ABI/testing/sysfs-kernel-slab
new file mode 100644
index 0000000..6dcf75e
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-kernel-slab
@@ -0,0 +1,479 @@
+What:		/sys/kernel/slab
+Date:		May 2007
+KernelVersion:	2.6.22
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		The /sys/kernel/slab directory contains a snapshot of the
+		internal state of the SLUB allocator for each cache.  Certain
+		files may be modified to change the behavior of the cache (and
+		of any caches it aliases).
+Users:		kernel memory tuning tools
+
+What:		/sys/kernel/slab/cache/aliases
+Date:		May 2007
+KernelVersion:	2.6.22
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		The aliases file is read-only and specifies how many caches
+		have merged into this cache.
+
+What:		/sys/kernel/slab/cache/align
+Date:		May 2007
+KernelVersion:	2.6.22
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		The align file is read-only and specifies the cache's object
+		alignment in bytes.
+
+What:		/sys/kernel/slab/cache/alloc_calls
+Date:		May 2007
+KernelVersion:	2.6.22
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		The alloc_calls file is read-only and lists the kernel code
+		locations from which allocations for this cache were performed.
+		The alloc_calls file only contains information if debugging is
+		enabled for that cache (see Documentation/vm/slub.txt).
+
+What:		/sys/kernel/slab/cache/alloc_fastpath
+Date:		February 2008
+KernelVersion:	2.6.25
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		The alloc_fastpath file is read-only and specifies how many
+		objects have been allocated using the fast path.
+		Available when CONFIG_SLUB_STATS is enabled.
+
+What:		/sys/kernel/slab/cache/alloc_from_partial
+Date:		February 2008
+KernelVersion:	2.6.25
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		The alloc_from_partial file is read-only and specifies how
+		many times a full cpu slab has been refilled with a slab
+		taken from the list of partially used slabs.
+		Available when CONFIG_SLUB_STATS is enabled.
+
+What:		/sys/kernel/slab/cache/alloc_refill
+Date:		February 2008
+KernelVersion:	2.6.25
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		The alloc_refill file is read-only and specifies how many
+		times the per-cpu freelist was empty but there were objects
+		available as the result of remote cpu frees.
+		Available when CONFIG_SLUB_STATS is enabled.
+
+What:		/sys/kernel/slab/cache/alloc_slab
+Date:		February 2008
+KernelVersion:	2.6.25
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		The alloc_slab file is read-only and specifies how many times
+		a new slab had to be allocated from the page allocator.
+		Available when CONFIG_SLUB_STATS is enabled.
+
+What:		/sys/kernel/slab/cache/alloc_slowpath
+Date:		February 2008
+KernelVersion:	2.6.25
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		The alloc_slowpath file is read-only and specifies how many
+		objects have been allocated using the slow path because of a
+		refill or allocation from a partial or new slab.
+		Available when CONFIG_SLUB_STATS is enabled.
+
+What:		/sys/kernel/slab/cache/cache_dma
+Date:		May 2007
+KernelVersion:	2.6.22
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		The cache_dma file is read-only and specifies whether objects
+		are from ZONE_DMA.
+		Available when CONFIG_ZONE_DMA is enabled.
+
+What:		/sys/kernel/slab/cache/cpu_slabs
+Date:		May 2007
+KernelVersion:	2.6.22
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		The cpu_slabs file is read-only and displays how many cpu slabs
+		are active and their NUMA locality.
+
+What:		/sys/kernel/slab/cache/cpuslab_flush
+Date:		April 2009
+KernelVersion:	2.6.31
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		The file cpuslab_flush is read-only and specifies how many
+		times a cache's cpu slabs have been flushed as the result of
+		destroying or shrinking a cache, a cpu going offline, or as
+		the result of forcing an allocation from a certain node.
+		Available when CONFIG_SLUB_STATS is enabled.
+
+What:		/sys/kernel/slab/cache/ctor
+Date:		May 2007
+KernelVersion:	2.6.22
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		The ctor file is read-only and specifies the cache's object
+		constructor function, which is invoked for each object when a
+		new slab is allocated.
+
+What:		/sys/kernel/slab/cache/deactivate_empty
+Date:		February 2008
+KernelVersion:	2.6.25
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		The file deactivate_empty is read-only and specifies how many
+		times an empty cpu slab was deactivated.
+		Available when CONFIG_SLUB_STATS is enabled.
+
+What:		/sys/kernel/slab/cache/deactivate_full
+Date:		February 2008
+KernelVersion:	2.6.25
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		The file deactivate_full is read-only and specifies how many
+		times a full cpu slab was deactivated.
+		Available when CONFIG_SLUB_STATS is enabled.
+
+What:		/sys/kernel/slab/cache/deactivate_remote_frees
+Date:		February 2008
+KernelVersion:	2.6.25
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		The file deactivate_remote_frees is read-only and specifies how
+		many times a cpu slab has been deactivated and contained free
+		objects that were freed remotely.
+		Available when CONFIG_SLUB_STATS is enabled.
+
+What:		/sys/kernel/slab/cache/deactivate_to_head
+Date:		February 2008
+KernelVersion:	2.6.25
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		The file deactivate_to_head is read-only and specifies how
+		many times a partial cpu slab was deactivated and added to the
+		head of its node's partial list.
+		Available when CONFIG_SLUB_STATS is enabled.
+
+What:		/sys/kernel/slab/cache/deactivate_to_tail
+Date:		February 2008
+KernelVersion:	2.6.25
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		The file deactivate_to_tail is read-only and specifies how
+		many times a partial cpu slab was deactivated and added to the
+		tail of its node's partial list.
+		Available when CONFIG_SLUB_STATS is enabled.
+
+What:		/sys/kernel/slab/cache/destroy_by_rcu
+Date:		May 2007
+KernelVersion:	2.6.22
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		The destroy_by_rcu file is read-only and specifies whether
+		slabs (not objects) are freed by rcu.
+
+What:		/sys/kernel/slab/cache/free_add_partial
+Date:		February 2008
+KernelVersion:	2.6.25
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		The file free_add_partial is read-only and specifies how many
+		times an object has been freed in a full slab so that it had
+		to be added to its node's partial list.
+		Available when CONFIG_SLUB_STATS is enabled.
+
+What:		/sys/kernel/slab/cache/free_calls
+Date:		May 2007
+KernelVersion:	2.6.22
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		The free_calls file is read-only and lists the locations of
+		object frees if slab debugging is enabled (see
+		Documentation/vm/slub.txt).
+
+What:		/sys/kernel/slab/cache/free_fastpath
+Date:		February 2008
+KernelVersion:	2.6.25
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		The free_fastpath file is read-only and specifies how many
+		objects have been freed using the fast path because they
+		belonged to the cpu slab.
+		Available when CONFIG_SLUB_STATS is enabled.
+
+What:		/sys/kernel/slab/cache/free_frozen
+Date:		February 2008
+KernelVersion:	2.6.25
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		The free_frozen file is read-only and specifies how many
+		objects have been freed to a frozen slab (i.e. a remote cpu
+		slab).
+		Available when CONFIG_SLUB_STATS is enabled.
+
+What:		/sys/kernel/slab/cache/free_remove_partial
+Date:		February 2008
+KernelVersion:	2.6.25
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		The file free_remove_partial is read-only and specifies how
+		many times an object has been freed to a now-empty slab so
+		that it had to be removed from its node's partial list.
+		Available when CONFIG_SLUB_STATS is enabled.
+
+What:		/sys/kernel/slab/cache/free_slab
+Date:		February 2008
+KernelVersion:	2.6.25
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		The free_slab file is read-only and specifies how many times an
+		empty slab has been freed back to the page allocator.
+		Available when CONFIG_SLUB_STATS is enabled.
+
+What:		/sys/kernel/slab/cache/free_slowpath
+Date:		February 2008
+KernelVersion:	2.6.25
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		The free_slowpath file is read-only and specifies how many
+		objects have been freed using the slow path (i.e. to a full or
+		partial slab).
+		Available when CONFIG_SLUB_STATS is enabled.
+
+What:		/sys/kernel/slab/cache/hwcache_align
+Date:		May 2007
+KernelVersion:	2.6.22
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		The hwcache_align file is read-only and specifies whether
+		objects are aligned on cachelines.
+
+What:		/sys/kernel/slab/cache/min_partial
+Date:		February 2009
+KernelVersion:	2.6.30
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		David Rientjes <rientjes@google.com>
+Description:
+		The min_partial file specifies how many empty slabs shall
+		remain on a node's partial list to avoid the overhead of
+		allocating new slabs.  Such slabs may be reclaimed by utilizing
+		the shrink file.
+
+What:		/sys/kernel/slab/cache/object_size
+Date:		May 2007
+KernelVersion:	2.6.22
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		The object_size file is read-only and specifies the cache's
+		object size.
+
+What:		/sys/kernel/slab/cache/objects
+Date:		May 2007
+KernelVersion:	2.6.22
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		The objects file is read-only and displays how many objects are
+		active and from which nodes they come.
+
+What:		/sys/kernel/slab/cache/objects_partial
+Date:		April 2008
+KernelVersion:	2.6.26
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		The objects_partial file is read-only and displays how many
+		objects are on partial slabs and from which nodes they
+		come.
+
+What:		/sys/kernel/slab/cache/objs_per_slab
+Date:		May 2007
+KernelVersion:	2.6.22
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		The file objs_per_slab is read-only and specifies how many
+		objects may be allocated from a single slab of the order
+		specified in /sys/kernel/slab/cache/order.
+
+What:		/sys/kernel/slab/cache/order
+Date:		May 2007
+KernelVersion:	2.6.22
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		The order file specifies the page order at which new slabs are
+		allocated.  It is writable and can be changed to increase the
+		number of objects per slab.  If a slab cannot be allocated
+		because of fragmentation, SLUB will retry with the minimum order
+		possible depending on its characteristics.
+
+What:		/sys/kernel/slab/cache/order_fallback
+Date:		April 2008
+KernelVersion:	2.6.26
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		The file order_fallback is read-only and specifies how many
+		times an allocation of a new slab has not been possible at the
+		cache's order and instead fallen back to its minimum possible
+		order.
+		Available when CONFIG_SLUB_STATS is enabled.
+
+What:		/sys/kernel/slab/cache/partial
+Date:		May 2007
+KernelVersion:	2.6.22
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		The partial file is read-only and displays how many partial
+		slabs there are and how long each node's list is.
+
+What:		/sys/kernel/slab/cache/poison
+Date:		May 2007
+KernelVersion:	2.6.22
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		The poison file specifies whether objects should be poisoned
+		when a new slab is allocated.
+
+What:		/sys/kernel/slab/cache/reclaim_account
+Date:		May 2007
+KernelVersion:	2.6.22
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		The reclaim_account file specifies whether the cache's objects
+		are reclaimable (and grouped by their mobility).
+
+What:		/sys/kernel/slab/cache/red_zone
+Date:		May 2007
+KernelVersion:	2.6.22
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		The red_zone file specifies whether the cache's objects are red
+		zoned.
+
+What:		/sys/kernel/slab/cache/remote_node_defrag_ratio
+Date:		January 2008
+KernelVersion:	2.6.25
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		The file remote_node_defrag_ratio specifies the percentage of
+		times SLUB will attempt to refill the cpu slab with a partial
+		slab from a remote node as opposed to allocating a new slab on
+		the local node.  This reduces the amount of wasted memory over
+		the entire system but can be expensive.
+		Available when CONFIG_NUMA is enabled.
+
+What:		/sys/kernel/slab/cache/sanity_checks
+Date:		May 2007
+KernelVersion:	2.6.22
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		The sanity_checks file specifies whether expensive checks
+		should be performed on free; at a minimum, it enables double
+		free checks.  Caches that enable sanity_checks cannot be
+		merged with caches that do not.
+
+What:		/sys/kernel/slab/cache/shrink
+Date:		May 2007
+KernelVersion:	2.6.22
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		The shrink file is written when memory should be reclaimed from
+		a cache.  Empty partial slabs are freed and the partial list is
+		sorted so the slabs with the fewest available objects are used
+		first.
+
+What:		/sys/kernel/slab/cache/slab_size
+Date:		May 2007
+KernelVersion:	2.6.22
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		The slab_size file is read-only and specifies the object size
+		with metadata (debugging information and alignment) in bytes.
+
+What:		/sys/kernel/slab/cache/slabs
+Date:		May 2007
+KernelVersion:	2.6.22
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		The slabs file is read-only and displays how many slabs there
+		are (both cpu and partial) and from which nodes they
+		come.
+
+What:		/sys/kernel/slab/cache/store_user
+Date:		May 2007
+KernelVersion:	2.6.22
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		The store_user file specifies whether the location of
+		allocation or free should be tracked for a cache.
+
+What:		/sys/kernel/slab/cache/total_objects
+Date:		April 2008
+KernelVersion:	2.6.26
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		The total_objects file is read-only and displays how many total
+		objects a cache has and from which nodes they come.
+
+What:		/sys/kernel/slab/cache/trace
+Date:		May 2007
+KernelVersion:	2.6.22
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		The trace file specifies whether object allocations and frees
+		should be traced.
+
+What:		/sys/kernel/slab/cache/validate
+Date:		May 2007
+KernelVersion:	2.6.22
+Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
+		Christoph Lameter <cl@linux-foundation.org>
+Description:
+		Writing to the validate file causes SLUB to traverse all of the
+		cache's objects and check the validity of their metadata.
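
The attributes above are ordinary sysfs text files, so they can be inspected
and tuned from userspace without special tooling.  The following is a minimal
illustrative sketch, not part of this patch: the cache name "kmalloc-64" is
only an example, the statistics files exist only when CONFIG_SLUB_STATS is
enabled, and writing to "shrink" normally requires root.

#include <stdio.h>

/* Print the single-line contents of one slab attribute, if present. */
static void show(const char *path)
{
	char buf[256];
	FILE *f = fopen(path, "r");

	if (!f)
		return;		/* attribute absent, e.g. no SLUB_STATS */
	if (fgets(buf, sizeof(buf), f))
		printf("%s: %s", path, buf);
	fclose(f);
}

int main(void)
{
	FILE *f;

	/* Read-only attributes: per-node object counts and a statistic. */
	show("/sys/kernel/slab/kmalloc-64/objects");
	show("/sys/kernel/slab/kmalloc-64/alloc_fastpath");

	/* Writing to "shrink" frees empty slabs on the node partial lists. */
	f = fopen("/sys/kernel/slab/kmalloc-64/shrink", "w");
	if (f) {
		fputs("1\n", f);
		fclose(f);
	}
	return 0;
}
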
diff --git a/Documentation/filesystems/tmpfs.txt b/Documentation/filesystems/tmpfs.txt
index 222437e..3015da0 100644
--- a/Documentation/filesystems/tmpfs.txt
+++ b/Documentation/filesystems/tmpfs.txt
@@ -133,4 +133,4 @@
 Author:
    Christoph Rohland <cr@sap.com>, 1.12.01
 Updated:
-   Hugh Dickins <hugh@veritas.com>, 4 June 2007
+   Hugh Dickins, 4 June 2007
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index ec5de02..b121c5d 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -1266,13 +1266,22 @@
 sctp_wmem  - vector of 3 INTEGERs: min, default, max
 	See tcp_wmem for a description.
 
-UNDOCUMENTED:
 
 /proc/sys/net/core/*
-	dev_weight FIXME
+dev_weight - INTEGER
+	The maximum number of packets that the kernel can handle on a
+	NAPI interrupt; it is a per-CPU variable.
+
+	Default: 64
 
 /proc/sys/net/unix/*
-	max_dgram_qlen FIXME
+max_dgram_qlen - INTEGER
+	The maximum length of the dgram socket receive queue.
+
+	Default: 10
+
+
+UNDOCUMENTED:
 
 /proc/sys/net/irda/*
 	fast_poll_increase FIXME
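
As an illustration of the two entries documented above, here is a small
userspace sketch (not part of this patch; the values 128 and 20 are arbitrary
examples, not recommendations, and writing /proc/sys requires root) that
adjusts both knobs through procfs:

#include <stdio.h>

/* Write a single value to a /proc/sys file; returns 0 on success. */
static int write_sysctl(const char *path, const char *value)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(value, f);
	return fclose(f);
}

int main(void)
{
	/* NAPI budget: packets handled per softirq pass (per-CPU). */
	write_sysctl("/proc/sys/net/core/dev_weight", "128\n");

	/* Receive-queue length for AF_UNIX datagram sockets. */
	write_sysctl("/proc/sys/net/unix/max_dgram_qlen", "20\n");
	return 0;
}
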
diff --git a/MAINTAINERS b/MAINTAINERS
index 2b349ba..77cbfb1 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1431,6 +1431,14 @@
 M:	linux@arm.linux.org.uk
 F:	include/linux/clk.h
 
+CISCO FCOE HBA DRIVER
+P:	Abhijeet Joglekar
+M:	abjoglek@cisco.com
+P:	Joe Eykholt
+M:	jeykholt@cisco.com
+L:	linux-scsi@vger.kernel.org
+S:	Supported
+
 CODA FILE SYSTEM
 P:	Jan Harkes
 M:	jaharkes@cs.cmu.edu
@@ -5579,6 +5587,14 @@
 S:	Maintained
 F:	drivers/mmc/host/tmio_mmc.*
 
+TMPFS (SHMEM FILESYSTEM)
+P:	Hugh Dickins
+M:	hugh.dickins@tiscali.co.uk
+L:	linux-mm@kvack.org
+S:	Maintained
+F:	include/linux/shmem_fs.h
+F:	mm/shmem.c
+
 TPM DEVICE DRIVER
 P:	Debora Velarde
 M:	debora@linux.vnet.ibm.com
diff --git a/Makefile b/Makefile
index b57e1f5..739fd34 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 30
-EXTRAVERSION = -rc6
-NAME = Vindictive Armadillo
+EXTRAVERSION = -rc7
+NAME = Man-Eating Seals of Antiquity
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index c52f690..65bf774 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -281,6 +281,7 @@
 	select HAVE_CLK
 	select COMMON_CLKDEV
 	select ARCH_REQUIRE_GPIOLIB
+	select ARCH_HAS_HOLES_MEMORYMODEL
 	help
 	  This enables support for the Cirrus EP93xx series of CPUs.
 
@@ -595,6 +596,7 @@
 	select ZONE_DMA
 	select HAVE_IDE
 	select COMMON_CLKDEV
+	select GENERIC_ALLOCATOR
 	help
 	  Support for TI's DaVinci platform.
 
@@ -965,10 +967,9 @@
 	  UNPREDICTABLE (in fact it can be predicted that it won't work
 	  at all). If in doubt say Y.
 
-config ARCH_FLATMEM_HAS_HOLES
+config ARCH_HAS_HOLES_MEMORYMODEL
 	bool
-	default y
-	depends on FLATMEM
+	default n
 
 # Discontigmem is deprecated
 config ARCH_DISCONTIGMEM_ENABLE
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index c6884ba..3e1714c 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -253,9 +253,9 @@
 }
 
 #ifdef CONFIG_SMP
-void gic_raise_softirq(cpumask_t cpumask, unsigned int irq)
+void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 {
-	unsigned long map = *cpus_addr(cpumask);
+	unsigned long map = *cpus_addr(*mask);
 
 	/* this always happens on GIC0 */
 	writel(map << 16 | irq, gic_data[0].dist_base + GIC_DIST_SOFTINT);
diff --git a/arch/arm/configs/davinci_all_defconfig b/arch/arm/configs/davinci_all_defconfig
index eb2738b..ac18662 100644
--- a/arch/arm/configs/davinci_all_defconfig
+++ b/arch/arm/configs/davinci_all_defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30-rc2
-# Wed Apr 15 08:16:53 2009
+# Linux kernel version: 2.6.30-rc7
+# Tue May 26 07:24:28 2009
 #
 CONFIG_ARM=y
 CONFIG_SYS_SUPPORTS_APM_EMULATION=y
@@ -179,6 +179,7 @@
 # CONFIG_ARCH_OMAP is not set
 # CONFIG_ARCH_MSM is not set
 # CONFIG_ARCH_W90X900 is not set
+CONFIG_AINTC=y
 
 #
 # TI DaVinci Implementations
@@ -188,11 +189,17 @@
 # DaVinci Core Type
 #
 CONFIG_ARCH_DAVINCI_DM644x=y
+CONFIG_ARCH_DAVINCI_DM355=y
+CONFIG_ARCH_DAVINCI_DM646x=y
 
 #
 # DaVinci Board Type
 #
 CONFIG_MACH_DAVINCI_EVM=y
+CONFIG_MACH_SFFSDR=y
+CONFIG_MACH_DAVINCI_DM355_EVM=y
+CONFIG_MACH_DM355_LEOPARD=y
+CONFIG_MACH_DAVINCI_DM6467_EVM=y
 CONFIG_DAVINCI_MUX=y
 CONFIG_DAVINCI_MUX_DEBUG=y
 CONFIG_DAVINCI_MUX_WARNINGS=y
@@ -245,7 +252,7 @@
 CONFIG_HZ=100
 CONFIG_AEABI=y
 # CONFIG_OABI_COMPAT is not set
-CONFIG_ARCH_FLATMEM_HAS_HOLES=y
+# CONFIG_ARCH_HAS_HOLES_MEMORYMODEL is not set
 # CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
 # CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
 # CONFIG_HIGHMEM is not set
@@ -661,7 +668,10 @@
 CONFIG_MII=y
 # CONFIG_AX88796 is not set
 # CONFIG_SMC91X is not set
-# CONFIG_DM9000 is not set
+CONFIG_TI_DAVINCI_EMAC=y
+CONFIG_DM9000=y
+CONFIG_DM9000_DEBUGLEVEL=4
+# CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL is not set
 # CONFIG_ETHOC is not set
 # CONFIG_SMC911X is not set
 # CONFIG_SMSC911X is not set
@@ -963,6 +973,7 @@
 # CONFIG_MFD_CORE is not set
 # CONFIG_MFD_SM501 is not set
 # CONFIG_MFD_ASIC3 is not set
+# CONFIG_MFD_DM355EVM_MSP is not set
 # CONFIG_HTC_EGPIO is not set
 # CONFIG_HTC_PASIC3 is not set
 # CONFIG_TPS65010 is not set
@@ -1317,6 +1328,7 @@
 # MMC/SD/SDIO Host Controller Drivers
 #
 # CONFIG_MMC_SDHCI is not set
+# CONFIG_MMC_DAVINCI is not set
 # CONFIG_MEMSTICK is not set
 # CONFIG_ACCESSIBILITY is not set
 CONFIG_NEW_LEDS=y
@@ -1778,6 +1790,7 @@
 CONFIG_ZLIB_INFLATE=y
 CONFIG_ZLIB_DEFLATE=m
 CONFIG_DECOMPRESS_GZIP=y
+CONFIG_GENERIC_ALLOCATOR=y
 CONFIG_HAS_IOMEM=y
 CONFIG_HAS_IOPORT=y
 CONFIG_HAS_DMA=y
diff --git a/arch/arm/include/asm/hardware/gic.h b/arch/arm/include/asm/hardware/gic.h
index 4924914..7f34333b 100644
--- a/arch/arm/include/asm/hardware/gic.h
+++ b/arch/arm/include/asm/hardware/gic.h
@@ -36,7 +36,7 @@
 void gic_dist_init(unsigned int gic_nr, void __iomem *base, unsigned int irq_start);
 void gic_cpu_init(unsigned int gic_nr, void __iomem *base);
 void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
-void gic_raise_softirq(cpumask_t cpumask, unsigned int irq);
+void gic_raise_softirq(const struct cpumask *mask, unsigned int irq);
 #endif
 
 #endif
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index fad70da..5995935 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -53,17 +53,12 @@
 /*
  * Raise an IPI cross call on CPUs in callmap.
  */
-extern void smp_cross_call(cpumask_t callmap);
-
-/*
- * Broadcast a timer interrupt to the other CPUs.
- */
-extern void smp_send_timer(void);
+extern void smp_cross_call(const struct cpumask *mask);
 
 /*
  * Broadcast a clock event to other CPUs.
  */
-extern void smp_timer_broadcast(cpumask_t mask);
+extern void smp_timer_broadcast(const struct cpumask *mask);
 
 /*
  * Boot a secondary CPU, and assign it the specified idle task.
@@ -102,7 +97,8 @@
 extern void platform_cpu_enable(unsigned int cpu);
 
 extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi(cpumask_t mask);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
 
 /*
  * Local timer interrupt handling function (can be IPI'ed).
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 7801aac..6014dfd 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -326,14 +326,14 @@
 	per_cpu(cpu_data, cpu).idle = current;
 }
 
-static void send_ipi_message(cpumask_t callmap, enum ipi_msg_type msg)
+static void send_ipi_message(const struct cpumask *mask, enum ipi_msg_type msg)
 {
 	unsigned long flags;
 	unsigned int cpu;
 
 	local_irq_save(flags);
 
-	for_each_cpu_mask(cpu, callmap) {
+	for_each_cpu(cpu, mask) {
 		struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
 
 		spin_lock(&ipi->lock);
@@ -344,19 +344,19 @@
 	/*
 	 * Call the platform specific cross-CPU call function.
 	 */
-	smp_cross_call(callmap);
+	smp_cross_call(mask);
 
 	local_irq_restore(flags);
 }
 
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
 	send_ipi_message(mask, IPI_CALL_FUNC);
 }
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-	send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE);
+	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
 }
 
 void show_ipi_list(struct seq_file *p)
@@ -498,17 +498,10 @@
 
 void smp_send_reschedule(int cpu)
 {
-	send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
+	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
-void smp_send_timer(void)
-{
-	cpumask_t mask = cpu_online_map;
-	cpu_clear(smp_processor_id(), mask);
-	send_ipi_message(mask, IPI_TIMER);
-}
-
-void smp_timer_broadcast(cpumask_t mask)
+void smp_timer_broadcast(const struct cpumask *mask)
 {
 	send_ipi_message(mask, IPI_TIMER);
 }
@@ -517,7 +510,7 @@
 {
 	cpumask_t mask = cpu_online_map;
 	cpu_clear(smp_processor_id(), mask);
-	send_ipi_message(mask, IPI_CPU_STOP);
+	send_ipi_message(&mask, IPI_CPU_STOP);
 }
 
 /*
@@ -528,20 +521,17 @@
 	return -EINVAL;
 }
 
-static int
-on_each_cpu_mask(void (*func)(void *), void *info, int wait, cpumask_t mask)
+static void
+on_each_cpu_mask(void (*func)(void *), void *info, int wait,
+		const struct cpumask *mask)
 {
-	int ret = 0;
-
 	preempt_disable();
 
-	ret = smp_call_function_mask(mask, func, info, wait);
-	if (cpu_isset(smp_processor_id(), mask))
+	smp_call_function_many(mask, func, info, wait);
+	if (cpumask_test_cpu(smp_processor_id(), mask))
 		func(info);
 
 	preempt_enable();
-
-	return ret;
 }
 
 /**********************************************************************/
@@ -602,20 +592,17 @@
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
-	cpumask_t mask = mm->cpu_vm_mask;
-
-	on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mask);
+	on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, &mm->cpu_vm_mask);
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 {
-	cpumask_t mask = vma->vm_mm->cpu_vm_mask;
 	struct tlb_args ta;
 
 	ta.ta_vma = vma;
 	ta.ta_start = uaddr;
 
-	on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mask);
+	on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, &vma->vm_mm->cpu_vm_mask);
 }
 
 void flush_tlb_kernel_page(unsigned long kaddr)
@@ -630,14 +617,13 @@
 void flush_tlb_range(struct vm_area_struct *vma,
                      unsigned long start, unsigned long end)
 {
-	cpumask_t mask = vma->vm_mm->cpu_vm_mask;
 	struct tlb_args ta;
 
 	ta.ta_vma = vma;
 	ta.ta_start = start;
 	ta.ta_end = end;
 
-	on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mask);
+	on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, &vma->vm_mm->cpu_vm_mask);
 }
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
diff --git a/arch/arm/mach-davinci/Kconfig b/arch/arm/mach-davinci/Kconfig
index a9c78bc..7640867 100644
--- a/arch/arm/mach-davinci/Kconfig
+++ b/arch/arm/mach-davinci/Kconfig
@@ -1,11 +1,26 @@
 if ARCH_DAVINCI
 
+config AINTC
+	bool
+
+config CP_INTC
+	bool
+
 menu "TI DaVinci Implementations"
 
 comment "DaVinci Core Type"
 
 config ARCH_DAVINCI_DM644x
 	bool "DaVinci 644x based system"
+	select AINTC
+
+config ARCH_DAVINCI_DM355
+	bool "DaVinci 355 based system"
+	select AINTC
+
+config ARCH_DAVINCI_DM646x
+	bool "DaVinci 646x based system"
+	select AINTC
 
 comment "DaVinci Board Type"
 
@@ -17,6 +32,38 @@
 	  Configure this option to specify the whether the board used
 	  for development is a DM644x EVM
 
+config MACH_SFFSDR
+	bool "Lyrtech SFFSDR"
+	default n
+	depends on ARCH_DAVINCI_DM644x
+	help
+	  Say Y here to select the Lyrtech Small Form Factor
+	  Software Defined Radio (SFFSDR) board.
+
+config MACH_DAVINCI_DM355_EVM
+	bool "TI DM355 EVM"
+	default n
+	depends on ARCH_DAVINCI_DM355
+	help
+	  Configure this option to specify whether the board used
+	  for development is a DM355 EVM.
+
+config MACH_DM355_LEOPARD
+	bool "DM355 Leopard board"
+	default n
+	depends on ARCH_DAVINCI_DM355
+	help
+	  Configure this option to specify whether the board used
+	  for development is a DM355 Leopard board.
+
+config MACH_DAVINCI_DM6467_EVM
+	bool "TI DM6467 EVM"
+	default n
+	depends on ARCH_DAVINCI_DM646x
+	help
+	  Configure this option to specify whether the board used
+	  for development is a DM6467 EVM.
+
 
 config DAVINCI_MUX
 	bool "DAVINCI multiplexing support"
diff --git a/arch/arm/mach-davinci/Makefile b/arch/arm/mach-davinci/Makefile
index 1674661..059ab78 100644
--- a/arch/arm/mach-davinci/Makefile
+++ b/arch/arm/mach-davinci/Makefile
@@ -4,13 +4,22 @@
 #
 
 # Common objects
-obj-y 			:= time.o irq.o clock.o serial.o io.o id.o psc.o \
-			   gpio.o devices.o dma.o usb.o
+obj-y 			:= time.o clock.o serial.o io.o psc.o \
+			   gpio.o devices.o dma.o usb.o common.o sram.o
 
 obj-$(CONFIG_DAVINCI_MUX)		+= mux.o
 
 # Chip specific
 obj-$(CONFIG_ARCH_DAVINCI_DM644x)       += dm644x.o
+obj-$(CONFIG_ARCH_DAVINCI_DM355)        += dm355.o
+obj-$(CONFIG_ARCH_DAVINCI_DM646x)       += dm646x.o
+
+obj-$(CONFIG_AINTC)			+= irq.o
+obj-$(CONFIG_CP_INTC)			+= cp_intc.o
 
 # Board specific
 obj-$(CONFIG_MACH_DAVINCI_EVM)  	+= board-dm644x-evm.o
+obj-$(CONFIG_MACH_SFFSDR)		+= board-sffsdr.o
+obj-$(CONFIG_MACH_DAVINCI_DM355_EVM)	+= board-dm355-evm.o
+obj-$(CONFIG_MACH_DM355_LEOPARD)	+= board-dm355-leopard.o
+obj-$(CONFIG_MACH_DAVINCI_DM6467_EVM)	+= board-dm646x-evm.o
diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c
new file mode 100644
index 0000000..5ac2f56
--- /dev/null
+++ b/arch/arm/mach-davinci/board-dm355-evm.c
@@ -0,0 +1,298 @@
+/*
+ * TI DaVinci EVM board support
+ *
+ * Author: Kevin Hilman, Deep Root Systems, LLC
+ *
+ * 2007 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/nand.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/clk.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/eeprom.h>
+
+#include <asm/setup.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/flash.h>
+
+#include <mach/hardware.h>
+#include <mach/dm355.h>
+#include <mach/psc.h>
+#include <mach/common.h>
+#include <mach/i2c.h>
+#include <mach/serial.h>
+#include <mach/nand.h>
+#include <mach/mmc.h>
+#include <mach/common.h>
+
+#define DAVINCI_ASYNC_EMIF_CONTROL_BASE		0x01e10000
+#define DAVINCI_ASYNC_EMIF_DATA_CE0_BASE	0x02000000
+
+/* NOTE:  this is geared for the standard config, with a socketed
+ * 2 GByte Micron NAND (MT29F16G08FAA) using 128KB sectors.  If you
+ * swap chips, maybe with a different block size, partitioning may
+ * need to be changed.
+ */
+#define NAND_BLOCK_SIZE		SZ_128K
+
+static struct mtd_partition davinci_nand_partitions[] = {
+	{
+		/* UBL (a few copies) plus U-Boot */
+		.name		= "bootloader",
+		.offset		= 0,
+		.size		= 15 * NAND_BLOCK_SIZE,
+		.mask_flags	= MTD_WRITEABLE, /* force read-only */
+	}, {
+		/* U-Boot environment */
+		.name		= "params",
+		.offset		= MTDPART_OFS_APPEND,
+		.size		= 1 * NAND_BLOCK_SIZE,
+		.mask_flags	= 0,
+	}, {
+		.name		= "kernel",
+		.offset		= MTDPART_OFS_APPEND,
+		.size		= SZ_4M,
+		.mask_flags	= 0,
+	}, {
+		.name		= "filesystem1",
+		.offset		= MTDPART_OFS_APPEND,
+		.size		= SZ_512M,
+		.mask_flags	= 0,
+	}, {
+		.name		= "filesystem2",
+		.offset		= MTDPART_OFS_APPEND,
+		.size		= MTDPART_SIZ_FULL,
+		.mask_flags	= 0,
+	}
+	/* two blocks with bad block table (and mirror) at the end */
+};
+
+static struct davinci_nand_pdata davinci_nand_data = {
+	.mask_chipsel		= BIT(14),
+	.parts			= davinci_nand_partitions,
+	.nr_parts		= ARRAY_SIZE(davinci_nand_partitions),
+	.ecc_mode		= NAND_ECC_HW_SYNDROME,
+	.options		= NAND_USE_FLASH_BBT,
+};
+
+static struct resource davinci_nand_resources[] = {
+	{
+		.start		= DAVINCI_ASYNC_EMIF_DATA_CE0_BASE,
+		.end		= DAVINCI_ASYNC_EMIF_DATA_CE0_BASE + SZ_32M - 1,
+		.flags		= IORESOURCE_MEM,
+	}, {
+		.start		= DAVINCI_ASYNC_EMIF_CONTROL_BASE,
+		.end		= DAVINCI_ASYNC_EMIF_CONTROL_BASE + SZ_4K - 1,
+		.flags		= IORESOURCE_MEM,
+	},
+};
+
+static struct platform_device davinci_nand_device = {
+	.name			= "davinci_nand",
+	.id			= 0,
+
+	.num_resources		= ARRAY_SIZE(davinci_nand_resources),
+	.resource		= davinci_nand_resources,
+
+	.dev			= {
+		.platform_data	= &davinci_nand_data,
+	},
+};
+
+static struct davinci_i2c_platform_data i2c_pdata = {
+	.bus_freq	= 400	/* kHz */,
+	.bus_delay	= 0	/* usec */,
+};
+
+static int dm355evm_mmc_gpios = -EINVAL;
+
+static void dm355evm_mmcsd_gpios(unsigned gpio)
+{
+	gpio_request(gpio + 0, "mmc0_ro");
+	gpio_request(gpio + 1, "mmc0_cd");
+	gpio_request(gpio + 2, "mmc1_ro");
+	gpio_request(gpio + 3, "mmc1_cd");
+
+	/* we "know" these are input-only so we don't
+	 * need to call gpio_direction_input()
+	 */
+
+	dm355evm_mmc_gpios = gpio;
+}
+
+static struct i2c_board_info dm355evm_i2c_info[] = {
+	{ I2C_BOARD_INFO("dm355evm_msp", 0x25),
+		.platform_data = dm355evm_mmcsd_gpios,
+		/* plus irq */ },
+	/* { I2C_BOARD_INFO("tlv320aic3x", 0x1b), }, */
+	/* { I2C_BOARD_INFO("tvp5146", 0x5d), }, */
+};
+
+static void __init evm_init_i2c(void)
+{
+	davinci_init_i2c(&i2c_pdata);
+
+	gpio_request(5, "dm355evm_msp");
+	gpio_direction_input(5);
+	dm355evm_i2c_info[0].irq = gpio_to_irq(5);
+
+	i2c_register_board_info(1, dm355evm_i2c_info,
+			ARRAY_SIZE(dm355evm_i2c_info));
+}
+
+static struct resource dm355evm_dm9000_rsrc[] = {
+	{
+		/* addr */
+		.start	= 0x04014000,
+		.end	= 0x04014001,
+		.flags	= IORESOURCE_MEM,
+	}, {
+		/* data */
+		.start	= 0x04014002,
+		.end	= 0x04014003,
+		.flags	= IORESOURCE_MEM,
+	}, {
+		.flags	= IORESOURCE_IRQ
+			| IORESOURCE_IRQ_HIGHEDGE /* rising (active high) */,
+	},
+};
+
+static struct platform_device dm355evm_dm9000 = {
+	.name		= "dm9000",
+	.id		= -1,
+	.resource	= dm355evm_dm9000_rsrc,
+	.num_resources	= ARRAY_SIZE(dm355evm_dm9000_rsrc),
+};
+
+static struct platform_device *davinci_evm_devices[] __initdata = {
+	&dm355evm_dm9000,
+	&davinci_nand_device,
+};
+
+static struct davinci_uart_config uart_config __initdata = {
+	.enabled_uarts = (1 << 0),
+};
+
+static void __init dm355_evm_map_io(void)
+{
+	dm355_init();
+}
+
+static int dm355evm_mmc_get_cd(int module)
+{
+	if (!gpio_is_valid(dm355evm_mmc_gpios))
+		return -ENXIO;
+	/* low == card present */
+	return !gpio_get_value_cansleep(dm355evm_mmc_gpios + 2 * module + 1);
+}
+
+static int dm355evm_mmc_get_ro(int module)
+{
+	if (!gpio_is_valid(dm355evm_mmc_gpios))
+		return -ENXIO;
+	/* high == card's write protect switch active */
+	return gpio_get_value_cansleep(dm355evm_mmc_gpios + 2 * module + 0);
+}
+
+static struct davinci_mmc_config dm355evm_mmc_config = {
+	.get_cd		= dm355evm_mmc_get_cd,
+	.get_ro		= dm355evm_mmc_get_ro,
+	.wires		= 4,
+	.max_freq       = 50000000,
+	.caps           = MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED,
+	.version	= MMC_CTLR_VERSION_1,
+};
+
+/* Don't connect anything to J10 unless you're only using USB host
+ * mode *and* have to do so with some kind of gender-bender.  If
+ * you have proper Mini-B or Mini-A cables (or Mini-A adapters)
+ * the ID pin won't need any help.
+ */
+#ifdef CONFIG_USB_MUSB_PERIPHERAL
+#define USB_ID_VALUE	0	/* ID pulled high; *should* float */
+#else
+#define USB_ID_VALUE	1	/* ID pulled low */
+#endif
+
+static struct spi_eeprom at25640a = {
+	.byte_len	= SZ_64K / 8,
+	.name		= "at25640a",
+	.page_size	= 32,
+	.flags		= EE_ADDR2,
+};
+
+static struct spi_board_info dm355_evm_spi_info[] __initconst = {
+	{
+		.modalias	= "at25",
+		.platform_data	= &at25640a,
+		.max_speed_hz	= 10 * 1000 * 1000,	/* at 3v3 */
+		.bus_num	= 0,
+		.chip_select	= 0,
+		.mode		= SPI_MODE_0,
+	},
+};
+
+static __init void dm355_evm_init(void)
+{
+	struct clk *aemif;
+
+	gpio_request(1, "dm9000");
+	gpio_direction_input(1);
+	dm355evm_dm9000_rsrc[2].start = gpio_to_irq(1);
+
+	aemif = clk_get(&dm355evm_dm9000.dev, "aemif");
+	if (IS_ERR(aemif))
+		WARN(1, "%s: unable to get AEMIF clock\n", __func__);
+	else
+		clk_enable(aemif);
+
+	platform_add_devices(davinci_evm_devices,
+			     ARRAY_SIZE(davinci_evm_devices));
+	evm_init_i2c();
+	davinci_serial_init(&uart_config);
+
+	/* NOTE:  NAND flash timings set by the UBL are slower than
+	 * needed by MT29F16G08FAA chips ... EMIF.A1CR is 0x40400204
+	 * but could be 0x0400008c for about 25% faster page reads.
+	 */
+
+	gpio_request(2, "usb_id_toggle");
+	gpio_direction_output(2, USB_ID_VALUE);
+	/* irlml6401 switches over 1A in under 8 msec */
+	setup_usb(500, 8);
+
+	davinci_setup_mmc(0, &dm355evm_mmc_config);
+	davinci_setup_mmc(1, &dm355evm_mmc_config);
+
+	dm355_init_spi0(BIT(0), dm355_evm_spi_info,
+			ARRAY_SIZE(dm355_evm_spi_info));
+}
+
+static __init void dm355_evm_irq_init(void)
+{
+	davinci_irq_init();
+}
+
+MACHINE_START(DAVINCI_DM355_EVM, "DaVinci DM355 EVM")
+	.phys_io      = IO_PHYS,
+	.io_pg_offst  = (__IO_ADDRESS(IO_PHYS) >> 18) & 0xfffc,
+	.boot_params  = (0x80000100),
+	.map_io	      = dm355_evm_map_io,
+	.init_irq     = dm355_evm_irq_init,
+	.timer	      = &davinci_timer,
+	.init_machine = dm355_evm_init,
+MACHINE_END
diff --git a/arch/arm/mach-davinci/board-dm355-leopard.c b/arch/arm/mach-davinci/board-dm355-leopard.c
new file mode 100644
index 0000000..28c9008
--- /dev/null
+++ b/arch/arm/mach-davinci/board-dm355-leopard.c
@@ -0,0 +1,296 @@
+/*
+ * DM355 leopard board support
+ *
+ * Based on board-dm355-evm.c
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/nand.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/clk.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/eeprom.h>
+
+#include <asm/setup.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/flash.h>
+
+#include <mach/hardware.h>
+#include <mach/dm355.h>
+#include <mach/psc.h>
+#include <mach/common.h>
+#include <mach/i2c.h>
+#include <mach/serial.h>
+#include <mach/nand.h>
+#include <mach/mmc.h>
+#include <mach/common.h>
+
+#define DAVINCI_ASYNC_EMIF_CONTROL_BASE		0x01e10000
+#define DAVINCI_ASYNC_EMIF_DATA_CE0_BASE	0x02000000
+
+/* NOTE:  this is geared for the standard config, with a socketed
+ * 2 GByte Micron NAND (MT29F16G08FAA) using 128KB sectors.  If you
+ * swap chips, maybe with a different block size, partitioning may
+ * need to be changed.
+ */
+#define NAND_BLOCK_SIZE		SZ_128K
+
+static struct mtd_partition davinci_nand_partitions[] = {
+	{
+		/* UBL (a few copies) plus U-Boot */
+		.name		= "bootloader",
+		.offset		= 0,
+		.size		= 15 * NAND_BLOCK_SIZE,
+		.mask_flags	= MTD_WRITEABLE, /* force read-only */
+	}, {
+		/* U-Boot environment */
+		.name		= "params",
+		.offset		= MTDPART_OFS_APPEND,
+		.size		= 1 * NAND_BLOCK_SIZE,
+		.mask_flags	= 0,
+	}, {
+		.name		= "kernel",
+		.offset		= MTDPART_OFS_APPEND,
+		.size		= SZ_4M,
+		.mask_flags	= 0,
+	}, {
+		.name		= "filesystem1",
+		.offset		= MTDPART_OFS_APPEND,
+		.size		= SZ_512M,
+		.mask_flags	= 0,
+	}, {
+		.name		= "filesystem2",
+		.offset		= MTDPART_OFS_APPEND,
+		.size		= MTDPART_SIZ_FULL,
+		.mask_flags	= 0,
+	}
+	/* two blocks with bad block table (and mirror) at the end */
+};
+
+static struct davinci_nand_pdata davinci_nand_data = {
+	.mask_chipsel		= BIT(14),
+	.parts			= davinci_nand_partitions,
+	.nr_parts		= ARRAY_SIZE(davinci_nand_partitions),
+	.ecc_mode		= NAND_ECC_HW_SYNDROME,
+	.options		= NAND_USE_FLASH_BBT,
+};
+
+static struct resource davinci_nand_resources[] = {
+	{
+		.start		= DAVINCI_ASYNC_EMIF_DATA_CE0_BASE,
+		.end		= DAVINCI_ASYNC_EMIF_DATA_CE0_BASE + SZ_32M - 1,
+		.flags		= IORESOURCE_MEM,
+	}, {
+		.start		= DAVINCI_ASYNC_EMIF_CONTROL_BASE,
+		.end		= DAVINCI_ASYNC_EMIF_CONTROL_BASE + SZ_4K - 1,
+		.flags		= IORESOURCE_MEM,
+	},
+};
+
+static struct platform_device davinci_nand_device = {
+	.name			= "davinci_nand",
+	.id			= 0,
+
+	.num_resources		= ARRAY_SIZE(davinci_nand_resources),
+	.resource		= davinci_nand_resources,
+
+	.dev			= {
+		.platform_data	= &davinci_nand_data,
+	},
+};
+
+static struct davinci_i2c_platform_data i2c_pdata = {
+	.bus_freq	= 400	/* kHz */,
+	.bus_delay	= 0	/* usec */,
+};
+
+static int leopard_mmc_gpio = -EINVAL;
+
+static void dm355leopard_mmcsd_gpios(unsigned gpio)
+{
+	gpio_request(gpio + 0, "mmc0_ro");
+	gpio_request(gpio + 1, "mmc0_cd");
+	gpio_request(gpio + 2, "mmc1_ro");
+	gpio_request(gpio + 3, "mmc1_cd");
+
+	/* we "know" these are input-only so we don't
+	 * need to call gpio_direction_input()
+	 */
+
+	leopard_mmc_gpio = gpio;
+}
+
+static struct i2c_board_info dm355leopard_i2c_info[] = {
+	{ I2C_BOARD_INFO("dm355leopard_msp", 0x25),
+		.platform_data = dm355leopard_mmcsd_gpios,
+		/* plus irq */ },
+	/* { I2C_BOARD_INFO("tlv320aic3x", 0x1b), }, */
+	/* { I2C_BOARD_INFO("tvp5146", 0x5d), }, */
+};
+
+static void __init leopard_init_i2c(void)
+{
+	davinci_init_i2c(&i2c_pdata);
+
+	gpio_request(5, "dm355leopard_msp");
+	gpio_direction_input(5);
+	dm355leopard_i2c_info[0].irq = gpio_to_irq(5);
+
+	i2c_register_board_info(1, dm355leopard_i2c_info,
+			ARRAY_SIZE(dm355leopard_i2c_info));
+}
+
+static struct resource dm355leopard_dm9000_rsrc[] = {
+	{
+		/* addr */
+		.start	= 0x04000000,
+		.end	= 0x04000001,
+		.flags	= IORESOURCE_MEM,
+	}, {
+		/* data */
+		.start	= 0x04000016,
+		.end	= 0x04000017,
+		.flags	= IORESOURCE_MEM,
+	}, {
+		.flags	= IORESOURCE_IRQ
+			| IORESOURCE_IRQ_HIGHEDGE /* rising (active high) */,
+	},
+};
+
+static struct platform_device dm355leopard_dm9000 = {
+	.name		= "dm9000",
+	.id		= -1,
+	.resource	= dm355leopard_dm9000_rsrc,
+	.num_resources	= ARRAY_SIZE(dm355leopard_dm9000_rsrc),
+};
+
+static struct platform_device *davinci_leopard_devices[] __initdata = {
+	&dm355leopard_dm9000,
+	&davinci_nand_device,
+};
+
+static struct davinci_uart_config uart_config __initdata = {
+	.enabled_uarts = (1 << 0),
+};
+
+static void __init dm355_leopard_map_io(void)
+{
+	dm355_init();
+}
+
+static int dm355leopard_mmc_get_cd(int module)
+{
+	if (!gpio_is_valid(leopard_mmc_gpio))
+		return -ENXIO;
+	/* low == card present */
+	return !gpio_get_value_cansleep(leopard_mmc_gpio + 2 * module + 1);
+}
+
+static int dm355leopard_mmc_get_ro(int module)
+{
+	if (!gpio_is_valid(leopard_mmc_gpio))
+		return -ENXIO;
+	/* high == card's write protect switch active */
+	return gpio_get_value_cansleep(leopard_mmc_gpio + 2 * module + 0);
+}
+
+static struct davinci_mmc_config dm355leopard_mmc_config = {
+	.get_cd		= dm355leopard_mmc_get_cd,
+	.get_ro		= dm355leopard_mmc_get_ro,
+	.wires		= 4,
+	.max_freq       = 50000000,
+	.caps           = MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED,
+};
+
+/* Don't connect anything to J10 unless you're only using USB host
+ * mode *and* have to do so with some kind of gender-bender.  If
+ * you have proper Mini-B or Mini-A cables (or Mini-A adapters)
+ * the ID pin won't need any help.
+ */
+#ifdef CONFIG_USB_MUSB_PERIPHERAL
+#define USB_ID_VALUE	0	/* ID pulled high; *should* float */
+#else
+#define USB_ID_VALUE	1	/* ID pulled low */
+#endif
+
+static struct spi_eeprom at25640a = {
+	.byte_len	= SZ_64K / 8,
+	.name		= "at25640a",
+	.page_size	= 32,
+	.flags		= EE_ADDR2,
+};
+
+static struct spi_board_info dm355_leopard_spi_info[] __initconst = {
+	{
+		.modalias	= "at25",
+		.platform_data	= &at25640a,
+		.max_speed_hz	= 10 * 1000 * 1000,	/* at 3v3 */
+		.bus_num	= 0,
+		.chip_select	= 0,
+		.mode		= SPI_MODE_0,
+	},
+};
+
+static __init void dm355_leopard_init(void)
+{
+	struct clk *aemif;
+
+	gpio_request(9, "dm9000");
+	gpio_direction_input(9);
+	dm355leopard_dm9000_rsrc[2].start = gpio_to_irq(9);
+
+	aemif = clk_get(&dm355leopard_dm9000.dev, "aemif");
+	if (IS_ERR(aemif))
+		WARN(1, "%s: unable to get AEMIF clock\n", __func__);
+	else
+		clk_enable(aemif);
+
+	platform_add_devices(davinci_leopard_devices,
+			     ARRAY_SIZE(davinci_leopard_devices));
+	leopard_init_i2c();
+	davinci_serial_init(&uart_config);
+
+	/* NOTE:  NAND flash timings set by the UBL are slower than
+	 * needed by MT29F16G08FAA chips ... EMIF.A1CR is 0x40400204
+	 * but could be 0x0400008c for about 25% faster page reads.
+	 */
+
+	gpio_request(2, "usb_id_toggle");
+	gpio_direction_output(2, USB_ID_VALUE);
+	/* irlml6401 switches over 1A in under 8 msec */
+	setup_usb(500, 8);
+
+	davinci_setup_mmc(0, &dm355leopard_mmc_config);
+	davinci_setup_mmc(1, &dm355leopard_mmc_config);
+
+	dm355_init_spi0(BIT(0), dm355_leopard_spi_info,
+			ARRAY_SIZE(dm355_leopard_spi_info));
+}
+
+static __init void dm355_leopard_irq_init(void)
+{
+	davinci_irq_init();
+}
+
+MACHINE_START(DM355_LEOPARD, "DaVinci DM355 leopard")
+	.phys_io      = IO_PHYS,
+	.io_pg_offst  = (__IO_ADDRESS(IO_PHYS) >> 18) & 0xfffc,
+	.boot_params  = (0x80000100),
+	.map_io	      = dm355_leopard_map_io,
+	.init_irq     = dm355_leopard_irq_init,
+	.timer	      = &davinci_timer,
+	.init_machine = dm355_leopard_init,
+MACHINE_END
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index b2e7f9c..d9d4045 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -16,12 +16,11 @@
 #include <linux/gpio.h>
 #include <linux/leds.h>
 #include <linux/memory.h>
-#include <linux/etherdevice.h>
 
 #include <linux/i2c.h>
 #include <linux/i2c/pcf857x.h>
 #include <linux/i2c/at24.h>
-
+#include <linux/etherdevice.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/nand.h>
 #include <linux/mtd/partitions.h>
@@ -44,6 +43,9 @@
 #include <mach/mux.h>
 #include <mach/psc.h>
 #include <mach/nand.h>
+#include <mach/mmc.h>
+#include <mach/emac.h>
+#include <mach/common.h>
 
 #define DM644X_EVM_PHY_MASK		(0x2)
 #define DM644X_EVM_MDIO_FREQUENCY	(2200000) /* PHY bus frequency */
@@ -436,45 +438,15 @@
  *  - 0x0039, 1 byte NTSC vs PAL (bit 0x80 == PAL)
  *  - ... newer boards may have more
  */
-static struct memory_accessor *at24_mem_acc;
-
-static void at24_setup(struct memory_accessor *mem_acc, void *context)
-{
-	DECLARE_MAC_BUF(mac_str);
-	char mac_addr[6];
-
-	at24_mem_acc = mem_acc;
-
-	/* Read MAC addr from EEPROM */
-	if (at24_mem_acc->read(at24_mem_acc, mac_addr, 0x7f00, 6) == 6) {
-		printk(KERN_INFO "Read MAC addr from EEPROM: %s\n",
-		       print_mac(mac_str, mac_addr));
-	}
-}
 
 static struct at24_platform_data eeprom_info = {
 	.byte_len	= (256*1024) / 8,
 	.page_size	= 64,
 	.flags		= AT24_FLAG_ADDR16,
-	.setup          = at24_setup,
+	.setup          = davinci_get_mac_addr,
+	.context	= (void *)0x7f00,
 };
 
-int dm6446evm_eeprom_read(void *buf, off_t off, size_t count)
-{
-	if (at24_mem_acc)
-		return at24_mem_acc->read(at24_mem_acc, buf, off, count);
-	return -ENODEV;
-}
-EXPORT_SYMBOL(dm6446evm_eeprom_read);
-
-int dm6446evm_eeprom_write(void *buf, off_t off, size_t count)
-{
-	if (at24_mem_acc)
-		return at24_mem_acc->write(at24_mem_acc, buf, off, count);
-	return -ENODEV;
-}
-EXPORT_SYMBOL(dm6446evm_eeprom_write);
-
 /*
  * MSP430 supports RTC, card detection, input from IR remote, and
  * a bit more.  It triggers interrupts on GPIO(7) from pressing
@@ -545,6 +517,27 @@
 	return (buf[3] << 8) | buf[2];
 }
 
+static int dm6444evm_mmc_get_cd(int module)
+{
+	int status = dm6444evm_msp430_get_pins();
+
+	return (status < 0) ? status : !(status & BIT(1));
+}
+
+static int dm6444evm_mmc_get_ro(int module)
+{
+	int status = dm6444evm_msp430_get_pins();
+
+	return (status < 0) ? status : status & BIT(6 + 8);
+}
+
+static struct davinci_mmc_config dm6446evm_mmc_config = {
+	.get_cd		= dm6444evm_mmc_get_cd,
+	.get_ro		= dm6444evm_mmc_get_ro,
+	.wires		= 4,
+	.version	= MMC_CTLR_VERSION_1
+};
+
 static struct i2c_board_info __initdata i2c_info[] =  {
 	{
 		I2C_BOARD_INFO("dm6446evm_msp", 0x23),
@@ -598,7 +591,6 @@
 static void __init
 davinci_evm_map_io(void)
 {
-	davinci_map_common_io();
 	dm644x_init();
 }
 
@@ -639,6 +631,7 @@
 static __init void davinci_evm_init(void)
 {
 	struct clk *aemif_clk;
+	struct davinci_soc_info *soc_info = &davinci_soc_info;
 
 	aemif_clk = clk_get(NULL, "aemif");
 	clk_enable(aemif_clk);
@@ -671,8 +664,13 @@
 			     ARRAY_SIZE(davinci_evm_devices));
 	evm_init_i2c();
 
+	davinci_setup_mmc(0, &dm6446evm_mmc_config);
+
 	davinci_serial_init(&uart_config);
 
+	soc_info->emac_pdata->phy_mask = DM644X_EVM_PHY_MASK;
+	soc_info->emac_pdata->mdio_max_freq = DM644X_EVM_MDIO_FREQUENCY;
+
 	/* Register the fixup for PHY on DaVinci */
 	phy_register_fixup_for_uid(LXT971_PHY_ID, LXT971_PHY_MASK,
 					davinci_phy_fixup);
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
new file mode 100644
index 0000000..e17de63
--- /dev/null
+++ b/arch/arm/mach-davinci/board-dm646x-evm.c
@@ -0,0 +1,262 @@
+/*
+ * TI DaVinci DM646X EVM board
+ *
+ * Derived from: arch/arm/mach-davinci/board-evm.c
+ * Copyright (C) 2006 Texas Instruments.
+ *
+ * (C) 2007-2008, MontaVista Software, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+
+/**************************************************************************
+ * Included Files
+ **************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/major.h>
+#include <linux/root_dev.h>
+#include <linux/dma-mapping.h>
+#include <linux/serial.h>
+#include <linux/serial_8250.h>
+#include <linux/leds.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/i2c/at24.h>
+#include <linux/i2c/pcf857x.h>
+#include <linux/etherdevice.h>
+
+#include <asm/setup.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/flash.h>
+
+#include <mach/dm646x.h>
+#include <mach/common.h>
+#include <mach/psc.h>
+#include <mach/serial.h>
+#include <mach/i2c.h>
+#include <mach/mmc.h>
+#include <mach/emac.h>
+#include <mach/common.h>
+
+#define DM646X_EVM_PHY_MASK		(0x2)
+#define DM646X_EVM_MDIO_FREQUENCY	(2200000) /* PHY bus frequency */
+
+static struct davinci_uart_config uart_config __initdata = {
+	.enabled_uarts = (1 << 0),
+};
+
+/* LEDS */
+
+static struct gpio_led evm_leds[] = {
+	{ .name = "DS1", .active_low = 1, },
+	{ .name = "DS2", .active_low = 1, },
+	{ .name = "DS3", .active_low = 1, },
+	{ .name = "DS4", .active_low = 1, },
+};
+
+static __initconst struct gpio_led_platform_data evm_led_data = {
+	.num_leds = ARRAY_SIZE(evm_leds),
+	.leds     = evm_leds,
+};
+
+static struct platform_device *evm_led_dev;
+
+static int evm_led_setup(struct i2c_client *client, int gpio,
+			unsigned int ngpio, void *c)
+{
+	struct gpio_led *leds = evm_leds;
+	int status;
+
+	while (ngpio--) {
+		leds->gpio = gpio++;
+		leds++;
+	}
+
+	evm_led_dev = platform_device_alloc("leds-gpio", 0);
+	platform_device_add_data(evm_led_dev, &evm_led_data,
+				sizeof(evm_led_data));
+
+	evm_led_dev->dev.parent = &client->dev;
+	status = platform_device_add(evm_led_dev);
+	if (status < 0) {
+		platform_device_put(evm_led_dev);
+		evm_led_dev = NULL;
+	}
+	return status;
+}
+
+static int evm_led_teardown(struct i2c_client *client, int gpio,
+				unsigned ngpio, void *c)
+{
+	if (evm_led_dev) {
+		platform_device_unregister(evm_led_dev);
+		evm_led_dev = NULL;
+	}
+	return 0;
+}
+
+static int evm_sw_gpio[4] = { -EINVAL, -EINVAL, -EINVAL, -EINVAL };
+
+static int evm_sw_setup(struct i2c_client *client, int gpio,
+			unsigned ngpio, void *c)
+{
+	int status;
+	int i;
+	char label[10];
+
+	for (i = 0; i < 4; ++i) {
+		snprintf(label, 10, "user_sw%d", i);
+		status = gpio_request(gpio, label);
+		if (status)
+			goto out_free;
+		evm_sw_gpio[i] = gpio++;
+
+		status = gpio_direction_input(evm_sw_gpio[i]);
+		if (status) {
+			gpio_free(evm_sw_gpio[i]);
+			evm_sw_gpio[i] = -EINVAL;
+			goto out_free;
+		}
+
+		status = gpio_export(evm_sw_gpio[i], 0);
+		if (status) {
+			gpio_free(evm_sw_gpio[i]);
+			evm_sw_gpio[i] = -EINVAL;
+			goto out_free;
+		}
+	}
+	return status;
+out_free:
+	for (i = 0; i < 4; ++i) {
+		if (evm_sw_gpio[i] != -EINVAL) {
+			gpio_free(evm_sw_gpio[i]);
+			evm_sw_gpio[i] = -EINVAL;
+		}
+	}
+	return status;
+}
+
+static int evm_sw_teardown(struct i2c_client *client, int gpio,
+			unsigned ngpio, void *c)
+{
+	int i;
+
+	for (i = 0; i < 4; ++i) {
+		if (evm_sw_gpio[i] != -EINVAL) {
+			gpio_unexport(evm_sw_gpio[i]);
+			gpio_free(evm_sw_gpio[i]);
+			evm_sw_gpio[i] = -EINVAL;
+		}
+	}
+	return 0;
+}
+
+static int evm_pcf_setup(struct i2c_client *client, int gpio,
+			unsigned int ngpio, void *c)
+{
+	int status;
+
+	if (ngpio < 8)
+		return -EINVAL;
+
+	status = evm_sw_setup(client, gpio, 4, c);
+	if (status)
+		return status;
+
+	return evm_led_setup(client, gpio+4, 4, c);
+}
+
+static int evm_pcf_teardown(struct i2c_client *client, int gpio,
+			unsigned int ngpio, void *c)
+{
+	BUG_ON(ngpio < 8);
+
+	evm_sw_teardown(client, gpio, 4, c);
+	evm_led_teardown(client, gpio+4, 4, c);
+
+	return 0;
+}
+
+static struct pcf857x_platform_data pcf_data = {
+	.gpio_base	= DAVINCI_N_GPIO+1,
+	.setup		= evm_pcf_setup,
+	.teardown	= evm_pcf_teardown,
+};
+
+/* Most of this EEPROM is unused, but U-Boot uses some data:
+ *  - 0x7f00, 6 bytes Ethernet Address
+ *  - ... newer boards may have more
+ */
+
+static struct at24_platform_data eeprom_info = {
+	.byte_len       = (256*1024) / 8,
+	.page_size      = 64,
+	.flags          = AT24_FLAG_ADDR16,
+	.setup          = davinci_get_mac_addr,
+	.context	= (void *)0x7f00,
+};
+
+static struct i2c_board_info __initdata i2c_info[] =  {
+	{
+		I2C_BOARD_INFO("24c256", 0x50),
+		.platform_data  = &eeprom_info,
+	},
+	{
+		I2C_BOARD_INFO("pcf8574a", 0x38),
+		.platform_data	= &pcf_data,
+	},
+};
+
+static struct davinci_i2c_platform_data i2c_pdata = {
+	.bus_freq       = 100 /* kHz */,
+	.bus_delay      = 0 /* usec */,
+};
+
+static void __init evm_init_i2c(void)
+{
+	davinci_init_i2c(&i2c_pdata);
+	i2c_register_board_info(1, i2c_info, ARRAY_SIZE(i2c_info));
+}
+
+static void __init davinci_map_io(void)
+{
+	dm646x_init();
+}
+
+static __init void evm_init(void)
+{
+	struct davinci_soc_info *soc_info = &davinci_soc_info;
+
+	evm_init_i2c();
+	davinci_serial_init(&uart_config);
+
+	soc_info->emac_pdata->phy_mask = DM646X_EVM_PHY_MASK;
+	soc_info->emac_pdata->mdio_max_freq = DM646X_EVM_MDIO_FREQUENCY;
+}
+
+static __init void davinci_dm646x_evm_irq_init(void)
+{
+	davinci_irq_init();
+}
+
+MACHINE_START(DAVINCI_DM6467_EVM, "DaVinci DM646x EVM")
+	.phys_io      = IO_PHYS,
+	.io_pg_offst  = (__IO_ADDRESS(IO_PHYS) >> 18) & 0xfffc,
+	.boot_params  = (0x80000100),
+	.map_io       = davinci_map_io,
+	.init_irq     = davinci_dm646x_evm_irq_init,
+	.timer        = &davinci_timer,
+	.init_machine = evm_init,
+MACHINE_END
+
diff --git a/arch/arm/mach-davinci/board-sffsdr.c b/arch/arm/mach-davinci/board-sffsdr.c
new file mode 100644
index 0000000..748a8e4
--- /dev/null
+++ b/arch/arm/mach-davinci/board-sffsdr.c
@@ -0,0 +1,189 @@
+/*
+ * Lyrtech SFFSDR board support.
+ *
+ * Copyright (C) 2008 Philip Balister, OpenSDR <philip@opensdr.com>
+ * Copyright (C) 2008 Lyrtech <www.lyrtech.com>
+ *
+ * Based on DV-EVM platform, original copyright follows:
+ *
+ * Copyright (C) 2007 MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+
+#include <linux/i2c.h>
+#include <linux/i2c/at24.h>
+#include <linux/etherdevice.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
+#include <linux/io.h>
+
+#include <asm/setup.h>
+#include <asm/mach-types.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/flash.h>
+
+#include <mach/dm644x.h>
+#include <mach/common.h>
+#include <mach/i2c.h>
+#include <mach/serial.h>
+#include <mach/psc.h>
+#include <mach/mux.h>
+
+#define SFFSDR_PHY_MASK		(0x2)
+#define SFFSDR_MDIO_FREQUENCY	(2200000) /* PHY bus frequency */
+
+#define DAVINCI_ASYNC_EMIF_CONTROL_BASE   0x01e00000
+#define DAVINCI_ASYNC_EMIF_DATA_CE0_BASE  0x02000000
+
+struct mtd_partition davinci_sffsdr_nandflash_partition[] = {
+	/* U-Boot Environment: Block 0
+	 * UBL:                Block 1
+	 * U-Boot:             Blocks 6-7 (256 KB)
+	 * Integrity Kernel:   Blocks 8-31 (3 MB)
+	 * Integrity Data:     Blocks 100-END
+	 */
+	{
+		.name		= "Linux Kernel",
+		.offset		= 32 * SZ_128K,
+		.size		= 16 * SZ_128K, /* 2 MB */
+		.mask_flags	= MTD_WRITEABLE, /* Force read-only */
+	},
+	{
+		.name		= "Linux ROOT",
+		.offset		= MTDPART_OFS_APPEND,
+		.size		= 256 * SZ_128K, /* 32 MB */
+		.mask_flags	= 0, /* R/W */
+	},
+};
+
+static struct flash_platform_data davinci_sffsdr_nandflash_data = {
+	.parts		= davinci_sffsdr_nandflash_partition,
+	.nr_parts	= ARRAY_SIZE(davinci_sffsdr_nandflash_partition),
+};
+
+static struct resource davinci_sffsdr_nandflash_resource[] = {
+	{
+		.start		= DAVINCI_ASYNC_EMIF_DATA_CE0_BASE,
+		.end		= DAVINCI_ASYNC_EMIF_DATA_CE0_BASE + SZ_16M - 1,
+		.flags		= IORESOURCE_MEM,
+	}, {
+		.start		= DAVINCI_ASYNC_EMIF_CONTROL_BASE,
+		.end		= DAVINCI_ASYNC_EMIF_CONTROL_BASE + SZ_4K - 1,
+		.flags		= IORESOURCE_MEM,
+	},
+};
+
+static struct platform_device davinci_sffsdr_nandflash_device = {
+	.name		= "davinci_nand", /* Name of driver */
+	.id		= 0,
+	.dev		= {
+		.platform_data	= &davinci_sffsdr_nandflash_data,
+	},
+	.num_resources	= ARRAY_SIZE(davinci_sffsdr_nandflash_resource),
+	.resource	= davinci_sffsdr_nandflash_resource,
+};
+
+static struct emac_platform_data sffsdr_emac_pdata = {
+	.phy_mask	= SFFSDR_PHY_MASK,
+	.mdio_max_freq	= SFFSDR_MDIO_FREQUENCY,
+};
+
+static struct at24_platform_data eeprom_info = {
+	.byte_len	= (64*1024) / 8,
+	.page_size	= 32,
+	.flags		= AT24_FLAG_ADDR16,
+};
+
+static struct i2c_board_info __initdata i2c_info[] =  {
+	{
+		I2C_BOARD_INFO("24lc64", 0x50),
+		.platform_data	= &eeprom_info,
+	},
+	/* Other I2C devices:
+	 * MSP430,  addr 0x23 (not used)
+	 * PCA9543, addr 0x70 (setup done by U-Boot)
+	 * ADS7828, addr 0x48 (ADC for voltage monitoring)
+	 */
+};
+
+static struct davinci_i2c_platform_data i2c_pdata = {
+	.bus_freq	= 20 /* kHz */,
+	.bus_delay	= 100 /* usec */,
+};
+
+static void __init sffsdr_init_i2c(void)
+{
+	davinci_init_i2c(&i2c_pdata);
+	i2c_register_board_info(1, i2c_info, ARRAY_SIZE(i2c_info));
+}
+
+static struct platform_device *davinci_sffsdr_devices[] __initdata = {
+	&davinci_sffsdr_nandflash_device,
+};
+
+static struct davinci_uart_config uart_config __initdata = {
+	.enabled_uarts = (1 << 0),
+};
+
+static void __init davinci_sffsdr_map_io(void)
+{
+	dm644x_init();
+}
+
+static __init void davinci_sffsdr_init(void)
+{
+	struct davinci_soc_info *soc_info = &davinci_soc_info;
+
+	platform_add_devices(davinci_sffsdr_devices,
+			     ARRAY_SIZE(davinci_sffsdr_devices));
+	sffsdr_init_i2c();
+	davinci_serial_init(&uart_config);
+	soc_info->emac_pdata->phy_mask = SFFSDR_PHY_MASK;
+	soc_info->emac_pdata->mdio_max_freq = SFFSDR_MDIO_FREQUENCY;
+	setup_usb(0, 0); /* We support only peripheral mode. */
+
+	/* mux VLYNQ pins */
+	davinci_cfg_reg(DM644X_VLYNQEN);
+	davinci_cfg_reg(DM644X_VLYNQWD);
+}
+
+static __init void davinci_sffsdr_irq_init(void)
+{
+	davinci_irq_init();
+}
+
+MACHINE_START(SFFSDR, "Lyrtech SFFSDR")
+	/* Maintainer: Hugo Villeneuve hugo.villeneuve@lyrtech.com */
+	.phys_io      = IO_PHYS,
+	.io_pg_offst  = (__IO_ADDRESS(IO_PHYS) >> 18) & 0xfffc,
+	.boot_params  = (DAVINCI_DDR_BASE + 0x100),
+	.map_io	      = davinci_sffsdr_map_io,
+	.init_irq     = davinci_sffsdr_irq_init,
+	.timer	      = &davinci_timer,
+	.init_machine = davinci_sffsdr_init,
+MACHINE_END
diff --git a/arch/arm/mach-davinci/clock.c b/arch/arm/mach-davinci/clock.c
index f0baaa1..39bf321 100644
--- a/arch/arm/mach-davinci/clock.c
+++ b/arch/arm/mach-davinci/clock.c
@@ -42,7 +42,8 @@
 	if (clk->parent)
 		__clk_enable(clk->parent);
 	if (clk->usecount++ == 0 && (clk->flags & CLK_PSC))
-		davinci_psc_config(psc_domain(clk), clk->lpsc, 1);
+		davinci_psc_config(psc_domain(clk), clk->psc_ctlr,
+				clk->lpsc, 1);
 }
 
 static void __clk_disable(struct clk *clk)
@@ -50,7 +51,8 @@
 	if (WARN_ON(clk->usecount == 0))
 		return;
 	if (--clk->usecount == 0 && !(clk->flags & CLK_PLL))
-		davinci_psc_config(psc_domain(clk), clk->lpsc, 0);
+		davinci_psc_config(psc_domain(clk), clk->psc_ctlr,
+				clk->lpsc, 0);
 	if (clk->parent)
 		__clk_disable(clk->parent);
 }
@@ -164,11 +166,11 @@
 			continue;
 
 		/* ignore if in Disabled or SwRstDisable states */
-		if (!davinci_psc_is_clk_active(ck->lpsc))
+		if (!davinci_psc_is_clk_active(ck->psc_ctlr, ck->lpsc))
 			continue;
 
 		pr_info("Clocks: disable unused %s\n", ck->name);
-		davinci_psc_config(psc_domain(ck), ck->lpsc, 0);
+		davinci_psc_config(psc_domain(ck), ck->psc_ctlr, ck->lpsc, 0);
 	}
 	spin_unlock_irq(&clockfw_lock);
 
diff --git a/arch/arm/mach-davinci/clock.h b/arch/arm/mach-davinci/clock.h
index 35736ec..27233cb 100644
--- a/arch/arm/mach-davinci/clock.h
+++ b/arch/arm/mach-davinci/clock.h
@@ -67,6 +67,7 @@
 	u8			usecount;
 	u8			flags;
 	u8			lpsc;
+	u8			psc_ctlr;
 	struct clk              *parent;
 	struct pll_data         *pll_data;
 	u32                     div_reg;
@@ -93,4 +94,7 @@
 	}
 
 int davinci_clk_init(struct davinci_clk *clocks);
+
+extern struct platform_device davinci_wdt_device;
+
 #endif
diff --git a/arch/arm/mach-davinci/common.c b/arch/arm/mach-davinci/common.c
new file mode 100644
index 0000000..61ede19
--- /dev/null
+++ b/arch/arm/mach-davinci/common.c
@@ -0,0 +1,108 @@
+/*
+ * Code common to all DaVinci SoCs.
+ *
+ * Author: Mark A. Greer <mgreer@mvista.com>
+ *
+ * 2009 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/etherdevice.h>
+
+#include <asm/tlb.h>
+#include <asm/mach/map.h>
+
+#include <mach/common.h>
+#include <mach/cputype.h>
+#include <mach/emac.h>
+
+#include "clock.h"
+
+struct davinci_soc_info davinci_soc_info;
+EXPORT_SYMBOL(davinci_soc_info);
+
+void __iomem *davinci_intc_base;
+int davinci_intc_type;
+
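+/*
+ * at24 EEPROM setup() callback.  Board code passes the EEPROM offset of
+ * the MAC address through the at24_platform_data context pointer.
+ */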
+void davinci_get_mac_addr(struct memory_accessor *mem_acc, void *context)
+{
+	char *mac_addr = davinci_soc_info.emac_pdata->mac_addr;
+	off_t offset = (off_t)context;
+
+	/* Read MAC addr from EEPROM */
+	if (mem_acc->read(mem_acc, mac_addr, offset, ETH_ALEN) == ETH_ALEN)
+		pr_info("Read MAC addr from EEPROM: %pM\n", mac_addr);
+}
+
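+/*
+ * Decode the JTAG ID register (variant in bits 31:28, part number in
+ * bits 27:12) and match it against the SoC's table of known IDs.
+ */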
+static struct davinci_id * __init davinci_get_id(u32 jtag_id)
+{
+	int i;
+	struct davinci_id *dip;
+	u8 variant = (jtag_id & 0xf0000000) >> 28;
+	u16 part_no = (jtag_id & 0x0ffff000) >> 12;
+
+	for (i = 0, dip = davinci_soc_info.ids; i < davinci_soc_info.ids_num;
+			i++, dip++)
+		/* Don't care about the manufacturer right now */
+		if ((dip->part_no == part_no) && (dip->variant == variant))
+			return dip;
+
+	return NULL;
+}
+
+void __init davinci_common_init(struct davinci_soc_info *soc_info)
+{
+	int ret;
+	struct davinci_id *dip;
+
+	if (!soc_info) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	memcpy(&davinci_soc_info, soc_info, sizeof(struct davinci_soc_info));
+
+	if (davinci_soc_info.io_desc && (davinci_soc_info.io_desc_num > 0))
+		iotable_init(davinci_soc_info.io_desc,
+				davinci_soc_info.io_desc_num);
+
+	/*
+	 * Normally devicemaps_init() would flush caches and tlb after
+	 * mdesc->map_io(), but we must also do it here because of the CPU
+	 * revision check below.
+	 */
+	local_flush_tlb_all();
+	flush_cache_all();
+
+	/*
+	 * We want to check CPU revision early for cpu_is_xxxx() macros.
+	 * IO space mapping must be initialized before we can do that.
+	 */
+	davinci_soc_info.jtag_id = __raw_readl(davinci_soc_info.jtag_id_base);
+
+	dip = davinci_get_id(davinci_soc_info.jtag_id);
+	if (!dip) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	davinci_soc_info.cpu_id = dip->cpu_id;
+	pr_info("DaVinci %s variant 0x%x\n", dip->name, dip->variant);
+
+	if (davinci_soc_info.cpu_clks) {
+		ret = davinci_clk_init(davinci_soc_info.cpu_clks);
+
+		if (ret != 0)
+			goto err;
+	}
+
+	davinci_intc_base = davinci_soc_info.intc_base;
+	davinci_intc_type = davinci_soc_info.intc_type;
+	return;
+
+err:
+	pr_err("davinci_common_init: SoC Initialization failed\n");
+}
diff --git a/arch/arm/mach-davinci/cp_intc.c b/arch/arm/mach-davinci/cp_intc.c
new file mode 100644
index 0000000..96c8e97
--- /dev/null
+++ b/arch/arm/mach-davinci/cp_intc.c
@@ -0,0 +1,161 @@
+/*
+ * TI Common Platform Interrupt Controller (cp_intc) driver
+ *
+ * Author: Steve Chen <schen@mvista.com>
+ * Copyright (C) 2008-2009, MontaVista Software, Inc. <source@mvista.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+
+#include <mach/cp_intc.h>
+
+static void __iomem *cp_intc_base;
+
+static inline unsigned int cp_intc_read(unsigned offset)
+{
+	return __raw_readl(cp_intc_base + offset);
+}
+
+static inline void cp_intc_write(unsigned long value, unsigned offset)
+{
+	__raw_writel(value, cp_intc_base + offset);
+}
+
+static void cp_intc_ack_irq(unsigned int irq)
+{
+	cp_intc_write(irq, CP_INTC_SYS_STAT_IDX_CLR);
+}
+
+/* Disable interrupt */
+static void cp_intc_mask_irq(unsigned int irq)
+{
+	/* XXX don't know why we need to disable nIRQ here... */
+	cp_intc_write(1, CP_INTC_HOST_ENABLE_IDX_CLR);
+	cp_intc_write(irq, CP_INTC_SYS_ENABLE_IDX_CLR);
+	cp_intc_write(1, CP_INTC_HOST_ENABLE_IDX_SET);
+}
+
+/* Enable interrupt */
+static void cp_intc_unmask_irq(unsigned int irq)
+{
+	cp_intc_write(irq, CP_INTC_SYS_ENABLE_IDX_SET);
+}
+
+static int cp_intc_set_irq_type(unsigned int irq, unsigned int flow_type)
+{
+	unsigned reg		= BIT_WORD(irq);
+	unsigned mask		= BIT_MASK(irq);
+	unsigned polarity	= cp_intc_read(CP_INTC_SYS_POLARITY(reg));
+	unsigned type		= cp_intc_read(CP_INTC_SYS_TYPE(reg));
+
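+	/*
+	 * One bit per interrupt in each register: TYPE selects edge (1)
+	 * vs. level (0), POLARITY selects rising/high (1) vs.
+	 * falling/low (0).
+	 */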
+	switch (flow_type) {
+	case IRQ_TYPE_EDGE_RISING:
+		polarity |= mask;
+		type |= mask;
+		break;
+	case IRQ_TYPE_EDGE_FALLING:
+		polarity &= ~mask;
+		type |= mask;
+		break;
+	case IRQ_TYPE_LEVEL_HIGH:
+		polarity |= mask;
+		type &= ~mask;
+		break;
+	case IRQ_TYPE_LEVEL_LOW:
+		polarity &= ~mask;
+		type &= ~mask;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	cp_intc_write(polarity, CP_INTC_SYS_POLARITY(reg));
+	cp_intc_write(type, CP_INTC_SYS_TYPE(reg));
+
+	return 0;
+}
+
+static struct irq_chip cp_intc_irq_chip = {
+	.name		= "cp_intc",
+	.ack		= cp_intc_ack_irq,
+	.mask		= cp_intc_mask_irq,
+	.unmask		= cp_intc_unmask_irq,
+	.set_type	= cp_intc_set_irq_type,
+};
+
+void __init cp_intc_init(void __iomem *base, unsigned short num_irq,
+			 u8 *irq_prio)
+{
+	unsigned num_reg	= BITS_TO_LONGS(num_irq);
+	int i;
+
+	cp_intc_base = base;
+
+	cp_intc_write(0, CP_INTC_GLOBAL_ENABLE);
+
+	/* Disable all host interrupts */
+	cp_intc_write(0, CP_INTC_HOST_ENABLE(0));
+
+	/* Disable system interrupts */
+	for (i = 0; i < num_reg; i++)
+		cp_intc_write(~0, CP_INTC_SYS_ENABLE_CLR(i));
+
+	/* Set to normal mode, no nesting, no priority hold */
+	cp_intc_write(0, CP_INTC_CTRL);
+	cp_intc_write(0, CP_INTC_HOST_CTRL);
+
+	/* Clear system interrupt status */
+	for (i = 0; i < num_reg; i++)
+		cp_intc_write(~0, CP_INTC_SYS_STAT_CLR(i));
+
+	/* Enable nIRQ (what about nFIQ?) */
+	cp_intc_write(1, CP_INTC_HOST_ENABLE_IDX_SET);
+
+	/*
+	 * Priority is determined by host channel: lower channel number has
+	 * higher priority, i.e. channel 0 has the highest priority and
+	 * channel 31 has the lowest.
+	 */
+	num_reg = (num_irq + 3) >> 2;	/* 4 channels per register */
+	if (irq_prio) {
+		unsigned j, k;
+		u32 val;
+
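+		/* Pack four 8-bit channel numbers into each CHAN_MAP
+		 * register, lowest-numbered interrupt in the least
+		 * significant byte.
+		 */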
+		for (k = i = 0; i < num_reg; i++) {
+			for (val = j = 0; j < 4; j++, k++) {
+				val >>= 8;
+				if (k < num_irq)
+					val |= irq_prio[k] << 24;
+			}
+
+			cp_intc_write(val, CP_INTC_CHAN_MAP(i));
+		}
+	} else {
+		/*
+		 * Default everything to channel 15 if priority not specified.
+		 * Note that channel 0-1 are mapped to nFIQ and channels 2-31
+		 * are mapped to nIRQ.
+		 */
+		for (i = 0; i < num_reg; i++)
+			cp_intc_write(0x0f0f0f0f, CP_INTC_CHAN_MAP(i));
+	}
+
+	/* Set up genirq dispatching for cp_intc */
+	for (i = 0; i < num_irq; i++) {
+		set_irq_chip(i, &cp_intc_irq_chip);
+		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
+		set_irq_handler(i, handle_edge_irq);
+	}
+
+	/* Enable global interrupt */
+	cp_intc_write(1, CP_INTC_GLOBAL_ENABLE);
+}
diff --git a/arch/arm/mach-davinci/devices.c b/arch/arm/mach-davinci/devices.c
index a31370b..c85091c 100644
--- a/arch/arm/mach-davinci/devices.c
+++ b/arch/arm/mach-davinci/devices.c
@@ -23,8 +23,14 @@
 #include <mach/irqs.h>
 #include <mach/cputype.h>
 #include <mach/mux.h>
+#include <mach/edma.h>
+#include <mach/mmc.h>
+#include <mach/time.h>
 
 #define DAVINCI_I2C_BASE	     0x01C21000
+#define DAVINCI_MMCSD0_BASE	     0x01E10000
+#define DM355_MMCSD0_BASE	     0x01E11000
+#define DM355_MMCSD1_BASE	     0x01E00000
 
 static struct resource i2c_resources[] = {
 	{
@@ -54,3 +60,208 @@
 	(void) platform_device_register(&davinci_i2c_device);
 }
 
+#if	defined(CONFIG_MMC_DAVINCI) || defined(CONFIG_MMC_DAVINCI_MODULE)
+
+static u64 mmcsd0_dma_mask = DMA_32BIT_MASK;
+
+static struct resource mmcsd0_resources[] = {
+	{
+		/* different on dm355 */
+		.start = DAVINCI_MMCSD0_BASE,
+		.end   = DAVINCI_MMCSD0_BASE + SZ_4K - 1,
+		.flags = IORESOURCE_MEM,
+	},
+	/* IRQs:  MMC/SD, then SDIO */
+	{
+		.start = IRQ_MMCINT,
+		.flags = IORESOURCE_IRQ,
+	}, {
+		/* different on dm355 */
+		.start = IRQ_SDIOINT,
+		.flags = IORESOURCE_IRQ,
+	},
+	/* DMA channels: RX, then TX */
+	{
+		.start = DAVINCI_DMA_MMCRXEVT,
+		.flags = IORESOURCE_DMA,
+	}, {
+		.start = DAVINCI_DMA_MMCTXEVT,
+		.flags = IORESOURCE_DMA,
+	},
+};
+
+static struct platform_device davinci_mmcsd0_device = {
+	.name = "davinci_mmc",
+	.id = 0,
+	.dev = {
+		.dma_mask = &mmcsd0_dma_mask,
+		.coherent_dma_mask = DMA_32BIT_MASK,
+	},
+	.num_resources = ARRAY_SIZE(mmcsd0_resources),
+	.resource = mmcsd0_resources,
+};
+
+static u64 mmcsd1_dma_mask = DMA_32BIT_MASK;
+
+static struct resource mmcsd1_resources[] = {
+	{
+		.start = DM355_MMCSD1_BASE,
+		.end   = DM355_MMCSD1_BASE + SZ_4K - 1,
+		.flags = IORESOURCE_MEM,
+	},
+	/* IRQs:  MMC/SD, then SDIO */
+	{
+		.start = IRQ_DM355_MMCINT1,
+		.flags = IORESOURCE_IRQ,
+	}, {
+		.start = IRQ_DM355_SDIOINT1,
+		.flags = IORESOURCE_IRQ,
+	},
+	/* DMA channels: RX, then TX */
+	{
+		.start = 30,	/* rx */
+		.flags = IORESOURCE_DMA,
+	}, {
+		.start = 31,	/* tx */
+		.flags = IORESOURCE_DMA,
+	},
+};
+
+static struct platform_device davinci_mmcsd1_device = {
+	.name = "davinci_mmc",
+	.id = 1,
+	.dev = {
+		.dma_mask = &mmcsd1_dma_mask,
+		.coherent_dma_mask = DMA_32BIT_MASK,
+	},
+	.num_resources = ARRAY_SIZE(mmcsd1_resources),
+	.resource = mmcsd1_resources,
+};
+
+
+void __init davinci_setup_mmc(int module, struct davinci_mmc_config *config)
+{
+	struct platform_device	*pdev = NULL;
+
+	if (WARN_ON(cpu_is_davinci_dm646x()))
+		return;
+
+	/* REVISIT: update PINMUX, ARM_IRQMUX, and EDMA_EVTMUX here too;
+	 * for example if MMCSD1 is used for SDIO, maybe DAT2 is unused.
+	 *
+	 * FIXME dm6441 (no MMC/SD), dm357 (one), and dm335 (two) are
+	 * not handled right here ...
+	 */
+	switch (module) {
+	case 1:
+		if (!cpu_is_davinci_dm355())
+			break;
+
+		/* REVISIT we may not need all these pins if e.g. this
+		 * is a hard-wired SDIO device...
+		 */
+		davinci_cfg_reg(DM355_SD1_CMD);
+		davinci_cfg_reg(DM355_SD1_CLK);
+		davinci_cfg_reg(DM355_SD1_DATA0);
+		davinci_cfg_reg(DM355_SD1_DATA1);
+		davinci_cfg_reg(DM355_SD1_DATA2);
+		davinci_cfg_reg(DM355_SD1_DATA3);
+
+		pdev = &davinci_mmcsd1_device;
+		break;
+	case 0:
+		if (cpu_is_davinci_dm355()) {
+			mmcsd0_resources[0].start = DM355_MMCSD0_BASE;
+			mmcsd0_resources[0].end = DM355_MMCSD0_BASE + SZ_4K - 1;
+			mmcsd0_resources[2].start = IRQ_DM355_SDIOINT0;
+
+			/* expose all 6 MMC0 signals:  CLK, CMD, DATA[0..3] */
+			davinci_cfg_reg(DM355_MMCSD0);
+
+			/* enable RX EDMA */
+			davinci_cfg_reg(DM355_EVT26_MMC0_RX);
+		}
+
+		else if (cpu_is_davinci_dm644x()) {
+			/* REVISIT: should this be in board-init code? */
+			void __iomem *base =
+				IO_ADDRESS(DAVINCI_SYSTEM_MODULE_BASE);
+
+			/* Power-on 3.3V IO cells */
+			__raw_writel(0, base + DM64XX_VDD3P3V_PWDN);
+			/* Set up the pull register for MMC */
+			davinci_cfg_reg(DM644X_MSTK);
+		}
+
+		pdev = &davinci_mmcsd0_device;
+		break;
+	}
+
+	if (WARN_ON(!pdev))
+		return;
+
+	pdev->dev.platform_data = config;
+	platform_device_register(pdev);
+}
+
+#else
+
+void __init davinci_setup_mmc(int module, struct davinci_mmc_config *config)
+{
+}
+
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+static struct resource wdt_resources[] = {
+	{
+		.flags	= IORESOURCE_MEM,
+	},
+};
+
+struct platform_device davinci_wdt_device = {
+	.name		= "watchdog",
+	.id		= -1,
+	.num_resources	= ARRAY_SIZE(wdt_resources),
+	.resource	= wdt_resources,
+};
+
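+/*
+ * The watchdog register window is SoC-specific, so fill in the resource
+ * from davinci_soc_info at init time instead of statically.
+ */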
+static void davinci_init_wdt(void)
+{
+	struct davinci_soc_info *soc_info = &davinci_soc_info;
+
+	wdt_resources[0].start = (resource_size_t)soc_info->wdt_base;
+	wdt_resources[0].end = (resource_size_t)soc_info->wdt_base + SZ_1K - 1;
+
+	platform_device_register(&davinci_wdt_device);
+}
+
+/*-------------------------------------------------------------------------*/
+
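+/* Timer register windows and IRQ pairs; each SoC picks its clockevent
+ * and clocksource halves via its davinci_timer_info.
+ */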
+struct davinci_timer_instance davinci_timer_instance[2] = {
+	{
+		.base		= IO_ADDRESS(DAVINCI_TIMER0_BASE),
+		.bottom_irq	= IRQ_TINT0_TINT12,
+		.top_irq	= IRQ_TINT0_TINT34,
+	},
+	{
+		.base		= IO_ADDRESS(DAVINCI_TIMER1_BASE),
+		.bottom_irq	= IRQ_TINT1_TINT12,
+		.top_irq	= IRQ_TINT1_TINT34,
+	},
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int __init davinci_init_devices(void)
+{
+	/* please keep these calls, and their implementations above,
+	 * in alphabetical order so they're easier to sort through.
+	 */
+	davinci_init_wdt();
+
+	return 0;
+}
+arch_initcall(davinci_init_devices);
+
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
new file mode 100644
index 0000000..baaaf32
--- /dev/null
+++ b/arch/arm/mach-davinci/dm355.c
@@ -0,0 +1,730 @@
+/*
+ * TI DaVinci DM355 chip specific setup
+ *
+ * Author: Kevin Hilman, Deep Root Systems, LLC
+ *
+ * 2007 (c) Deep Root Systems, LLC. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/serial_8250.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/gpio.h>
+
+#include <linux/spi/spi.h>
+
+#include <asm/mach/map.h>
+
+#include <mach/dm355.h>
+#include <mach/clock.h>
+#include <mach/cputype.h>
+#include <mach/edma.h>
+#include <mach/psc.h>
+#include <mach/mux.h>
+#include <mach/irqs.h>
+#include <mach/time.h>
+#include <mach/serial.h>
+#include <mach/common.h>
+
+#include "clock.h"
+#include "mux.h"
+
+#define DM355_UART2_BASE	(IO_PHYS + 0x206000)
+
+/*
+ * Device specific clocks
+ */
+#define DM355_REF_FREQ		24000000	/* 24 or 36 MHz */
+
+static struct pll_data pll1_data = {
+	.num       = 1,
+	.phys_base = DAVINCI_PLL1_BASE,
+	.flags     = PLL_HAS_PREDIV | PLL_HAS_POSTDIV,
+};
+
+static struct pll_data pll2_data = {
+	.num       = 2,
+	.phys_base = DAVINCI_PLL2_BASE,
+	.flags     = PLL_HAS_PREDIV,
+};
+
+static struct clk ref_clk = {
+	.name = "ref_clk",
+	/* FIXME -- crystal rate is board-specific */
+	.rate = DM355_REF_FREQ,
+};
+
+static struct clk pll1_clk = {
+	.name = "pll1",
+	.parent = &ref_clk,
+	.flags = CLK_PLL,
+	.pll_data = &pll1_data,
+};
+
+static struct clk pll1_aux_clk = {
+	.name = "pll1_aux_clk",
+	.parent = &pll1_clk,
+	.flags = CLK_PLL | PRE_PLL,
+};
+
+static struct clk pll1_sysclk1 = {
+	.name = "pll1_sysclk1",
+	.parent = &pll1_clk,
+	.flags = CLK_PLL,
+	.div_reg = PLLDIV1,
+};
+
+static struct clk pll1_sysclk2 = {
+	.name = "pll1_sysclk2",
+	.parent = &pll1_clk,
+	.flags = CLK_PLL,
+	.div_reg = PLLDIV2,
+};
+
+static struct clk pll1_sysclk3 = {
+	.name = "pll1_sysclk3",
+	.parent = &pll1_clk,
+	.flags = CLK_PLL,
+	.div_reg = PLLDIV3,
+};
+
+static struct clk pll1_sysclk4 = {
+	.name = "pll1_sysclk4",
+	.parent = &pll1_clk,
+	.flags = CLK_PLL,
+	.div_reg = PLLDIV4,
+};
+
+static struct clk pll1_sysclkbp = {
+	.name = "pll1_sysclkbp",
+	.parent = &pll1_clk,
+	.flags = CLK_PLL | PRE_PLL,
+	.div_reg = BPDIV
+};
+
+static struct clk vpss_dac_clk = {
+	.name = "vpss_dac",
+	.parent = &pll1_sysclk3,
+	.lpsc = DM355_LPSC_VPSS_DAC,
+};
+
+static struct clk vpss_master_clk = {
+	.name = "vpss_master",
+	.parent = &pll1_sysclk4,
+	.lpsc = DAVINCI_LPSC_VPSSMSTR,
+	.flags = CLK_PSC,
+};
+
+static struct clk vpss_slave_clk = {
+	.name = "vpss_slave",
+	.parent = &pll1_sysclk4,
+	.lpsc = DAVINCI_LPSC_VPSSSLV,
+};
+
+
+static struct clk clkout1_clk = {
+	.name = "clkout1",
+	.parent = &pll1_aux_clk,
+	/* NOTE:  clkout1 can be externally gated by muxing GPIO-18 */
+};
+
+static struct clk clkout2_clk = {
+	.name = "clkout2",
+	.parent = &pll1_sysclkbp,
+};
+
+static struct clk pll2_clk = {
+	.name = "pll2",
+	.parent = &ref_clk,
+	.flags = CLK_PLL,
+	.pll_data = &pll2_data,
+};
+
+static struct clk pll2_sysclk1 = {
+	.name = "pll2_sysclk1",
+	.parent = &pll2_clk,
+	.flags = CLK_PLL,
+	.div_reg = PLLDIV1,
+};
+
+static struct clk pll2_sysclkbp = {
+	.name = "pll2_sysclkbp",
+	.parent = &pll2_clk,
+	.flags = CLK_PLL | PRE_PLL,
+	.div_reg = BPDIV
+};
+
+static struct clk clkout3_clk = {
+	.name = "clkout3",
+	.parent = &pll2_sysclkbp,
+	/* NOTE:  clkout3 can be externally gated by muxing GPIO-16 */
+};
+
+static struct clk arm_clk = {
+	.name = "arm_clk",
+	.parent = &pll1_sysclk1,
+	.lpsc = DAVINCI_LPSC_ARM,
+	.flags = ALWAYS_ENABLED,
+};
+
+/*
+ * NOT LISTED below, and not touched by Linux
+ *   - in SyncReset state by default
+ *	.lpsc = DAVINCI_LPSC_TPCC,
+ *	.lpsc = DAVINCI_LPSC_TPTC0,
+ *	.lpsc = DAVINCI_LPSC_TPTC1,
+ *	.lpsc = DAVINCI_LPSC_DDR_EMIF, .parent = &sysclk2_clk,
+ *	.lpsc = DAVINCI_LPSC_MEMSTICK,
+ *   - in Enabled state by default
+ *	.lpsc = DAVINCI_LPSC_SYSTEM_SUBSYS,
+ *	.lpsc = DAVINCI_LPSC_SCR2,	// "bus"
+ *	.lpsc = DAVINCI_LPSC_SCR3,	// "bus"
+ *	.lpsc = DAVINCI_LPSC_SCR4,	// "bus"
+ *	.lpsc = DAVINCI_LPSC_CROSSBAR,	// "emulation"
+ *	.lpsc = DAVINCI_LPSC_CFG27,	// "test"
+ *	.lpsc = DAVINCI_LPSC_CFG3,	// "test"
+ *	.lpsc = DAVINCI_LPSC_CFG5,	// "test"
+ */
+
+static struct clk mjcp_clk = {
+	.name = "mjcp",
+	.parent = &pll1_sysclk1,
+	.lpsc = DAVINCI_LPSC_IMCOP,
+};
+
+static struct clk uart0_clk = {
+	.name = "uart0",
+	.parent = &pll1_aux_clk,
+	.lpsc = DAVINCI_LPSC_UART0,
+};
+
+static struct clk uart1_clk = {
+	.name = "uart1",
+	.parent = &pll1_aux_clk,
+	.lpsc = DAVINCI_LPSC_UART1,
+};
+
+static struct clk uart2_clk = {
+	.name = "uart2",
+	.parent = &pll1_sysclk2,
+	.lpsc = DAVINCI_LPSC_UART2,
+};
+
+static struct clk i2c_clk = {
+	.name = "i2c",
+	.parent = &pll1_aux_clk,
+	.lpsc = DAVINCI_LPSC_I2C,
+};
+
+static struct clk asp0_clk = {
+	.name = "asp0",
+	.parent = &pll1_sysclk2,
+	.lpsc = DAVINCI_LPSC_McBSP,
+};
+
+static struct clk asp1_clk = {
+	.name = "asp1",
+	.parent = &pll1_sysclk2,
+	.lpsc = DM355_LPSC_McBSP1,
+};
+
+static struct clk mmcsd0_clk = {
+	.name = "mmcsd0",
+	.parent = &pll1_sysclk2,
+	.lpsc = DAVINCI_LPSC_MMC_SD,
+};
+
+static struct clk mmcsd1_clk = {
+	.name = "mmcsd1",
+	.parent = &pll1_sysclk2,
+	.lpsc = DM355_LPSC_MMC_SD1,
+};
+
+static struct clk spi0_clk = {
+	.name = "spi0",
+	.parent = &pll1_sysclk2,
+	.lpsc = DAVINCI_LPSC_SPI,
+};
+
+static struct clk spi1_clk = {
+	.name = "spi1",
+	.parent = &pll1_sysclk2,
+	.lpsc = DM355_LPSC_SPI1,
+};
+
+static struct clk spi2_clk = {
+	.name = "spi2",
+	.parent = &pll1_sysclk2,
+	.lpsc = DM355_LPSC_SPI2,
+};
+
+static struct clk gpio_clk = {
+	.name = "gpio",
+	.parent = &pll1_sysclk2,
+	.lpsc = DAVINCI_LPSC_GPIO,
+};
+
+static struct clk aemif_clk = {
+	.name = "aemif",
+	.parent = &pll1_sysclk2,
+	.lpsc = DAVINCI_LPSC_AEMIF,
+};
+
+static struct clk pwm0_clk = {
+	.name = "pwm0",
+	.parent = &pll1_aux_clk,
+	.lpsc = DAVINCI_LPSC_PWM0,
+};
+
+static struct clk pwm1_clk = {
+	.name = "pwm1",
+	.parent = &pll1_aux_clk,
+	.lpsc = DAVINCI_LPSC_PWM1,
+};
+
+static struct clk pwm2_clk = {
+	.name = "pwm2",
+	.parent = &pll1_aux_clk,
+	.lpsc = DAVINCI_LPSC_PWM2,
+};
+
+static struct clk pwm3_clk = {
+	.name = "pwm3",
+	.parent = &pll1_aux_clk,
+	.lpsc = DM355_LPSC_PWM3,
+};
+
+static struct clk timer0_clk = {
+	.name = "timer0",
+	.parent = &pll1_aux_clk,
+	.lpsc = DAVINCI_LPSC_TIMER0,
+};
+
+static struct clk timer1_clk = {
+	.name = "timer1",
+	.parent = &pll1_aux_clk,
+	.lpsc = DAVINCI_LPSC_TIMER1,
+};
+
+static struct clk timer2_clk = {
+	.name = "timer2",
+	.parent = &pll1_aux_clk,
+	.lpsc = DAVINCI_LPSC_TIMER2,
+	.usecount = 1,              /* REVISIT: why can't this be disabled? */
+};
+
+static struct clk timer3_clk = {
+	.name = "timer3",
+	.parent = &pll1_aux_clk,
+	.lpsc = DM355_LPSC_TIMER3,
+};
+
+static struct clk rto_clk = {
+	.name = "rto",
+	.parent = &pll1_aux_clk,
+	.lpsc = DM355_LPSC_RTO,
+};
+
+static struct clk usb_clk = {
+	.name = "usb",
+	.parent = &pll1_sysclk2,
+	.lpsc = DAVINCI_LPSC_USB,
+};
+
+static struct davinci_clk dm355_clks[] = {
+	CLK(NULL, "ref", &ref_clk),
+	CLK(NULL, "pll1", &pll1_clk),
+	CLK(NULL, "pll1_sysclk1", &pll1_sysclk1),
+	CLK(NULL, "pll1_sysclk2", &pll1_sysclk2),
+	CLK(NULL, "pll1_sysclk3", &pll1_sysclk3),
+	CLK(NULL, "pll1_sysclk4", &pll1_sysclk4),
+	CLK(NULL, "pll1_aux", &pll1_aux_clk),
+	CLK(NULL, "pll1_sysclkbp", &pll1_sysclkbp),
+	CLK(NULL, "vpss_dac", &vpss_dac_clk),
+	CLK(NULL, "vpss_master", &vpss_master_clk),
+	CLK(NULL, "vpss_slave", &vpss_slave_clk),
+	CLK(NULL, "clkout1", &clkout1_clk),
+	CLK(NULL, "clkout2", &clkout2_clk),
+	CLK(NULL, "pll2", &pll2_clk),
+	CLK(NULL, "pll2_sysclk1", &pll2_sysclk1),
+	CLK(NULL, "pll2_sysclkbp", &pll2_sysclkbp),
+	CLK(NULL, "clkout3", &clkout3_clk),
+	CLK(NULL, "arm", &arm_clk),
+	CLK(NULL, "mjcp", &mjcp_clk),
+	CLK(NULL, "uart0", &uart0_clk),
+	CLK(NULL, "uart1", &uart1_clk),
+	CLK(NULL, "uart2", &uart2_clk),
+	CLK("i2c_davinci.1", NULL, &i2c_clk),
+	CLK("soc-audio.0", NULL, &asp0_clk),
+	CLK("soc-audio.1", NULL, &asp1_clk),
+	CLK("davinci_mmc.0", NULL, &mmcsd0_clk),
+	CLK("davinci_mmc.1", NULL, &mmcsd1_clk),
+	CLK(NULL, "spi0", &spi0_clk),
+	CLK(NULL, "spi1", &spi1_clk),
+	CLK(NULL, "spi2", &spi2_clk),
+	CLK(NULL, "gpio", &gpio_clk),
+	CLK(NULL, "aemif", &aemif_clk),
+	CLK(NULL, "pwm0", &pwm0_clk),
+	CLK(NULL, "pwm1", &pwm1_clk),
+	CLK(NULL, "pwm2", &pwm2_clk),
+	CLK(NULL, "pwm3", &pwm3_clk),
+	CLK(NULL, "timer0", &timer0_clk),
+	CLK(NULL, "timer1", &timer1_clk),
+	CLK("watchdog", NULL, &timer2_clk),
+	CLK(NULL, "timer3", &timer3_clk),
+	CLK(NULL, "rto", &rto_clk),
+	CLK(NULL, "usb", &usb_clk),
+	CLK(NULL, NULL, NULL),
+};
+
+/*----------------------------------------------------------------------*/
+
+static u64 dm355_spi0_dma_mask = DMA_BIT_MASK(32);
+
+static struct resource dm355_spi0_resources[] = {
+	{
+		.start = 0x01c66000,
+		.end   = 0x01c667ff,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_DM355_SPINT0_1,
+		.flags = IORESOURCE_IRQ,
+	},
+	/* Not yet used, so not included:
+	 * IORESOURCE_IRQ:
+	 *  - IRQ_DM355_SPINT0_0
+	 * IORESOURCE_DMA:
+	 *  - DAVINCI_DMA_SPI_SPIX
+	 *  - DAVINCI_DMA_SPI_SPIR
+	 */
+};
+
+static struct platform_device dm355_spi0_device = {
+	.name = "spi_davinci",
+	.id = 0,
+	.dev = {
+		.dma_mask = &dm355_spi0_dma_mask,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
+	},
+	.num_resources = ARRAY_SIZE(dm355_spi0_resources),
+	.resource = dm355_spi0_resources,
+};
+
+void __init dm355_init_spi0(unsigned chipselect_mask,
+		struct spi_board_info *info, unsigned len)
+{
+	/* for now, assume we need MISO */
+	davinci_cfg_reg(DM355_SPI0_SDI);
+
+	/* not all slaves will be wired up */
+	if (chipselect_mask & BIT(0))
+		davinci_cfg_reg(DM355_SPI0_SDENA0);
+	if (chipselect_mask & BIT(1))
+		davinci_cfg_reg(DM355_SPI0_SDENA1);
+
+	spi_register_board_info(info, len);
+
+	platform_device_register(&dm355_spi0_device);
+}
+
+/*----------------------------------------------------------------------*/
+
+#define PINMUX0		0x00
+#define PINMUX1		0x04
+#define PINMUX2		0x08
+#define PINMUX3		0x0c
+#define PINMUX4		0x10
+#define INTMUX		0x18
+#define EVTMUX		0x1c
+
+/*
+ * Device specific mux setup
+ *
+ *	soc	description	mux  mode   mode  mux	 dbg
+ *				reg  offset mask  mode
+ */
+static const struct mux_config dm355_pins[] = {
+#ifdef CONFIG_DAVINCI_MUX
+MUX_CFG(DM355,	MMCSD0,		4,   2,     1,	  0,	 false)
+
+MUX_CFG(DM355,	SD1_CLK,	3,   6,     1,	  1,	 false)
+MUX_CFG(DM355,	SD1_CMD,	3,   7,     1,	  1,	 false)
+MUX_CFG(DM355,	SD1_DATA3,	3,   8,     3,	  1,	 false)
+MUX_CFG(DM355,	SD1_DATA2,	3,   10,    3,	  1,	 false)
+MUX_CFG(DM355,	SD1_DATA1,	3,   12,    3,	  1,	 false)
+MUX_CFG(DM355,	SD1_DATA0,	3,   14,    3,	  1,	 false)
+
+MUX_CFG(DM355,	I2C_SDA,	3,   19,    1,	  1,	 false)
+MUX_CFG(DM355,	I2C_SCL,	3,   20,    1,	  1,	 false)
+
+MUX_CFG(DM355,	MCBSP0_BDX,	3,   0,     1,	  1,	 false)
+MUX_CFG(DM355,	MCBSP0_X,	3,   1,     1,	  1,	 false)
+MUX_CFG(DM355,	MCBSP0_BFSX,	3,   2,     1,	  1,	 false)
+MUX_CFG(DM355,	MCBSP0_BDR,	3,   3,     1,	  1,	 false)
+MUX_CFG(DM355,	MCBSP0_R,	3,   4,     1,	  1,	 false)
+MUX_CFG(DM355,	MCBSP0_BFSR,	3,   5,     1,	  1,	 false)
+
+MUX_CFG(DM355,	SPI0_SDI,	4,   1,     1,    0,	 false)
+MUX_CFG(DM355,	SPI0_SDENA0,	4,   0,     1,    0,	 false)
+MUX_CFG(DM355,	SPI0_SDENA1,	3,   28,    1,    1,	 false)
+
+INT_CFG(DM355,  INT_EDMA_CC,	      2,    1,    1,     false)
+INT_CFG(DM355,  INT_EDMA_TC0_ERR,     3,    1,    1,     false)
+INT_CFG(DM355,  INT_EDMA_TC1_ERR,     4,    1,    1,     false)
+
+EVT_CFG(DM355,  EVT8_ASP1_TX,	      0,    1,    0,     false)
+EVT_CFG(DM355,  EVT9_ASP1_RX,	      1,    1,    0,     false)
+EVT_CFG(DM355,  EVT26_MMC0_RX,	      2,    1,    0,     false)
+#endif
+};
+
+static u8 dm355_default_priorities[DAVINCI_N_AINTC_IRQ] = {
+	[IRQ_DM355_CCDC_VDINT0]		= 2,
+	[IRQ_DM355_CCDC_VDINT1]		= 6,
+	[IRQ_DM355_CCDC_VDINT2]		= 6,
+	[IRQ_DM355_IPIPE_HST]		= 6,
+	[IRQ_DM355_H3AINT]		= 6,
+	[IRQ_DM355_IPIPE_SDR]		= 6,
+	[IRQ_DM355_IPIPEIFINT]		= 6,
+	[IRQ_DM355_OSDINT]		= 7,
+	[IRQ_DM355_VENCINT]		= 6,
+	[IRQ_ASQINT]			= 6,
+	[IRQ_IMXINT]			= 6,
+	[IRQ_USBINT]			= 4,
+	[IRQ_DM355_RTOINT]		= 4,
+	[IRQ_DM355_UARTINT2]		= 7,
+	[IRQ_DM355_TINT6]		= 7,
+	[IRQ_CCINT0]			= 5,	/* dma */
+	[IRQ_CCERRINT]			= 5,	/* dma */
+	[IRQ_TCERRINT0]			= 5,	/* dma */
+	[IRQ_TCERRINT]			= 5,	/* dma */
+	[IRQ_DM355_SPINT2_1]		= 7,
+	[IRQ_DM355_TINT7]		= 4,
+	[IRQ_DM355_SDIOINT0]		= 7,
+	[IRQ_MBXINT]			= 7,
+	[IRQ_MBRINT]			= 7,
+	[IRQ_MMCINT]			= 7,
+	[IRQ_DM355_MMCINT1]		= 7,
+	[IRQ_DM355_PWMINT3]		= 7,
+	[IRQ_DDRINT]			= 7,
+	[IRQ_AEMIFINT]			= 7,
+	[IRQ_DM355_SDIOINT1]		= 4,
+	[IRQ_TINT0_TINT12]		= 2,	/* clockevent */
+	[IRQ_TINT0_TINT34]		= 2,	/* clocksource */
+	[IRQ_TINT1_TINT12]		= 7,	/* DSP timer */
+	[IRQ_TINT1_TINT34]		= 7,	/* system tick */
+	[IRQ_PWMINT0]			= 7,
+	[IRQ_PWMINT1]			= 7,
+	[IRQ_PWMINT2]			= 7,
+	[IRQ_I2C]			= 3,
+	[IRQ_UARTINT0]			= 3,
+	[IRQ_UARTINT1]			= 3,
+	[IRQ_DM355_SPINT0_0]		= 3,
+	[IRQ_DM355_SPINT0_1]		= 3,
+	[IRQ_DM355_GPIO0]		= 3,
+	[IRQ_DM355_GPIO1]		= 7,
+	[IRQ_DM355_GPIO2]		= 4,
+	[IRQ_DM355_GPIO3]		= 4,
+	[IRQ_DM355_GPIO4]		= 7,
+	[IRQ_DM355_GPIO5]		= 7,
+	[IRQ_DM355_GPIO6]		= 7,
+	[IRQ_DM355_GPIO7]		= 7,
+	[IRQ_DM355_GPIO8]		= 7,
+	[IRQ_DM355_GPIO9]		= 7,
+	[IRQ_DM355_GPIOBNK0]		= 7,
+	[IRQ_DM355_GPIOBNK1]		= 7,
+	[IRQ_DM355_GPIOBNK2]		= 7,
+	[IRQ_DM355_GPIOBNK3]		= 7,
+	[IRQ_DM355_GPIOBNK4]		= 7,
+	[IRQ_DM355_GPIOBNK5]		= 7,
+	[IRQ_DM355_GPIOBNK6]		= 7,
+	[IRQ_COMMTX]			= 7,
+	[IRQ_COMMRX]			= 7,
+	[IRQ_EMUINT]			= 7,
+};
+
+/*----------------------------------------------------------------------*/
+
+static const s8 dma_chan_dm355_no_event[] = {
+	12, 13, 24, 56, 57,
+	58, 59, 60, 61, 62,
+	63,
+	-1
+};
+
+static struct edma_soc_info dm355_edma_info = {
+	.n_channel	= 64,
+	.n_region	= 4,
+	.n_slot		= 128,
+	.n_tc		= 2,
+	.noevent	= dma_chan_dm355_no_event,
+};
+
+static struct resource edma_resources[] = {
+	{
+		.name	= "edma_cc",
+		.start	= 0x01c00000,
+		.end	= 0x01c00000 + SZ_64K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		.name	= "edma_tc0",
+		.start	= 0x01c10000,
+		.end	= 0x01c10000 + SZ_1K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		.name	= "edma_tc1",
+		.start	= 0x01c10400,
+		.end	= 0x01c10400 + SZ_1K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		.start	= IRQ_CCINT0,
+		.flags	= IORESOURCE_IRQ,
+	},
+	{
+		.start	= IRQ_CCERRINT,
+		.flags	= IORESOURCE_IRQ,
+	},
+	/* not using (or muxing) TC*_ERR */
+};
+
+static struct platform_device dm355_edma_device = {
+	.name			= "edma",
+	.id			= -1,
+	.dev.platform_data	= &dm355_edma_info,
+	.num_resources		= ARRAY_SIZE(edma_resources),
+	.resource		= edma_resources,
+};
+
+/*----------------------------------------------------------------------*/
+
+static struct map_desc dm355_io_desc[] = {
+	{
+		.virtual	= IO_VIRT,
+		.pfn		= __phys_to_pfn(IO_PHYS),
+		.length		= IO_SIZE,
+		.type		= MT_DEVICE
+	},
+	{
+		.virtual	= SRAM_VIRT,
+		.pfn		= __phys_to_pfn(0x00010000),
+		.length		= SZ_32K,
+		/* MT_MEMORY_NONCACHED requires supersection alignment */
+		.type		= MT_DEVICE,
+	},
+};
+
+/* Contents of JTAG ID register used to identify exact cpu type */
+static struct davinci_id dm355_ids[] = {
+	{
+		.variant	= 0x0,
+		.part_no	= 0xb73b,
+		.manufacturer	= 0x00f,
+		.cpu_id		= DAVINCI_CPU_ID_DM355,
+		.name		= "dm355",
+	},
+};
+
+static void __iomem *dm355_psc_bases[] = {
+	IO_ADDRESS(DAVINCI_PWR_SLEEP_CNTRL_BASE),
+};
+
+/*
+ * T0_BOT: Timer 0, bottom:  clockevent source for hrtimers
+ * T0_TOP: Timer 0, top   :  clocksource for generic timekeeping
+ * T1_BOT: Timer 1, bottom:  (used by DSP in TI DSPLink code)
+ * T1_TOP: Timer 1, top   :  <unused>
+ */
+struct davinci_timer_info dm355_timer_info = {
+	.timers		= davinci_timer_instance,
+	.clockevent_id	= T0_BOT,
+	.clocksource_id	= T0_TOP,
+};
+
+static struct plat_serial8250_port dm355_serial_platform_data[] = {
+	{
+		.mapbase	= DAVINCI_UART0_BASE,
+		.irq		= IRQ_UARTINT0,
+		.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
+				  UPF_IOREMAP,
+		.iotype		= UPIO_MEM,
+		.regshift	= 2,
+	},
+	{
+		.mapbase	= DAVINCI_UART1_BASE,
+		.irq		= IRQ_UARTINT1,
+		.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
+				  UPF_IOREMAP,
+		.iotype		= UPIO_MEM,
+		.regshift	= 2,
+	},
+	{
+		.mapbase	= DM355_UART2_BASE,
+		.irq		= IRQ_DM355_UARTINT2,
+		.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
+				  UPF_IOREMAP,
+		.iotype		= UPIO_MEM,
+		.regshift	= 2,
+	},
+	{
+		.flags		= 0
+	},
+};
+
+static struct platform_device dm355_serial_device = {
+	.name			= "serial8250",
+	.id			= PLAT8250_DEV_PLATFORM,
+	.dev			= {
+		.platform_data	= dm355_serial_platform_data,
+	},
+};
+
+static struct davinci_soc_info davinci_soc_info_dm355 = {
+	.io_desc		= dm355_io_desc,
+	.io_desc_num		= ARRAY_SIZE(dm355_io_desc),
+	.jtag_id_base		= IO_ADDRESS(0x01c40028),
+	.ids			= dm355_ids,
+	.ids_num		= ARRAY_SIZE(dm355_ids),
+	.cpu_clks		= dm355_clks,
+	.psc_bases		= dm355_psc_bases,
+	.psc_bases_num		= ARRAY_SIZE(dm355_psc_bases),
+	.pinmux_base		= IO_ADDRESS(DAVINCI_SYSTEM_MODULE_BASE),
+	.pinmux_pins		= dm355_pins,
+	.pinmux_pins_num	= ARRAY_SIZE(dm355_pins),
+	.intc_base		= IO_ADDRESS(DAVINCI_ARM_INTC_BASE),
+	.intc_type		= DAVINCI_INTC_TYPE_AINTC,
+	.intc_irq_prios		= dm355_default_priorities,
+	.intc_irq_num		= DAVINCI_N_AINTC_IRQ,
+	.timer_info		= &dm355_timer_info,
+	.wdt_base		= IO_ADDRESS(DAVINCI_WDOG_BASE),
+	.gpio_base		= IO_ADDRESS(DAVINCI_GPIO_BASE),
+	.gpio_num		= 104,
+	.gpio_irq		= IRQ_DM355_GPIOBNK0,
+	.serial_dev		= &dm355_serial_device,
+	.sram_dma		= 0x00010000,
+	.sram_len		= SZ_32K,
+};
+
+void __init dm355_init(void)
+{
+	davinci_common_init(&davinci_soc_info_dm355);
+}
+
+static int __init dm355_init_devices(void)
+{
+	if (!cpu_is_davinci_dm355())
+		return 0;
+
+	davinci_cfg_reg(DM355_INT_EDMA_CC);
+	platform_device_register(&dm355_edma_device);
+	return 0;
+}
+postcore_initcall(dm355_init_devices);
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
index d428ef1..fb5449b3 100644
--- a/arch/arm/mach-davinci/dm644x.c
+++ b/arch/arm/mach-davinci/dm644x.c
@@ -11,7 +11,11 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/clk.h>
+#include <linux/serial_8250.h>
 #include <linux/platform_device.h>
+#include <linux/gpio.h>
+
+#include <asm/mach/map.h>
 
 #include <mach/dm644x.h>
 #include <mach/clock.h>
@@ -20,6 +24,9 @@
 #include <mach/irqs.h>
 #include <mach/psc.h>
 #include <mach/mux.h>
+#include <mach/time.h>
+#include <mach/serial.h>
+#include <mach/common.h>
 
 #include "clock.h"
 #include "mux.h"
@@ -312,7 +319,14 @@
 	CLK(NULL, NULL, NULL),
 };
 
-#if defined(CONFIG_TI_DAVINCI_EMAC) || defined(CONFIG_TI_DAVINCI_EMAC_MODULE)
+static struct emac_platform_data dm644x_emac_pdata = {
+	.ctrl_reg_offset	= DM644X_EMAC_CNTRL_OFFSET,
+	.ctrl_mod_reg_offset	= DM644X_EMAC_CNTRL_MOD_OFFSET,
+	.ctrl_ram_offset	= DM644X_EMAC_CNTRL_RAM_OFFSET,
+	.mdio_reg_offset	= DM644X_EMAC_MDIO_OFFSET,
+	.ctrl_ram_size		= DM644X_EMAC_CNTRL_RAM_SIZE,
+	.version		= EMAC_VERSION_1,
+};
 
 static struct resource dm644x_emac_resources[] = {
 	{
@@ -330,11 +344,15 @@
 static struct platform_device dm644x_emac_device = {
        .name		= "davinci_emac",
        .id		= 1,
+       .dev = {
+	       .platform_data	= &dm644x_emac_pdata,
+       },
        .num_resources	= ARRAY_SIZE(dm644x_emac_resources),
        .resource	= dm644x_emac_resources,
 };
 
-#endif
+#define PINMUX0		0x00
+#define PINMUX1		0x04
 
 /*
  * Device specific mux setup
@@ -343,6 +361,7 @@
  *				reg  offset mask  mode
  */
 static const struct mux_config dm644x_pins[] = {
+#ifdef CONFIG_DAVINCI_MUX
 MUX_CFG(DM644X, HDIREN,		0,   16,    1,	  1,	 true)
 MUX_CFG(DM644X, ATAEN,		0,   17,    1,	  1,	 true)
 MUX_CFG(DM644X, ATAEN_DISABLE,	0,   17,    1,	  0,	 true)
@@ -383,8 +402,76 @@
 
 MUX_CFG(DM644X, LOEEN,		0,   24,    1,	  1,	 true)
 MUX_CFG(DM644X, LFLDEN,		0,   25,    1,	  1,	 false)
+#endif
 };
 
+/* FIQs are priority 0-1; the rest are 2-7, with 7 the lowest priority */
+static u8 dm644x_default_priorities[DAVINCI_N_AINTC_IRQ] = {
+	[IRQ_VDINT0]		= 2,
+	[IRQ_VDINT1]		= 6,
+	[IRQ_VDINT2]		= 6,
+	[IRQ_HISTINT]		= 6,
+	[IRQ_H3AINT]		= 6,
+	[IRQ_PRVUINT]		= 6,
+	[IRQ_RSZINT]		= 6,
+	[7]			= 7,
+	[IRQ_VENCINT]		= 6,
+	[IRQ_ASQINT]		= 6,
+	[IRQ_IMXINT]		= 6,
+	[IRQ_VLCDINT]		= 6,
+	[IRQ_USBINT]		= 4,
+	[IRQ_EMACINT]		= 4,
+	[14]			= 7,
+	[15]			= 7,
+	[IRQ_CCINT0]		= 5,	/* dma */
+	[IRQ_CCERRINT]		= 5,	/* dma */
+	[IRQ_TCERRINT0]		= 5,	/* dma */
+	[IRQ_TCERRINT]		= 5,	/* dma */
+	[IRQ_PSCIN]		= 7,
+	[21]			= 7,
+	[IRQ_IDE]		= 4,
+	[23]			= 7,
+	[IRQ_MBXINT]		= 7,
+	[IRQ_MBRINT]		= 7,
+	[IRQ_MMCINT]		= 7,
+	[IRQ_SDIOINT]		= 7,
+	[28]			= 7,
+	[IRQ_DDRINT]		= 7,
+	[IRQ_AEMIFINT]		= 7,
+	[IRQ_VLQINT]		= 4,
+	[IRQ_TINT0_TINT12]	= 2,	/* clockevent */
+	[IRQ_TINT0_TINT34]	= 2,	/* clocksource */
+	[IRQ_TINT1_TINT12]	= 7,	/* DSP timer */
+	[IRQ_TINT1_TINT34]	= 7,	/* system tick */
+	[IRQ_PWMINT0]		= 7,
+	[IRQ_PWMINT1]		= 7,
+	[IRQ_PWMINT2]		= 7,
+	[IRQ_I2C]		= 3,
+	[IRQ_UARTINT0]		= 3,
+	[IRQ_UARTINT1]		= 3,
+	[IRQ_UARTINT2]		= 3,
+	[IRQ_SPINT0]		= 3,
+	[IRQ_SPINT1]		= 3,
+	[45]			= 7,
+	[IRQ_DSP2ARM0]		= 4,
+	[IRQ_DSP2ARM1]		= 4,
+	[IRQ_GPIO0]		= 7,
+	[IRQ_GPIO1]		= 7,
+	[IRQ_GPIO2]		= 7,
+	[IRQ_GPIO3]		= 7,
+	[IRQ_GPIO4]		= 7,
+	[IRQ_GPIO5]		= 7,
+	[IRQ_GPIO6]		= 7,
+	[IRQ_GPIO7]		= 7,
+	[IRQ_GPIOBNK0]		= 7,
+	[IRQ_GPIOBNK1]		= 7,
+	[IRQ_GPIOBNK2]		= 7,
+	[IRQ_GPIOBNK3]		= 7,
+	[IRQ_GPIOBNK4]		= 7,
+	[IRQ_COMMTX]		= 7,
+	[IRQ_COMMRX]		= 7,
+	[IRQ_EMUINT]		= 7,
+};
 
 /*----------------------------------------------------------------------*/
 
@@ -444,10 +531,118 @@
 };
 
 /*----------------------------------------------------------------------*/
+
+static struct map_desc dm644x_io_desc[] = {
+	{
+		.virtual	= IO_VIRT,
+		.pfn		= __phys_to_pfn(IO_PHYS),
+		.length		= IO_SIZE,
+		.type		= MT_DEVICE
+	},
+	{
+		.virtual	= SRAM_VIRT,
+		.pfn		= __phys_to_pfn(0x00008000),
+		.length		= SZ_16K,
+		/* MT_MEMORY_NONCACHED requires supersection alignment */
+		.type		= MT_DEVICE,
+	},
+};
+
+/* Contents of JTAG ID register used to identify exact cpu type */
+static struct davinci_id dm644x_ids[] = {
+	{
+		.variant	= 0x0,
+		.part_no	= 0xb700,
+		.manufacturer	= 0x017,
+		.cpu_id		= DAVINCI_CPU_ID_DM6446,
+		.name		= "dm6446",
+	},
+};
+
+static void __iomem *dm644x_psc_bases[] = {
+	IO_ADDRESS(DAVINCI_PWR_SLEEP_CNTRL_BASE),
+};
+
+/*
+ * T0_BOT: Timer 0, bottom:  clockevent source for hrtimers
+ * T0_TOP: Timer 0, top   :  clocksource for generic timekeeping
+ * T1_BOT: Timer 1, bottom:  (used by DSP in TI DSPLink code)
+ * T1_TOP: Timer 1, top   :  <unused>
+ */
+struct davinci_timer_info dm644x_timer_info = {
+	.timers		= davinci_timer_instance,
+	.clockevent_id	= T0_BOT,
+	.clocksource_id	= T0_TOP,
+};
+
+static struct plat_serial8250_port dm644x_serial_platform_data[] = {
+	{
+		.mapbase	= DAVINCI_UART0_BASE,
+		.irq		= IRQ_UARTINT0,
+		.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
+				  UPF_IOREMAP,
+		.iotype		= UPIO_MEM,
+		.regshift	= 2,
+	},
+	{
+		.mapbase	= DAVINCI_UART1_BASE,
+		.irq		= IRQ_UARTINT1,
+		.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
+				  UPF_IOREMAP,
+		.iotype		= UPIO_MEM,
+		.regshift	= 2,
+	},
+	{
+		.mapbase	= DAVINCI_UART2_BASE,
+		.irq		= IRQ_UARTINT2,
+		.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
+				  UPF_IOREMAP,
+		.iotype		= UPIO_MEM,
+		.regshift	= 2,
+	},
+	{
+		.flags		= 0
+	},
+};
+
+static struct platform_device dm644x_serial_device = {
+	.name			= "serial8250",
+	.id			= PLAT8250_DEV_PLATFORM,
+	.dev			= {
+		.platform_data	= dm644x_serial_platform_data,
+	},
+};
+
+static struct davinci_soc_info davinci_soc_info_dm644x = {
+	.io_desc		= dm644x_io_desc,
+	.io_desc_num		= ARRAY_SIZE(dm644x_io_desc),
+	.jtag_id_base		= IO_ADDRESS(0x01c40028),
+	.ids			= dm644x_ids,
+	.ids_num		= ARRAY_SIZE(dm644x_ids),
+	.cpu_clks		= dm644x_clks,
+	.psc_bases		= dm644x_psc_bases,
+	.psc_bases_num		= ARRAY_SIZE(dm644x_psc_bases),
+	.pinmux_base		= IO_ADDRESS(DAVINCI_SYSTEM_MODULE_BASE),
+	.pinmux_pins		= dm644x_pins,
+	.pinmux_pins_num	= ARRAY_SIZE(dm644x_pins),
+	.intc_base		= IO_ADDRESS(DAVINCI_ARM_INTC_BASE),
+	.intc_type		= DAVINCI_INTC_TYPE_AINTC,
+	.intc_irq_prios		= dm644x_default_priorities,
+	.intc_irq_num		= DAVINCI_N_AINTC_IRQ,
+	.timer_info		= &dm644x_timer_info,
+	.wdt_base		= IO_ADDRESS(DAVINCI_WDOG_BASE),
+	.gpio_base		= IO_ADDRESS(DAVINCI_GPIO_BASE),
+	.gpio_num		= 71,
+	.gpio_irq		= IRQ_GPIOBNK0,
+	.serial_dev		= &dm644x_serial_device,
+	.emac_pdata		= &dm644x_emac_pdata,
+	.sram_dma		= 0x00008000,
+	.sram_len		= SZ_16K,
+};
+
 void __init dm644x_init(void)
 {
-	davinci_clk_init(dm644x_clks);
-	davinci_mux_register(dm644x_pins, ARRAY_SIZE(dm644x_pins));
+	davinci_common_init(&davinci_soc_info_dm644x);
 }
 
 static int __init dm644x_init_devices(void)
@@ -456,6 +651,7 @@
 		return 0;
 
 	platform_device_register(&dm644x_edma_device);
+	platform_device_register(&dm644x_emac_device);
 	return 0;
 }
 postcore_initcall(dm644x_init_devices);
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
new file mode 100644
index 0000000..334f0711
--- /dev/null
+++ b/arch/arm/mach-davinci/dm646x.c
@@ -0,0 +1,636 @@
+/*
+ * TI DaVinci DM646x chip specific setup
+ *
+ * Author: Kevin Hilman, Deep Root Systems, LLC
+ *
+ * 2007 (c) Deep Root Systems, LLC. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/serial_8250.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+
+#include <asm/mach/map.h>
+
+#include <mach/dm646x.h>
+#include <mach/clock.h>
+#include <mach/cputype.h>
+#include <mach/edma.h>
+#include <mach/irqs.h>
+#include <mach/psc.h>
+#include <mach/mux.h>
+#include <mach/time.h>
+#include <mach/serial.h>
+#include <mach/common.h>
+
+#include "clock.h"
+#include "mux.h"
+
+/*
+ * Device specific clocks
+ */
+#define DM646X_REF_FREQ		27000000
+#define DM646X_AUX_FREQ		24000000
+
+static struct pll_data pll1_data = {
+	.num       = 1,
+	.phys_base = DAVINCI_PLL1_BASE,
+};
+
+static struct pll_data pll2_data = {
+	.num       = 2,
+	.phys_base = DAVINCI_PLL2_BASE,
+};
+
+static struct clk ref_clk = {
+	.name = "ref_clk",
+	.rate = DM646X_REF_FREQ,
+};
+
+static struct clk aux_clkin = {
+	.name = "aux_clkin",
+	.rate = DM646X_AUX_FREQ,
+};
+
+static struct clk pll1_clk = {
+	.name = "pll1",
+	.parent = &ref_clk,
+	.pll_data = &pll1_data,
+	.flags = CLK_PLL,
+};
+
+static struct clk pll1_sysclk1 = {
+	.name = "pll1_sysclk1",
+	.parent = &pll1_clk,
+	.flags = CLK_PLL,
+	.div_reg = PLLDIV1,
+};
+
+static struct clk pll1_sysclk2 = {
+	.name = "pll1_sysclk2",
+	.parent = &pll1_clk,
+	.flags = CLK_PLL,
+	.div_reg = PLLDIV2,
+};
+
+static struct clk pll1_sysclk3 = {
+	.name = "pll1_sysclk3",
+	.parent = &pll1_clk,
+	.flags = CLK_PLL,
+	.div_reg = PLLDIV3,
+};
+
+static struct clk pll1_sysclk4 = {
+	.name = "pll1_sysclk4",
+	.parent = &pll1_clk,
+	.flags = CLK_PLL,
+	.div_reg = PLLDIV4,
+};
+
+static struct clk pll1_sysclk5 = {
+	.name = "pll1_sysclk5",
+	.parent = &pll1_clk,
+	.flags = CLK_PLL,
+	.div_reg = PLLDIV5,
+};
+
+static struct clk pll1_sysclk6 = {
+	.name = "pll1_sysclk6",
+	.parent = &pll1_clk,
+	.flags = CLK_PLL,
+	.div_reg = PLLDIV6,
+};
+
+static struct clk pll1_sysclk8 = {
+	.name = "pll1_sysclk8",
+	.parent = &pll1_clk,
+	.flags = CLK_PLL,
+	.div_reg = PLLDIV8,
+};
+
+static struct clk pll1_sysclk9 = {
+	.name = "pll1_sysclk9",
+	.parent = &pll1_clk,
+	.flags = CLK_PLL,
+	.div_reg = PLLDIV9,
+};
+
+static struct clk pll1_sysclkbp = {
+	.name = "pll1_sysclkbp",
+	.parent = &pll1_clk,
+	.flags = CLK_PLL | PRE_PLL,
+	.div_reg = BPDIV,
+};
+
+static struct clk pll1_aux_clk = {
+	.name = "pll1_aux_clk",
+	.parent = &pll1_clk,
+	.flags = CLK_PLL | PRE_PLL,
+};
+
+static struct clk pll2_clk = {
+	.name = "pll2_clk",
+	.parent = &ref_clk,
+	.pll_data = &pll2_data,
+	.flags = CLK_PLL,
+};
+
+static struct clk pll2_sysclk1 = {
+	.name = "pll2_sysclk1",
+	.parent = &pll2_clk,
+	.flags = CLK_PLL,
+	.div_reg = PLLDIV1,
+};
+
+static struct clk dsp_clk = {
+	.name = "dsp",
+	.parent = &pll1_sysclk1,
+	.lpsc = DM646X_LPSC_C64X_CPU,
+	.flags = PSC_DSP,
+	.usecount = 1,			/* REVISIT how to disable? */
+};
+
+static struct clk arm_clk = {
+	.name = "arm",
+	.parent = &pll1_sysclk2,
+	.lpsc = DM646X_LPSC_ARM,
+	.flags = ALWAYS_ENABLED,
+};
+
+static struct clk uart0_clk = {
+	.name = "uart0",
+	.parent = &aux_clkin,
+	.lpsc = DM646X_LPSC_UART0,
+};
+
+static struct clk uart1_clk = {
+	.name = "uart1",
+	.parent = &aux_clkin,
+	.lpsc = DM646X_LPSC_UART1,
+};
+
+static struct clk uart2_clk = {
+	.name = "uart2",
+	.parent = &aux_clkin,
+	.lpsc = DM646X_LPSC_UART2,
+};
+
+static struct clk i2c_clk = {
+	.name = "I2CCLK",
+	.parent = &pll1_sysclk3,
+	.lpsc = DM646X_LPSC_I2C,
+};
+
+static struct clk gpio_clk = {
+	.name = "gpio",
+	.parent = &pll1_sysclk3,
+	.lpsc = DM646X_LPSC_GPIO,
+};
+
+static struct clk aemif_clk = {
+	.name = "aemif",
+	.parent = &pll1_sysclk3,
+	.lpsc = DM646X_LPSC_AEMIF,
+	.flags = ALWAYS_ENABLED,
+};
+
+static struct clk emac_clk = {
+	.name = "emac",
+	.parent = &pll1_sysclk3,
+	.lpsc = DM646X_LPSC_EMAC,
+};
+
+static struct clk pwm0_clk = {
+	.name = "pwm0",
+	.parent = &pll1_sysclk3,
+	.lpsc = DM646X_LPSC_PWM0,
+	.usecount = 1,            /* REVISIT: disabling hangs system */
+};
+
+static struct clk pwm1_clk = {
+	.name = "pwm1",
+	.parent = &pll1_sysclk3,
+	.lpsc = DM646X_LPSC_PWM1,
+	.usecount = 1,            /* REVISIT: disabling hangs system */
+};
+
+static struct clk timer0_clk = {
+	.name = "timer0",
+	.parent = &pll1_sysclk3,
+	.lpsc = DM646X_LPSC_TIMER0,
+};
+
+static struct clk timer1_clk = {
+	.name = "timer1",
+	.parent = &pll1_sysclk3,
+	.lpsc = DM646X_LPSC_TIMER1,
+};
+
+static struct clk timer2_clk = {
+	.name = "timer2",
+	.parent = &pll1_sysclk3,
+	.flags = ALWAYS_ENABLED, /* no LPSC, always enabled; c.f. spruep9a */
+};
+
+static struct clk vpif0_clk = {
+	.name = "vpif0",
+	.parent = &ref_clk,
+	.lpsc = DM646X_LPSC_VPSSMSTR,
+	.flags = ALWAYS_ENABLED,
+};
+
+static struct clk vpif1_clk = {
+	.name = "vpif1",
+	.parent = &ref_clk,
+	.lpsc = DM646X_LPSC_VPSSSLV,
+	.flags = ALWAYS_ENABLED,
+};
+
+struct davinci_clk dm646x_clks[] = {
+	CLK(NULL, "ref", &ref_clk),
+	CLK(NULL, "aux", &aux_clkin),
+	CLK(NULL, "pll1", &pll1_clk),
+	CLK(NULL, "pll1_sysclk", &pll1_sysclk1),
+	CLK(NULL, "pll1_sysclk", &pll1_sysclk2),
+	CLK(NULL, "pll1_sysclk", &pll1_sysclk3),
+	CLK(NULL, "pll1_sysclk", &pll1_sysclk4),
+	CLK(NULL, "pll1_sysclk", &pll1_sysclk5),
+	CLK(NULL, "pll1_sysclk", &pll1_sysclk6),
+	CLK(NULL, "pll1_sysclk", &pll1_sysclk8),
+	CLK(NULL, "pll1_sysclk", &pll1_sysclk9),
+	CLK(NULL, "pll1_sysclk", &pll1_sysclkbp),
+	CLK(NULL, "pll1_aux", &pll1_aux_clk),
+	CLK(NULL, "pll2", &pll2_clk),
+	CLK(NULL, "pll2_sysclk1", &pll2_sysclk1),
+	CLK(NULL, "dsp", &dsp_clk),
+	CLK(NULL, "arm", &arm_clk),
+	CLK(NULL, "uart0", &uart0_clk),
+	CLK(NULL, "uart1", &uart1_clk),
+	CLK(NULL, "uart2", &uart2_clk),
+	CLK("i2c_davinci.1", NULL, &i2c_clk),
+	CLK(NULL, "gpio", &gpio_clk),
+	CLK(NULL, "aemif", &aemif_clk),
+	CLK("davinci_emac.1", NULL, &emac_clk),
+	CLK(NULL, "pwm0", &pwm0_clk),
+	CLK(NULL, "pwm1", &pwm1_clk),
+	CLK(NULL, "timer0", &timer0_clk),
+	CLK(NULL, "timer1", &timer1_clk),
+	CLK("watchdog", NULL, &timer2_clk),
+	CLK(NULL, "vpif0", &vpif0_clk),
+	CLK(NULL, "vpif1", &vpif1_clk),
+	CLK(NULL, NULL, NULL),
+};
+
+static struct emac_platform_data dm646x_emac_pdata = {
+	.ctrl_reg_offset	= DM646X_EMAC_CNTRL_OFFSET,
+	.ctrl_mod_reg_offset	= DM646X_EMAC_CNTRL_MOD_OFFSET,
+	.ctrl_ram_offset	= DM646X_EMAC_CNTRL_RAM_OFFSET,
+	.mdio_reg_offset	= DM646X_EMAC_MDIO_OFFSET,
+	.ctrl_ram_size		= DM646X_EMAC_CNTRL_RAM_SIZE,
+	.version		= EMAC_VERSION_2,
+};
+
+static struct resource dm646x_emac_resources[] = {
+	{
+		.start	= DM646X_EMAC_BASE,
+		.end	= DM646X_EMAC_BASE + 0x47ff,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		.start	= IRQ_DM646X_EMACRXTHINT,
+		.end	= IRQ_DM646X_EMACRXTHINT,
+		.flags	= IORESOURCE_IRQ,
+	},
+	{
+		.start	= IRQ_DM646X_EMACRXINT,
+		.end	= IRQ_DM646X_EMACRXINT,
+		.flags	= IORESOURCE_IRQ,
+	},
+	{
+		.start	= IRQ_DM646X_EMACTXINT,
+		.end	= IRQ_DM646X_EMACTXINT,
+		.flags	= IORESOURCE_IRQ,
+	},
+	{
+		.start	= IRQ_DM646X_EMACMISCINT,
+		.end	= IRQ_DM646X_EMACMISCINT,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device dm646x_emac_device = {
+	.name		= "davinci_emac",
+	.id		= 1,
+	.dev = {
+		.platform_data	= &dm646x_emac_pdata,
+	},
+	.num_resources	= ARRAY_SIZE(dm646x_emac_resources),
+	.resource	= dm646x_emac_resources,
+};
+
+#define PINMUX0		0x00
+#define PINMUX1		0x04
+
+/*
+ * Device specific mux setup
+ *
+ *	soc	description	mux  mode   mode  mux	 dbg
+ *				reg  offset mask  mode
+ */
+static const struct mux_config dm646x_pins[] = {
+#ifdef CONFIG_DAVINCI_MUX
+MUX_CFG(DM646X, ATAEN,		0,   0,     1,	  1,	 true)
+
+MUX_CFG(DM646X, AUDCK1,		0,   29,    1,	  0,	 false)
+
+MUX_CFG(DM646X, AUDCK0,		0,   28,    1,	  0,	 false)
+
+MUX_CFG(DM646X, CRGMUX,			0,   24,    7,    5,	 true)
+
+MUX_CFG(DM646X, STSOMUX_DISABLE,	0,   22,    3,    0,	 true)
+
+MUX_CFG(DM646X, STSIMUX_DISABLE,	0,   20,    3,    0,	 true)
+
+MUX_CFG(DM646X, PTSOMUX_DISABLE,	0,   18,    3,    0,	 true)
+
+MUX_CFG(DM646X, PTSIMUX_DISABLE,	0,   16,    3,    0,	 true)
+
+MUX_CFG(DM646X, STSOMUX,		0,   22,    3,    2,	 true)
+
+MUX_CFG(DM646X, STSIMUX,		0,   20,    3,    2,	 true)
+
+MUX_CFG(DM646X, PTSOMUX_PARALLEL,	0,   18,    3,    2,	 true)
+
+MUX_CFG(DM646X, PTSIMUX_PARALLEL,	0,   16,    3,    2,	 true)
+
+MUX_CFG(DM646X, PTSOMUX_SERIAL,		0,   18,    3,    3,	 true)
+
+MUX_CFG(DM646X, PTSIMUX_SERIAL,		0,   16,    3,    3,	 true)
+#endif
+};
+
+static u8 dm646x_default_priorities[DAVINCI_N_AINTC_IRQ] = {
+	[IRQ_DM646X_VP_VERTINT0]        = 7,
+	[IRQ_DM646X_VP_VERTINT1]        = 7,
+	[IRQ_DM646X_VP_VERTINT2]        = 7,
+	[IRQ_DM646X_VP_VERTINT3]        = 7,
+	[IRQ_DM646X_VP_ERRINT]          = 7,
+	[IRQ_DM646X_RESERVED_1]         = 7,
+	[IRQ_DM646X_RESERVED_2]         = 7,
+	[IRQ_DM646X_WDINT]              = 7,
+	[IRQ_DM646X_CRGENINT0]          = 7,
+	[IRQ_DM646X_CRGENINT1]          = 7,
+	[IRQ_DM646X_TSIFINT0]           = 7,
+	[IRQ_DM646X_TSIFINT1]           = 7,
+	[IRQ_DM646X_VDCEINT]            = 7,
+	[IRQ_DM646X_USBINT]             = 7,
+	[IRQ_DM646X_USBDMAINT]          = 7,
+	[IRQ_DM646X_PCIINT]             = 7,
+	[IRQ_CCINT0]                    = 7,    /* dma */
+	[IRQ_CCERRINT]                  = 7,    /* dma */
+	[IRQ_TCERRINT0]                 = 7,    /* dma */
+	[IRQ_TCERRINT]                  = 7,    /* dma */
+	[IRQ_DM646X_TCERRINT2]          = 7,
+	[IRQ_DM646X_TCERRINT3]          = 7,
+	[IRQ_DM646X_IDE]                = 7,
+	[IRQ_DM646X_HPIINT]             = 7,
+	[IRQ_DM646X_EMACRXTHINT]        = 7,
+	[IRQ_DM646X_EMACRXINT]          = 7,
+	[IRQ_DM646X_EMACTXINT]          = 7,
+	[IRQ_DM646X_EMACMISCINT]        = 7,
+	[IRQ_DM646X_MCASP0TXINT]        = 7,
+	[IRQ_DM646X_MCASP0RXINT]        = 7,
+	[IRQ_AEMIFINT]                  = 7,
+	[IRQ_DM646X_RESERVED_3]         = 7,
+	[IRQ_DM646X_MCASP1TXINT]        = 7,    /* clockevent */
+	[IRQ_TINT0_TINT34]              = 7,    /* clocksource */
+	[IRQ_TINT1_TINT12]              = 7,    /* DSP timer */
+	[IRQ_TINT1_TINT34]              = 7,    /* system tick */
+	[IRQ_PWMINT0]                   = 7,
+	[IRQ_PWMINT1]                   = 7,
+	[IRQ_DM646X_VLQINT]             = 7,
+	[IRQ_I2C]                       = 7,
+	[IRQ_UARTINT0]                  = 7,
+	[IRQ_UARTINT1]                  = 7,
+	[IRQ_DM646X_UARTINT2]           = 7,
+	[IRQ_DM646X_SPINT0]             = 7,
+	[IRQ_DM646X_SPINT1]             = 7,
+	[IRQ_DM646X_DSP2ARMINT]         = 7,
+	[IRQ_DM646X_RESERVED_4]         = 7,
+	[IRQ_DM646X_PSCINT]             = 7,
+	[IRQ_DM646X_GPIO0]              = 7,
+	[IRQ_DM646X_GPIO1]              = 7,
+	[IRQ_DM646X_GPIO2]              = 7,
+	[IRQ_DM646X_GPIO3]              = 7,
+	[IRQ_DM646X_GPIO4]              = 7,
+	[IRQ_DM646X_GPIO5]              = 7,
+	[IRQ_DM646X_GPIO6]              = 7,
+	[IRQ_DM646X_GPIO7]              = 7,
+	[IRQ_DM646X_GPIOBNK0]           = 7,
+	[IRQ_DM646X_GPIOBNK1]           = 7,
+	[IRQ_DM646X_GPIOBNK2]           = 7,
+	[IRQ_DM646X_DDRINT]             = 7,
+	[IRQ_DM646X_AEMIFINT]           = 7,
+	[IRQ_COMMTX]                    = 7,
+	[IRQ_COMMRX]                    = 7,
+	[IRQ_EMUINT]                    = 7,
+};
+
+/*----------------------------------------------------------------------*/
+
+static const s8 dma_chan_dm646x_no_event[] = {
+	 0,  1,  2,  3, 13,
+	14, 15, 24, 25, 26,
+	27, 30, 31, 54, 55,
+	56,
+	-1
+};
+
+static struct edma_soc_info dm646x_edma_info = {
+	.n_channel	= 64,
+	.n_region	= 6,	/* 0-1, 4-7 */
+	.n_slot		= 512,
+	.n_tc		= 4,
+	.noevent	= dma_chan_dm646x_no_event,
+};
+
+static struct resource edma_resources[] = {
+	{
+		.name	= "edma_cc",
+		.start	= 0x01c00000,
+		.end	= 0x01c00000 + SZ_64K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		.name	= "edma_tc0",
+		.start	= 0x01c10000,
+		.end	= 0x01c10000 + SZ_1K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		.name	= "edma_tc1",
+		.start	= 0x01c10400,
+		.end	= 0x01c10400 + SZ_1K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		.name	= "edma_tc2",
+		.start	= 0x01c10800,
+		.end	= 0x01c10800 + SZ_1K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		.name	= "edma_tc3",
+		.start	= 0x01c10c00,
+		.end	= 0x01c10c00 + SZ_1K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		.start	= IRQ_CCINT0,
+		.flags	= IORESOURCE_IRQ,
+	},
+	{
+		.start	= IRQ_CCERRINT,
+		.flags	= IORESOURCE_IRQ,
+	},
+	/* not using TC*_ERR */
+};
+
+static struct platform_device dm646x_edma_device = {
+	.name			= "edma",
+	.id			= -1,
+	.dev.platform_data	= &dm646x_edma_info,
+	.num_resources		= ARRAY_SIZE(edma_resources),
+	.resource		= edma_resources,
+};
+
+/*----------------------------------------------------------------------*/
+
+static struct map_desc dm646x_io_desc[] = {
+	{
+		.virtual	= IO_VIRT,
+		.pfn		= __phys_to_pfn(IO_PHYS),
+		.length		= IO_SIZE,
+		.type		= MT_DEVICE
+	},
+	{
+		.virtual	= SRAM_VIRT,
+		.pfn		= __phys_to_pfn(0x00010000),
+		.length		= SZ_32K,
+		/* MT_MEMORY_NONCACHED requires supersection alignment */
+		.type		= MT_DEVICE,
+	},
+};
+
+/* Contents of JTAG ID register used to identify exact cpu type */
+static struct davinci_id dm646x_ids[] = {
+	{
+		.variant	= 0x0,
+		.part_no	= 0xb770,
+		.manufacturer	= 0x017,
+		.cpu_id		= DAVINCI_CPU_ID_DM6467,
+		.name		= "dm6467",
+	},
+};
+
+static void __iomem *dm646x_psc_bases[] = {
+	IO_ADDRESS(DAVINCI_PWR_SLEEP_CNTRL_BASE),
+};
+
+/*
+ * T0_BOT: Timer 0, bottom:  clockevent source for hrtimers
+ * T0_TOP: Timer 0, top   :  clocksource for generic timekeeping
+ * T1_BOT: Timer 1, bottom:  (used by DSP in TI DSPLink code)
+ * T1_TOP: Timer 1, top   :  <unused>
+ */
+struct davinci_timer_info dm646x_timer_info = {
+	.timers		= davinci_timer_instance,
+	.clockevent_id	= T0_BOT,
+	.clocksource_id	= T0_TOP,
+};
+
+static struct plat_serial8250_port dm646x_serial_platform_data[] = {
+	{
+		.mapbase	= DAVINCI_UART0_BASE,
+		.irq		= IRQ_UARTINT0,
+		.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
+				  UPF_IOREMAP,
+		.iotype		= UPIO_MEM32,
+		.regshift	= 2,
+	},
+	{
+		.mapbase	= DAVINCI_UART1_BASE,
+		.irq		= IRQ_UARTINT1,
+		.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
+				  UPF_IOREMAP,
+		.iotype		= UPIO_MEM32,
+		.regshift	= 2,
+	},
+	{
+		.mapbase	= DAVINCI_UART2_BASE,
+		.irq		= IRQ_DM646X_UARTINT2,
+		.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
+				  UPF_IOREMAP,
+		.iotype		= UPIO_MEM32,
+		.regshift	= 2,
+	},
+	{
+		.flags		= 0
+	},
+};
+
+static struct platform_device dm646x_serial_device = {
+	.name			= "serial8250",
+	.id			= PLAT8250_DEV_PLATFORM,
+	.dev			= {
+		.platform_data	= dm646x_serial_platform_data,
+	},
+};
+
+static struct davinci_soc_info davinci_soc_info_dm646x = {
+	.io_desc		= dm646x_io_desc,
+	.io_desc_num		= ARRAY_SIZE(dm646x_io_desc),
+	.jtag_id_base		= IO_ADDRESS(0x01c40028),
+	.ids			= dm646x_ids,
+	.ids_num		= ARRAY_SIZE(dm646x_ids),
+	.cpu_clks		= dm646x_clks,
+	.psc_bases		= dm646x_psc_bases,
+	.psc_bases_num		= ARRAY_SIZE(dm646x_psc_bases),
+	.pinmux_base		= IO_ADDRESS(DAVINCI_SYSTEM_MODULE_BASE),
+	.pinmux_pins		= dm646x_pins,
+	.pinmux_pins_num	= ARRAY_SIZE(dm646x_pins),
+	.intc_base		= IO_ADDRESS(DAVINCI_ARM_INTC_BASE),
+	.intc_type		= DAVINCI_INTC_TYPE_AINTC,
+	.intc_irq_prios		= dm646x_default_priorities,
+	.intc_irq_num		= DAVINCI_N_AINTC_IRQ,
+	.timer_info		= &dm646x_timer_info,
+	.wdt_base		= IO_ADDRESS(DAVINCI_WDOG_BASE),
+	.gpio_base		= IO_ADDRESS(DAVINCI_GPIO_BASE),
+	.gpio_num		= 43, /* Only 33 usable */
+	.gpio_irq		= IRQ_DM646X_GPIOBNK0,
+	.serial_dev		= &dm646x_serial_device,
+	.emac_pdata		= &dm646x_emac_pdata,
+	.sram_dma		= 0x10010000,
+	.sram_len		= SZ_32K,
+};
+
+void __init dm646x_init(void)
+{
+	davinci_common_init(&davinci_soc_info_dm646x);
+}
+
+static int __init dm646x_init_devices(void)
+{
+	if (!cpu_is_davinci_dm646x())
+		return 0;
+
+	platform_device_register(&dm646x_edma_device);
+	platform_device_register(&dm646x_emac_device);
+	return 0;
+}
+postcore_initcall(dm646x_init_devices);
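/*
 * Illustrative sketch, not part of the patch: a DM646x board file is
 * expected to call dm646x_init() early (typically from its map_io
 * hook) so davinci_common_init() installs this SoC description before
 * the postcore initcall above registers the EDMA and EMAC devices.
 * The board function name below is a placeholder.
 */
#include <linux/init.h>
#include <mach/dm646x.h>

static void __init example_dm646x_board_map_io(void)
{
	dm646x_init();		/* installs davinci_soc_info_dm646x */
}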
diff --git a/arch/arm/mach-davinci/gpio.c b/arch/arm/mach-davinci/gpio.c
index 1aba41c..1b65321 100644
--- a/arch/arm/mach-davinci/gpio.c
+++ b/arch/arm/mach-davinci/gpio.c
@@ -23,6 +23,7 @@
 #include <mach/cputype.h>
 #include <mach/irqs.h>
 #include <mach/hardware.h>
+#include <mach/common.h>
 #include <mach/gpio.h>
 
 #include <asm/mach/irq.h>
@@ -37,14 +38,13 @@
 
 static struct davinci_gpio chips[DIV_ROUND_UP(DAVINCI_N_GPIO, 32)];
 
-static unsigned __initdata ngpio;
-
 /* create a non-inlined version */
 static struct gpio_controller __iomem * __init gpio2controller(unsigned gpio)
 {
 	return __gpio_to_controller(gpio);
 }
 
+static int __init davinci_gpio_irq_setup(void);
 
 /*--------------------------------------------------------------------------*/
 
@@ -115,23 +115,16 @@
 static int __init davinci_gpio_setup(void)
 {
 	int i, base;
+	unsigned ngpio;
+	struct davinci_soc_info *soc_info = &davinci_soc_info;
 
-	/* The gpio banks conceptually expose a segmented bitmap,
+	/*
+	 * The gpio banks conceptually expose a segmented bitmap,
 	 * and "ngpio" is one more than the largest zero-based
 	 * bit index that's valid.
 	 */
-	if (cpu_is_davinci_dm355()) {		/* or dm335() */
-		ngpio = 104;
-	} else if (cpu_is_davinci_dm644x()) {	/* or dm337() */
-		ngpio = 71;
-	} else if (cpu_is_davinci_dm646x()) {
-		/* NOTE:  each bank has several "reserved" bits,
-		 * unusable as GPIOs.  Only 33 of the GPIO numbers
-		 * are usable, and we're not rejecting the others.
-		 */
-		ngpio = 43;
-	} else {
-		/* if cpu_is_davinci_dm643x() ngpio = 111 */
+	ngpio = soc_info->gpio_num;
+	if (ngpio == 0) {
 		pr_err("GPIO setup:  how many GPIOs?\n");
 		return -EINVAL;
 	}
@@ -157,6 +150,7 @@
 		gpiochip_add(&chips[i].chip);
 	}
 
+	davinci_gpio_irq_setup();
 	return 0;
 }
 pure_initcall(davinci_gpio_setup);
@@ -187,10 +181,15 @@
 {
 	struct gpio_controller *__iomem g = get_irq_chip_data(irq);
 	u32 mask = __gpio_mask(irq_to_gpio(irq));
+	unsigned status = irq_desc[irq].status;
 
-	if (irq_desc[irq].status & IRQ_TYPE_EDGE_FALLING)
+	status &= IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING;
+	if (!status)
+		status = IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING;
+
+	if (status & IRQ_TYPE_EDGE_FALLING)
 		__raw_writel(mask, &g->set_falling);
-	if (irq_desc[irq].status & IRQ_TYPE_EDGE_RISING)
+	if (status & IRQ_TYPE_EDGE_RISING)
 		__raw_writel(mask, &g->set_rising);
 }
 
@@ -205,10 +204,13 @@
 	irq_desc[irq].status &= ~IRQ_TYPE_SENSE_MASK;
 	irq_desc[irq].status |= trigger;
 
-	__raw_writel(mask, (trigger & IRQ_TYPE_EDGE_FALLING)
-		     ? &g->set_falling : &g->clr_falling);
-	__raw_writel(mask, (trigger & IRQ_TYPE_EDGE_RISING)
-		     ? &g->set_rising : &g->clr_rising);
+	/* don't enable the IRQ if it's currently disabled */
+	if (irq_desc[irq].depth == 0) {
+		__raw_writel(mask, (trigger & IRQ_TYPE_EDGE_FALLING)
+			     ? &g->set_falling : &g->clr_falling);
+		__raw_writel(mask, (trigger & IRQ_TYPE_EDGE_RISING)
+			     ? &g->set_rising : &g->clr_rising);
+	}
 	return 0;
 }
 
@@ -230,6 +232,7 @@
 		mask <<= 16;
 
 	/* temporarily mask (level sensitive) parent IRQ */
+	desc->chip->mask(irq);
 	desc->chip->ack(irq);
 	while (1) {
 		u32		status;
@@ -268,17 +271,15 @@
 static int __init davinci_gpio_irq_setup(void)
 {
 	unsigned	gpio, irq, bank;
-	unsigned	bank_irq;
 	struct clk	*clk;
 	u32		binten = 0;
+	unsigned	ngpio, bank_irq;
+	struct davinci_soc_info *soc_info = &davinci_soc_info;
 
-	if (cpu_is_davinci_dm355()) {		/* or dm335() */
-		bank_irq = IRQ_DM355_GPIOBNK0;
-	} else if (cpu_is_davinci_dm644x()) {
-		bank_irq = IRQ_GPIOBNK0;
-	} else if (cpu_is_davinci_dm646x()) {
-		bank_irq = IRQ_DM646X_GPIOBNK0;
-	} else {
+	ngpio = soc_info->gpio_num;
+
+	bank_irq = soc_info->gpio_irq;
+	if (bank_irq == 0) {
 		printk(KERN_ERR "Don't know first GPIO bank IRQ.\n");
 		return -EINVAL;
 	}
@@ -318,11 +319,9 @@
 	/* BINTEN -- per-bank interrupt enable. genirq would also let these
 	 * bits be set/cleared dynamically.
 	 */
-	__raw_writel(binten, (void *__iomem)
-		     IO_ADDRESS(DAVINCI_GPIO_BASE + 0x08));
+	__raw_writel(binten, soc_info->gpio_base + 0x08);
 
 	printk(KERN_INFO "DaVinci: %d gpio irqs\n", irq - gpio_to_irq(0));
 
 	return 0;
 }
-arch_initcall(davinci_gpio_irq_setup);
diff --git a/arch/arm/mach-davinci/id.c b/arch/arm/mach-davinci/id.c
deleted file mode 100644
index 018b994..0000000
--- a/arch/arm/mach-davinci/id.c
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Davinci CPU identification code
- *
- * Copyright (C) 2006 Komal Shah <komal_shah802003@yahoo.com>
- *
- * Derived from OMAP1 CPU identification code.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/io.h>
-
-#define JTAG_ID_BASE		IO_ADDRESS(0x01c40028)
-
-static unsigned int davinci_revision;
-
-struct davinci_id {
-	u8	variant;	/* JTAG ID bits 31:28 */
-	u16	part_no;	/* JTAG ID bits 27:12 */
-	u32	manufacturer;	/* JTAG ID bits 11:1 */
-	u32	type;		/* Cpu id bits [31:8], cpu class bits [7:0] */
-};
-
-/* Register values to detect the DaVinci version */
-static struct davinci_id davinci_ids[] __initdata = {
-	{
-		/* DM6446 */
-		.part_no      = 0xb700,
-		.variant      = 0x0,
-		.manufacturer = 0x017,
-		.type	      = 0x64460000,
-	},
-	{
-		/* DM646X */
-		.part_no      = 0xb770,
-		.variant      = 0x0,
-		.manufacturer = 0x017,
-		.type         = 0x64670000,
-	},
-	{
-		/* DM355 */
-		.part_no	= 0xb73b,
-		.variant	= 0x0,
-		.manufacturer	= 0x00f,
-		.type		= 0x03550000,
-	},
-};
-
-/*
- * Get Device Part No. from JTAG ID register
- */
-static u16 __init davinci_get_part_no(void)
-{
-	u32 dev_id, part_no;
-
-	dev_id = __raw_readl(JTAG_ID_BASE);
-
-	part_no = ((dev_id >> 12) & 0xffff);
-
-	return part_no;
-}
-
-/*
- * Get Device Revision from JTAG ID register
- */
-static u8 __init davinci_get_variant(void)
-{
-	u32 variant;
-
-	variant = __raw_readl(JTAG_ID_BASE);
-
-	variant = (variant >> 28) & 0xf;
-
-	return variant;
-}
-
-unsigned int davinci_rev(void)
-{
-	return davinci_revision >> 16;
-}
-EXPORT_SYMBOL(davinci_rev);
-
-void __init davinci_check_revision(void)
-{
-	int i;
-	u16 part_no;
-	u8 variant;
-
-	part_no = davinci_get_part_no();
-	variant = davinci_get_variant();
-
-	/* First check only the major version in a safe way */
-	for (i = 0; i < ARRAY_SIZE(davinci_ids); i++) {
-		if (part_no == (davinci_ids[i].part_no)) {
-			davinci_revision = davinci_ids[i].type;
-			break;
-		}
-	}
-
-	/* Check if we can find the dev revision */
-	for (i = 0; i < ARRAY_SIZE(davinci_ids); i++) {
-		if (part_no == davinci_ids[i].part_no &&
-		    variant == davinci_ids[i].variant) {
-			davinci_revision = davinci_ids[i].type;
-			break;
-		}
-	}
-
-	printk(KERN_INFO "DaVinci DM%04x variant 0x%x\n",
-	       davinci_rev(), variant);
-}
diff --git a/arch/arm/mach-davinci/include/mach/board-dm6446evm.h b/arch/arm/mach-davinci/include/mach/board-dm6446evm.h
deleted file mode 100644
index 3216f21..0000000
--- a/arch/arm/mach-davinci/include/mach/board-dm6446evm.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * DaVinci DM6446 EVM board specific headers
- *
- * Author: Kevin Hilman, Deep Root Systems, LLC
- *
- * 2007 (c) Deep Root Systems, LLC. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or ifndef.
- */
-
-#ifndef _MACH_DAVINCI_DM6446EVM_H
-#define _MACH_DAVINCI_DM6446EVM_H
-
-#include <linux/types.h>
-
-int dm6446evm_eeprom_read(char *buf, off_t off, size_t count);
-int dm6446evm_eeprom_write(char *buf, off_t off, size_t count);
-
-#endif
diff --git a/arch/arm/mach-davinci/include/mach/common.h b/arch/arm/mach-davinci/include/mach/common.h
index 1917709..a1f03b6 100644
--- a/arch/arm/mach-davinci/include/mach/common.h
+++ b/arch/arm/mach-davinci/include/mach/common.h
@@ -17,7 +17,8 @@
 extern struct sys_timer davinci_timer;
 
 extern void davinci_irq_init(void);
-extern void davinci_map_common_io(void);
+extern void __iomem *davinci_intc_base;
+extern int davinci_intc_type;
 
 /* parameters describe VBUS sourcing for host mode */
 extern void setup_usb(unsigned mA, unsigned potpgt_msec);
@@ -25,4 +26,56 @@
 /* parameters describe VBUS sourcing for host mode */
 extern void setup_usb(unsigned mA, unsigned potpgt_msec);
 
+struct davinci_timer_instance {
+	void __iomem	*base;
+	u32		bottom_irq;
+	u32		top_irq;
+	unsigned long	cmp_off;
+	unsigned int	cmp_irq;
+};
+
+struct davinci_timer_info {
+	struct davinci_timer_instance	*timers;
+	unsigned int			clockevent_id;
+	unsigned int			clocksource_id;
+};
+
+/* SoC specific init support */
+struct davinci_soc_info {
+	struct map_desc			*io_desc;
+	unsigned long			io_desc_num;
+	u32				cpu_id;
+	u32				jtag_id;
+	void __iomem			*jtag_id_base;
+	struct davinci_id		*ids;
+	unsigned long			ids_num;
+	struct davinci_clk		*cpu_clks;
+	void __iomem			**psc_bases;
+	unsigned long			psc_bases_num;
+	void __iomem			*pinmux_base;
+	const struct mux_config		*pinmux_pins;
+	unsigned long			pinmux_pins_num;
+	void __iomem			*intc_base;
+	int				intc_type;
+	u8				*intc_irq_prios;
+	unsigned long			intc_irq_num;
+	struct davinci_timer_info	*timer_info;
+	void __iomem			*wdt_base;
+	void __iomem			*gpio_base;
+	unsigned			gpio_num;
+	unsigned			gpio_irq;
+	struct platform_device		*serial_dev;
+	struct emac_platform_data	*emac_pdata;
+	dma_addr_t			sram_dma;
+	unsigned			sram_len;
+};
+
+extern struct davinci_soc_info davinci_soc_info;
+
+extern void davinci_common_init(struct davinci_soc_info *soc_info);
+
+/* standard place to map on-chip SRAMs; they *may* support DMA */
+#define SRAM_VIRT	0xfffe0000
+#define SRAM_SIZE	SZ_128K
+
 #endif /* __ARCH_ARM_MACH_DAVINCI_COMMON_H */
diff --git a/arch/arm/mach-davinci/include/mach/cp_intc.h b/arch/arm/mach-davinci/include/mach/cp_intc.h
new file mode 100644
index 0000000..c4d27ee
--- /dev/null
+++ b/arch/arm/mach-davinci/include/mach/cp_intc.h
@@ -0,0 +1,57 @@
+/*
+ * TI Common Platform Interrupt Controller (cp_intc) definitions
+ *
+ * Author: Steve Chen <schen@mvista.com>
+ * Copyright (C) 2008-2009, MontaVista Software, Inc. <source@mvista.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+#ifndef __ASM_HARDWARE_CP_INTC_H
+#define __ASM_HARDWARE_CP_INTC_H
+
+#define CP_INTC_REV			0x00
+#define CP_INTC_CTRL			0x04
+#define CP_INTC_HOST_CTRL		0x0C
+#define CP_INTC_GLOBAL_ENABLE		0x10
+#define CP_INTC_GLOBAL_NESTING_LEVEL	0x1C
+#define CP_INTC_SYS_STAT_IDX_SET	0x20
+#define CP_INTC_SYS_STAT_IDX_CLR	0x24
+#define CP_INTC_SYS_ENABLE_IDX_SET	0x28
+#define CP_INTC_SYS_ENABLE_IDX_CLR	0x2C
+#define CP_INTC_GLOBAL_WAKEUP_ENABLE	0x30
+#define CP_INTC_HOST_ENABLE_IDX_SET	0x34
+#define CP_INTC_HOST_ENABLE_IDX_CLR	0x38
+#define CP_INTC_PACING_PRESCALE 	0x40
+#define CP_INTC_VECTOR_BASE		0x50
+#define CP_INTC_VECTOR_SIZE		0x54
+#define CP_INTC_VECTOR_NULL		0x58
+#define CP_INTC_PRIO_IDX		0x80
+#define CP_INTC_PRIO_VECTOR		0x84
+#define CP_INTC_SECURE_ENABLE		0x90
+#define CP_INTC_SECURE_PRIO_IDX 	0x94
+#define CP_INTC_PACING_PARAM(n) 	(0x0100 + (n << 4))
+#define CP_INTC_PACING_DEC(n)		(0x0104 + (n << 4))
+#define CP_INTC_PACING_MAP(n)		(0x0108 + (n << 4))
+#define CP_INTC_SYS_RAW_STAT(n) 	(0x0200 + (n << 2))
+#define CP_INTC_SYS_STAT_CLR(n) 	(0x0280 + (n << 2))
+#define CP_INTC_SYS_ENABLE_SET(n)	(0x0300 + (n << 2))
+#define CP_INTC_SYS_ENABLE_CLR(n)	(0x0380 + (n << 2))
+#define CP_INTC_CHAN_MAP(n)		(0x0400 + (n << 2))
+#define CP_INTC_HOST_MAP(n)		(0x0800 + (n << 2))
+#define CP_INTC_HOST_PRIO_IDX(n)	(0x0900 + (n << 2))
+#define CP_INTC_SYS_POLARITY(n) 	(0x0D00 + (n << 2))
+#define CP_INTC_SYS_TYPE(n)		(0x0D80 + (n << 2))
+#define CP_INTC_WAKEUP_ENABLE(n)	(0x0E00 + (n << 2))
+#define CP_INTC_DEBUG_SELECT(n) 	(0x0F00 + (n << 2))
+#define CP_INTC_SYS_SECURE_ENABLE(n)	(0x1000 + (n << 2))
+#define CP_INTC_HOST_NESTING_LEVEL(n)	(0x1100 + (n << 2))
+#define CP_INTC_HOST_ENABLE(n)		(0x1500 + (n << 2))
+#define CP_INTC_HOST_PRIO_VECTOR(n)	(0x1600 + (n << 2))
+#define CP_INTC_VECTOR_ADDR(n)		(0x2000 + (n << 2))
+
+void __init cp_intc_init(void __iomem *base, unsigned short num_irq,
+			 u8 *irq_prio);
+
+#endif	/* __ASM_HARDWARE_CP_INTC_H */
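/*
 * Illustrative sketch, not part of the patch: how the register layout
 * above is typically driven.  cp_intc_read()/cp_intc_write() are
 * assumed helpers wrapping __raw_readl()/__raw_writel() on the base
 * address handed to cp_intc_init(); they are not declared by this
 * header.
 */
#include <linux/io.h>
#include <mach/cp_intc.h>

static void __iomem *cp_intc_base;

static inline u32 cp_intc_read(unsigned offset)
{
	return __raw_readl(cp_intc_base + offset);
}

static inline void cp_intc_write(unsigned long value, unsigned offset)
{
	__raw_writel(value, cp_intc_base + offset);
}

static void example_cp_intc_unmask(unsigned int irq)
{
	/* the *_IDX_SET/_IDX_CLR registers take a raw interrupt number */
	cp_intc_write(irq, CP_INTC_SYS_ENABLE_IDX_SET);
}

static int example_cp_intc_pending(unsigned int irq)
{
	/* banked registers carry one bit per interrupt, 32 per word */
	return (cp_intc_read(CP_INTC_SYS_RAW_STAT(irq >> 5))
			>> (irq & 0x1f)) & 1;
}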
diff --git a/arch/arm/mach-davinci/include/mach/cputype.h b/arch/arm/mach-davinci/include/mach/cputype.h
index 27cfb1b..d12a5ed 100644
--- a/arch/arm/mach-davinci/include/mach/cputype.h
+++ b/arch/arm/mach-davinci/include/mach/cputype.h
@@ -16,17 +16,30 @@
 #ifndef _ASM_ARCH_CPU_H
 #define _ASM_ARCH_CPU_H
 
-extern unsigned int davinci_rev(void);
+#include <mach/common.h>
 
-#define IS_DAVINCI_CPU(type, id)			\
-static inline int is_davinci_dm ##type(void)	        \
-{							\
-	return (davinci_rev() == (id)) ? 1 : 0;	        \
+struct davinci_id {
+	u8	variant;	/* JTAG ID bits 31:28 */
+	u16	part_no;	/* JTAG ID bits 27:12 */
+	u16	manufacturer;	/* JTAG ID bits 11:1 */
+	u32	cpu_id;
+	char	*name;
+};
+
+/* Can use lower 16 bits of cpu id  for a variant when required */
+#define	DAVINCI_CPU_ID_DM6446		0x64460000
+#define	DAVINCI_CPU_ID_DM6467		0x64670000
+#define	DAVINCI_CPU_ID_DM355		0x03550000
+
+#define IS_DAVINCI_CPU(type, id)					\
+static inline int is_davinci_ ##type(void)				\
+{									\
+	return (davinci_soc_info.cpu_id == (id));			\
 }
 
-IS_DAVINCI_CPU(644x, 0x6446)
-IS_DAVINCI_CPU(646x, 0x6467)
-IS_DAVINCI_CPU(355, 0x355)
+IS_DAVINCI_CPU(dm644x, DAVINCI_CPU_ID_DM6446)
+IS_DAVINCI_CPU(dm646x, DAVINCI_CPU_ID_DM6467)
+IS_DAVINCI_CPU(dm355, DAVINCI_CPU_ID_DM355)
 
 #ifdef CONFIG_ARCH_DAVINCI_DM644x
 #define cpu_is_davinci_dm644x() is_davinci_dm644x()
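/*
 * Illustrative sketch, not part of the patch: IS_DAVINCI_CPU(dm644x,
 * DAVINCI_CPU_ID_DM6446) above expands to roughly the function below,
 * so every cpu_is_davinci_*() test is now a plain comparison against
 * the cpu_id that davinci_common_init() fills in from the JTAG ID
 * match table.
 */
static inline int is_davinci_dm644x(void)
{
	return (davinci_soc_info.cpu_id == DAVINCI_CPU_ID_DM6446);
}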
diff --git a/arch/arm/mach-davinci/include/mach/debug-macro.S b/arch/arm/mach-davinci/include/mach/debug-macro.S
index e6c0f0d..de3fc21 100644
--- a/arch/arm/mach-davinci/include/mach/debug-macro.S
+++ b/arch/arm/mach-davinci/include/mach/debug-macro.S
@@ -9,6 +9,16 @@
  * or implied.
  */
 
+/* Modifications
+ * Jan 2009	Chaithrika U S	Added senduart, busyuart, waituart
+ *				macros, based on debug-8250.S file
+ *				but using 32-bit accesses required for
+ *				some davinci devices.
+ */
+
+#include <linux/serial_reg.h>
+#define UART_SHIFT	2
+
 		.macro addruart, rx
 		mrc	p15, 0, \rx, c1, c0
 		tst	\rx, #1			@ MMU enabled?
@@ -17,5 +27,22 @@
 		orr	\rx, \rx, #0x00c20000   @ UART 0
 		.endm
 
-#define UART_SHIFT	2
-#include <asm/hardware/debug-8250.S>
+		.macro	senduart,rd,rx
+		str	\rd, [\rx, #UART_TX << UART_SHIFT]
+		.endm
+
+		.macro	busyuart,rd,rx
+1002:		ldr	\rd, [\rx, #UART_LSR << UART_SHIFT]
+		and	\rd, \rd, #UART_LSR_TEMT | UART_LSR_THRE
+		teq	\rd, #UART_LSR_TEMT | UART_LSR_THRE
+		bne	1002b
+		.endm
+
+		.macro	waituart,rd,rx
+#ifdef FLOW_CONTROL
+1001:		ldr	\rd, [\rx, #UART_MSR << UART_SHIFT]
+		tst	\rd, #UART_MSR_CTS
+		beq	1001b
+#endif
+		.endm
+
diff --git a/arch/arm/mach-davinci/include/mach/dm355.h b/arch/arm/mach-davinci/include/mach/dm355.h
new file mode 100644
index 0000000..54903b7
--- /dev/null
+++ b/arch/arm/mach-davinci/include/mach/dm355.h
@@ -0,0 +1,22 @@
+/*
+ * Chip specific defines for DM355 SoC
+ *
+ * Author: Kevin Hilman, Deep Root Systems, LLC
+ *
+ * 2007 (c) Deep Root Systems, LLC. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+#ifndef __ASM_ARCH_DM355_H
+#define __ASM_ARCH_DM355_H
+
+#include <mach/hardware.h>
+
+struct spi_board_info;
+
+void __init dm355_init(void);
+void dm355_init_spi0(unsigned chipselect_mask,
+		struct spi_board_info *info, unsigned len);
+
+#endif /* __ASM_ARCH_DM355_H */
diff --git a/arch/arm/mach-davinci/include/mach/dm644x.h b/arch/arm/mach-davinci/include/mach/dm644x.h
index 3dcb9f4..15d42b9 100644
--- a/arch/arm/mach-davinci/include/mach/dm644x.h
+++ b/arch/arm/mach-davinci/include/mach/dm644x.h
@@ -24,6 +24,7 @@
 
 #include <linux/platform_device.h>
 #include <mach/hardware.h>
+#include <mach/emac.h>
 
 #define DM644X_EMAC_BASE		(0x01C80000)
 #define DM644X_EMAC_CNTRL_OFFSET	(0x0000)
diff --git a/arch/arm/mach-davinci/include/mach/dm646x.h b/arch/arm/mach-davinci/include/mach/dm646x.h
new file mode 100644
index 0000000..1fc764c
--- /dev/null
+++ b/arch/arm/mach-davinci/include/mach/dm646x.h
@@ -0,0 +1,26 @@
+/*
+ * Chip specific defines for DM646x SoC
+ *
+ * Author: Kevin Hilman, Deep Root Systems, LLC
+ *
+ * 2007 (c) Deep Root Systems, LLC. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+#ifndef __ASM_ARCH_DM646X_H
+#define __ASM_ARCH_DM646X_H
+
+#include <mach/hardware.h>
+#include <mach/emac.h>
+
+#define DM646X_EMAC_BASE		(0x01C80000)
+#define DM646X_EMAC_CNTRL_OFFSET	(0x0000)
+#define DM646X_EMAC_CNTRL_MOD_OFFSET	(0x1000)
+#define DM646X_EMAC_CNTRL_RAM_OFFSET	(0x2000)
+#define DM646X_EMAC_MDIO_OFFSET		(0x4000)
+#define DM646X_EMAC_CNTRL_RAM_SIZE	(0x2000)
+
+void __init dm646x_init(void);
+
+#endif /* __ASM_ARCH_DM646X_H */
diff --git a/arch/arm/mach-davinci/include/mach/edma.h b/arch/arm/mach-davinci/include/mach/edma.h
index f6fc539..24a3792 100644
--- a/arch/arm/mach-davinci/include/mach/edma.h
+++ b/arch/arm/mach-davinci/include/mach/edma.h
@@ -208,10 +208,6 @@
 void edma_pause(unsigned channel);
 void edma_resume(unsigned channel);
 
-/* UNRELATED TO DMA */
-int davinci_alloc_iram(unsigned size);
-void davinci_free_iram(unsigned addr, unsigned size);
-
 /* platform_data for EDMA driver */
 struct edma_soc_info {
 
diff --git a/arch/arm/mach-davinci/include/mach/emac.h b/arch/arm/mach-davinci/include/mach/emac.h
new file mode 100644
index 0000000..beff4fb
--- /dev/null
+++ b/arch/arm/mach-davinci/include/mach/emac.h
@@ -0,0 +1,36 @@
+/*
+ * TI DaVinci EMAC platform support
+ *
+ * Author: Kevin Hilman, Deep Root Systems, LLC
+ *
+ * 2007 (c) Deep Root Systems, LLC. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+#ifndef _MACH_DAVINCI_EMAC_H
+#define _MACH_DAVINCI_EMAC_H
+
+#include <linux/if_ether.h>
+#include <linux/memory.h>
+
+struct emac_platform_data {
+	char mac_addr[ETH_ALEN];
+	u32 ctrl_reg_offset;
+	u32 ctrl_mod_reg_offset;
+	u32 ctrl_ram_offset;
+	u32 mdio_reg_offset;
+	u32 ctrl_ram_size;
+	u32 phy_mask;
+	u32 mdio_max_freq;
+	u8 rmii_en;
+	u8 version;
+};
+
+enum {
+	EMAC_VERSION_1,	/* DM644x */
+	EMAC_VERSION_2,	/* DM646x */
+};
+
+void davinci_get_mac_addr(struct memory_accessor *mem_acc, void *context);
+#endif
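/*
 * Illustrative sketch, not part of the patch: davinci_get_mac_addr()
 * above is meant to be used as an at24 EEPROM setup() callback, with
 * the board's emac_platform_data as context, so the MAC address can be
 * read out of EEPROM at probe time.  example_emac_pdata and the EEPROM
 * geometry below are placeholders.
 */
#include <linux/i2c/at24.h>
#include <mach/emac.h>

static struct emac_platform_data example_emac_pdata;

static struct at24_platform_data example_eeprom_info = {
	.byte_len	= (256 * 1024) / 8,
	.page_size	= 64,
	.flags		= AT24_FLAG_ADDR16,
	.setup		= davinci_get_mac_addr,
	.context	= (void *)&example_emac_pdata,
};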
diff --git a/arch/arm/mach-davinci/include/mach/entry-macro.S b/arch/arm/mach-davinci/include/mach/entry-macro.S
index 039b84f..fbdebc7 100644
--- a/arch/arm/mach-davinci/include/mach/entry-macro.S
+++ b/arch/arm/mach-davinci/include/mach/entry-macro.S
@@ -15,17 +15,36 @@
 		.endm
 
 		.macro  get_irqnr_preamble, base, tmp
-		ldr \base, =IO_ADDRESS(DAVINCI_ARM_INTC_BASE)
+		ldr \base, =davinci_intc_base
+		ldr \base, [\base]
 		.endm
 
 		.macro  arch_ret_to_user, tmp1, tmp2
 		.endm
 
 		.macro	get_irqnr_and_base, irqnr, irqstat, base, tmp
+#if defined(CONFIG_AINTC) && defined(CONFIG_CP_INTC)
+		ldr \tmp, =davinci_intc_type
+		ldr \tmp, [\tmp]
+		cmp \tmp, #DAVINCI_INTC_TYPE_CP_INTC
+		beq 1001f
+#endif
+#if defined(CONFIG_AINTC)
 		ldr \tmp, [\base, #0x14]
-		mov \tmp, \tmp, lsr #2
+		movs \tmp, \tmp, lsr #2
 		sub \irqnr, \tmp, #1
-		cmp \tmp, #0
+		b 1002f
+#endif
+#if defined(CONFIG_CP_INTC)
+1001:		ldr \irqnr, [\base, #0x80] /* get irq number */
+		and \irqnr, \irqnr, #0xff  /* irq is in bits 0-9 */
+		mov \tmp, \irqnr, lsr #3
+		and \tmp, \tmp, #0xfc
+		add \tmp, \tmp, #0x280 /* get the register offset */
+		ldr \irqstat, [\base, \tmp] /* get the intc status */
+		cmp \irqstat, #0x0
+#endif
+1002:
 		.endm
 
 		.macro	irq_prio_table
diff --git a/arch/arm/mach-davinci/include/mach/gpio.h b/arch/arm/mach-davinci/include/mach/gpio.h
index efe3281..ae07455 100644
--- a/arch/arm/mach-davinci/include/mach/gpio.h
+++ b/arch/arm/mach-davinci/include/mach/gpio.h
@@ -17,6 +17,7 @@
 #include <asm-generic/gpio.h>
 
 #include <mach/irqs.h>
+#include <mach/common.h>
 
 #define DAVINCI_GPIO_BASE 0x01C67000
 
@@ -67,15 +68,16 @@
 __gpio_to_controller(unsigned gpio)
 {
 	void *__iomem ptr;
+	void __iomem *base = davinci_soc_info.gpio_base;
 
 	if (gpio < 32 * 1)
-		ptr = IO_ADDRESS(DAVINCI_GPIO_BASE + 0x10);
+		ptr = base + 0x10;
 	else if (gpio < 32 * 2)
-		ptr = IO_ADDRESS(DAVINCI_GPIO_BASE + 0x38);
+		ptr = base + 0x38;
 	else if (gpio < 32 * 3)
-		ptr = IO_ADDRESS(DAVINCI_GPIO_BASE + 0x60);
+		ptr = base + 0x60;
 	else if (gpio < 32 * 4)
-		ptr = IO_ADDRESS(DAVINCI_GPIO_BASE + 0x88);
+		ptr = base + 0x88;
 	else
 		ptr = NULL;
 	return ptr;
@@ -142,13 +144,13 @@
 {
 	if (gpio >= DAVINCI_N_GPIO)
 		return -EINVAL;
-	return DAVINCI_N_AINTC_IRQ + gpio;
+	return davinci_soc_info.intc_irq_num + gpio;
 }
 
 static inline int irq_to_gpio(unsigned irq)
 {
 	/* caller guarantees gpio_to_irq() succeeded */
-	return irq - DAVINCI_N_AINTC_IRQ;
+	return irq - davinci_soc_info.intc_irq_num;
 }
 
 #endif				/* __DAVINCI_GPIO_H */
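/*
 * Illustrative sketch, not part of the patch: with the change above,
 * GPIO interrupt numbers simply follow the SoC's own interrupt
 * controller range (intc_irq_num), whatever its size.  The GPIO
 * number, trigger and name below are placeholders.
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <mach/gpio.h>

static irqreturn_t example_gpio_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int __init example_gpio_irq_setup(void)
{
	/* e.g. 64 + 12 on an AINTC-based SoC with 64 interrupts */
	return request_irq(gpio_to_irq(12), example_gpio_isr,
			   IRQF_TRIGGER_FALLING, "example-gpio", NULL);
}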
diff --git a/arch/arm/mach-davinci/include/mach/irqs.h b/arch/arm/mach-davinci/include/mach/irqs.h
index 1806607..bc5d6aa 100644
--- a/arch/arm/mach-davinci/include/mach/irqs.h
+++ b/arch/arm/mach-davinci/include/mach/irqs.h
@@ -30,6 +30,9 @@
 /* Base address */
 #define DAVINCI_ARM_INTC_BASE 0x01C48000
 
+#define DAVINCI_INTC_TYPE_AINTC		0
+#define DAVINCI_INTC_TYPE_CP_INTC	1
+
 /* Interrupt lines */
 #define IRQ_VDINT0       0
 #define IRQ_VDINT1       1
diff --git a/arch/arm/mach-davinci/include/mach/memory.h b/arch/arm/mach-davinci/include/mach/memory.h
index 86c25c7..c712c7c 100644
--- a/arch/arm/mach-davinci/include/mach/memory.h
+++ b/arch/arm/mach-davinci/include/mach/memory.h
@@ -21,7 +21,6 @@
  * Definitions
  **************************************************************************/
 #define DAVINCI_DDR_BASE    0x80000000
-#define DAVINCI_IRAM_BASE   0x00008000 /* ARM Internal RAM */
 
 #define PHYS_OFFSET DAVINCI_DDR_BASE
 
diff --git a/arch/arm/mach-davinci/include/mach/mmc.h b/arch/arm/mach-davinci/include/mach/mmc.h
new file mode 100644
index 0000000..5a85e24
--- /dev/null
+++ b/arch/arm/mach-davinci/include/mach/mmc.h
@@ -0,0 +1,33 @@
+/*
+ *  Board-specific MMC configuration
+ */
+
+#ifndef _DAVINCI_MMC_H
+#define _DAVINCI_MMC_H
+
+#include <linux/types.h>
+#include <linux/mmc/host.h>
+
+struct davinci_mmc_config {
+	/* get_cd()/get_ro() may sleep */
+	int	(*get_cd)(int module);
+	int	(*get_ro)(int module);
+	/* wires == 0 is equivalent to wires == 4 (4-bit parallel) */
+	u8	wires;
+
+	u32     max_freq;
+
+	/* any additional host capabilities: OR'd in to mmc->caps */
+	u32     caps;
+
+	/* Version of the MMC/SD controller */
+	u8	version;
+};
+void davinci_setup_mmc(int module, struct davinci_mmc_config *config);
+
+enum {
+	MMC_CTLR_VERSION_1 = 0,	/* DM644x and DM355 */
+	MMC_CTLR_VERSION_2,	/* DA830 */
+};
+
+#endif
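/*
 * Illustrative sketch, not part of the patch: a board file fills
 * struct davinci_mmc_config with its own card-detect/write-protect
 * callbacks and passes it to davinci_setup_mmc().  All values below
 * are placeholders.
 */
#include <linux/init.h>
#include <mach/mmc.h>

static int example_mmc_get_cd(int module)
{
	return 1;	/* pretend a card is always present */
}

static struct davinci_mmc_config example_mmc_config = {
	.get_cd		= example_mmc_get_cd,
	.wires		= 4,
	.max_freq	= 50000000,
	.caps		= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED,
	.version	= MMC_CTLR_VERSION_1,	/* DM644x/DM355 */
};

static void __init example_board_mmc_init(void)
{
	davinci_setup_mmc(0, &example_mmc_config);
}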
diff --git a/arch/arm/mach-davinci/include/mach/mux.h b/arch/arm/mach-davinci/include/mach/mux.h
index bae22cb..2737845 100644
--- a/arch/arm/mach-davinci/include/mach/mux.h
+++ b/arch/arm/mach-davinci/include/mach/mux.h
@@ -19,16 +19,6 @@
 #ifndef __INC_MACH_MUX_H
 #define __INC_MACH_MUX_H
 
-/* System module registers */
-#define PINMUX0			0x00
-#define PINMUX1			0x04
-/* dm355 only */
-#define PINMUX2			0x08
-#define PINMUX3			0x0c
-#define PINMUX4			0x10
-#define INTMUX			0x18
-#define EVTMUX			0x1c
-
 struct mux_config {
 	const char *name;
 	const char *mux_reg_name;
@@ -168,15 +158,9 @@
 
 #ifdef CONFIG_DAVINCI_MUX
 /* setup pin muxing */
-extern void davinci_mux_init(void);
-extern int davinci_mux_register(const struct mux_config *pins,
-				unsigned long size);
 extern int davinci_cfg_reg(unsigned long reg_cfg);
 #else
 /* boot loader does it all (no warnings from CONFIG_DAVINCI_MUX_WARNINGS) */
-static inline void davinci_mux_init(void) {}
-static inline int davinci_mux_register(const struct mux_config *pins,
-				       unsigned long size) { return 0; }
 static inline int davinci_cfg_reg(unsigned long reg_cfg) { return 0; }
 #endif
 
diff --git a/arch/arm/mach-davinci/include/mach/psc.h b/arch/arm/mach-davinci/include/mach/psc.h
index 55a90d4..ab8a258 100644
--- a/arch/arm/mach-davinci/include/mach/psc.h
+++ b/arch/arm/mach-davinci/include/mach/psc.h
@@ -27,6 +27,8 @@
 #ifndef __ASM_ARCH_PSC_H
 #define __ASM_ARCH_PSC_H
 
+#define	DAVINCI_PWR_SLEEP_CNTRL_BASE	0x01C41000
+
 /* Power and Sleep Controller (PSC) Domains */
 #define DAVINCI_GPSC_ARMDOMAIN      0
 #define DAVINCI_GPSC_DSPDOMAIN      1
@@ -116,8 +118,8 @@
 #define DM646X_LPSC_TIMER1         35
 #define DM646X_LPSC_ARM_INTC       45
 
-extern int davinci_psc_is_clk_active(unsigned int id);
-extern void davinci_psc_config(unsigned int domain, unsigned int id,
-			       char enable);
+extern int davinci_psc_is_clk_active(unsigned int ctlr, unsigned int id);
+extern void davinci_psc_config(unsigned int domain, unsigned int ctlr,
+		unsigned int id, char enable);
 
 #endif /* __ASM_ARCH_PSC_H */
diff --git a/arch/arm/mach-davinci/include/mach/serial.h b/arch/arm/mach-davinci/include/mach/serial.h
index 632847d..794fa5c 100644
--- a/arch/arm/mach-davinci/include/mach/serial.h
+++ b/arch/arm/mach-davinci/include/mach/serial.h
@@ -18,8 +18,6 @@
 #define DAVINCI_UART1_BASE	(IO_PHYS + 0x20400)
 #define DAVINCI_UART2_BASE	(IO_PHYS + 0x20800)
 
-#define DM355_UART2_BASE	(IO_PHYS + 0x206000)
-
 /* DaVinci UART register offsets */
 #define UART_DAVINCI_PWREMU		0x0c
 #define UART_DM646X_SCR			0x10
@@ -30,6 +28,6 @@
 	unsigned int enabled_uarts;
 };
 
-extern void davinci_serial_init(struct davinci_uart_config *);
+extern int davinci_serial_init(struct davinci_uart_config *);
 
 #endif /* __ASM_ARCH_SERIAL_H */
diff --git a/arch/arm/mach-davinci/include/mach/sram.h b/arch/arm/mach-davinci/include/mach/sram.h
new file mode 100644
index 0000000..111f7cc
--- /dev/null
+++ b/arch/arm/mach-davinci/include/mach/sram.h
@@ -0,0 +1,27 @@
+/*
+ * mach/sram.h - DaVinci simple SRAM allocator
+ *
+ * Copyright (C) 2009 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __MACH_SRAM_H
+#define __MACH_SRAM_H
+
+/* ARBITRARY:  SRAM allocations are multiples of this 2^N size */
+#define SRAM_GRANULARITY	512
+
+/*
+ * SRAM allocations return a CPU virtual address, or NULL on error.
+ * If a DMA address is requested and the SRAM supports DMA, its
+ * mapped address is also returned.
+ *
+ * Errors include SRAM memory not being available, and requesting
+ * DMA mapped SRAM on systems which don't allow that.
+ */
+extern void *sram_alloc(size_t len, dma_addr_t *dma);
+extern void sram_free(void *addr, size_t len);
+
+#endif /* __MACH_SRAM_H */
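/*
 * Illustrative sketch, not part of the patch: typical use of the
 * allocator declared above.  A driver grabs a DMA-capable chunk of
 * on-chip SRAM and gives it back with the same length when done.
 * Names below are placeholders.
 */
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <mach/sram.h>

static void *example_sram_buf;
static dma_addr_t example_sram_dma;

static int example_sram_get(size_t len)
{
	example_sram_buf = sram_alloc(len, &example_sram_dma);
	if (!example_sram_buf)
		return -ENOMEM;	/* no SRAM, or SRAM not DMA-capable */
	return 0;
}

static void example_sram_put(size_t len)
{
	sram_free(example_sram_buf, len);
}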
diff --git a/arch/arm/mach-davinci/include/mach/time.h b/arch/arm/mach-davinci/include/mach/time.h
new file mode 100644
index 0000000..1c971d8
--- /dev/null
+++ b/arch/arm/mach-davinci/include/mach/time.h
@@ -0,0 +1,35 @@
+/*
+ * Local header file for DaVinci time code.
+ *
+ * Author: Kevin Hilman, MontaVista Software, Inc. <source@mvista.com>
+ *
+ * 2007 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+#ifndef __ARCH_ARM_MACH_DAVINCI_TIME_H
+#define __ARCH_ARM_MACH_DAVINCI_TIME_H
+
+#define DAVINCI_TIMER0_BASE		(IO_PHYS + 0x21400)
+#define DAVINCI_TIMER1_BASE		(IO_PHYS + 0x21800)
+#define DAVINCI_WDOG_BASE		(IO_PHYS + 0x21C00)
+
+enum {
+	T0_BOT,
+	T0_TOP,
+	T1_BOT,
+	T1_TOP,
+	NUM_TIMERS
+};
+
+#define IS_TIMER1(id)		(id & 0x2)
+#define IS_TIMER0(id)		(!IS_TIMER1(id))
+#define IS_TIMER_TOP(id)	((id & 0x1))
+#define IS_TIMER_BOT(id)	(!IS_TIMER_TOP(id))
+
+#define ID_TO_TIMER(id)		(IS_TIMER1(id) != 0)
+
+extern struct davinci_timer_instance davinci_timer_instance[];
+
+#endif /* __ARCH_ARM_MACH_DAVINCI_TIME_H */
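/*
 * Worked example, not part of the patch: how the helpers above
 * decompose a timer id.  T1_BOT (2) lives on 64-bit timer 1 and uses
 * its bottom 32-bit half, so:
 *
 *	IS_TIMER1(T1_BOT)    -> nonzero	(id bit 1 set)
 *	IS_TIMER_BOT(T1_BOT) -> 1	(id bit 0 clear)
 *	ID_TO_TIMER(T1_BOT)  -> 1	(index into davinci_timer_instance[])
 */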
diff --git a/arch/arm/mach-davinci/include/mach/uncompress.h b/arch/arm/mach-davinci/include/mach/uncompress.h
index 8c165de..1e27475 100644
--- a/arch/arm/mach-davinci/include/mach/uncompress.h
+++ b/arch/arm/mach-davinci/include/mach/uncompress.h
@@ -13,11 +13,24 @@
 #include <linux/serial_reg.h>
 #include <mach/serial.h>
 
+#include <asm/mach-types.h>
+
+extern unsigned int __machine_arch_type;
+
+static u32 *uart;
+
+static u32 *get_uart_base(void)
+{
+	/* Add logic here for new platforms, using __machine_arch_type */
+	return (u32 *)DAVINCI_UART0_BASE;
+}
+
 /* PORT_16C550A, in polled non-fifo mode */
 
 static void putc(char c)
 {
-	volatile u32 *uart = (volatile void *) DAVINCI_UART0_BASE;
+	if (!uart)
+		uart = get_uart_base();
 
 	while (!(uart[UART_LSR] & UART_LSR_THRE))
 		barrier();
@@ -26,7 +39,9 @@
 
 static inline void flush(void)
 {
-	volatile u32 *uart = (volatile void *) DAVINCI_UART0_BASE;
+	if (!uart)
+		uart = get_uart_base();
+
 	while (!(uart[UART_LSR] & UART_LSR_THRE))
 		barrier();
 }
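/*
 * Illustrative sketch, not part of the patch: how get_uart_base()
 * above could be extended for a board whose console is not on UART0.
 * MACH_TYPE_EXAMPLE_BOARD and its UART1 console are placeholders.
 */
static u32 *get_uart_base(void)
{
	if (__machine_arch_type == MACH_TYPE_EXAMPLE_BOARD)
		return (u32 *)DAVINCI_UART1_BASE;

	return (u32 *)DAVINCI_UART0_BASE;
}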
diff --git a/arch/arm/mach-davinci/io.c b/arch/arm/mach-davinci/io.c
index a548abb..49912b4 100644
--- a/arch/arm/mach-davinci/io.c
+++ b/arch/arm/mach-davinci/io.c
@@ -9,47 +9,9 @@
  */
 
 #include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/io.h>
 
 #include <asm/tlb.h>
-#include <asm/memory.h>
-
-#include <asm/mach/map.h>
-#include <mach/clock.h>
-
-extern void davinci_check_revision(void);
-
-/*
- * The machine specific code may provide the extra mapping besides the
- * default mapping provided here.
- */
-static struct map_desc davinci_io_desc[] __initdata = {
-	{
-		.virtual	= IO_VIRT,
-		.pfn		= __phys_to_pfn(IO_PHYS),
-		.length		= IO_SIZE,
-		.type		= MT_DEVICE
-	},
-};
-
-void __init davinci_map_common_io(void)
-{
-	iotable_init(davinci_io_desc, ARRAY_SIZE(davinci_io_desc));
-
-	/* Normally devicemaps_init() would flush caches and tlb after
-	 * mdesc->map_io(), but we must also do it here because of the CPU
-	 * revision check below.
-	 */
-	local_flush_tlb_all();
-	flush_cache_all();
-
-	/* We want to check CPU revision early for cpu_is_xxxx() macros.
-	 * IO space mapping must be initialized before we can do that.
-	 */
-	davinci_check_revision();
-}
 
 #define BETWEEN(p, st, sz)	((p) >= (st) && (p) < ((st) + (sz)))
 #define XLATE(p, pst, vst)	((void __iomem *)((p) - (pst) + (vst)))
diff --git a/arch/arm/mach-davinci/irq.c b/arch/arm/mach-davinci/irq.c
index 5a324c9..af92ffe 100644
--- a/arch/arm/mach-davinci/irq.c
+++ b/arch/arm/mach-davinci/irq.c
@@ -26,6 +26,7 @@
 
 #include <mach/hardware.h>
 #include <mach/cputype.h>
+#include <mach/common.h>
 #include <asm/mach/irq.h>
 
 #define IRQ_BIT(irq)		((irq) & 0x1f)
@@ -41,18 +42,14 @@
 #define IRQ_INTPRI0_REG_OFFSET	0x0030
 #define IRQ_INTPRI7_REG_OFFSET	0x004C
 
-const u8 *davinci_def_priorities;
-
-#define INTC_BASE IO_ADDRESS(DAVINCI_ARM_INTC_BASE)
-
 static inline unsigned int davinci_irq_readl(int offset)
 {
-	return __raw_readl(INTC_BASE + offset);
+	return __raw_readl(davinci_intc_base + offset);
 }
 
 static inline void davinci_irq_writel(unsigned long value, int offset)
 {
-	__raw_writel(value, INTC_BASE + offset);
+	__raw_writel(value, davinci_intc_base + offset);
 }
 
 /* Disable interrupt */
@@ -113,217 +110,11 @@
 	.unmask = davinci_unmask_irq,
 };
 
-/* FIQ are pri 0-1; otherwise 2-7, with 7 lowest priority */
-static const u8 dm644x_default_priorities[DAVINCI_N_AINTC_IRQ] __initdata = {
-	[IRQ_VDINT0]		= 2,
-	[IRQ_VDINT1]		= 6,
-	[IRQ_VDINT2]		= 6,
-	[IRQ_HISTINT]		= 6,
-	[IRQ_H3AINT]		= 6,
-	[IRQ_PRVUINT]		= 6,
-	[IRQ_RSZINT]		= 6,
-	[7]			= 7,
-	[IRQ_VENCINT]		= 6,
-	[IRQ_ASQINT]		= 6,
-	[IRQ_IMXINT]		= 6,
-	[IRQ_VLCDINT]		= 6,
-	[IRQ_USBINT]		= 4,
-	[IRQ_EMACINT]		= 4,
-	[14]			= 7,
-	[15]			= 7,
-	[IRQ_CCINT0]		= 5,	/* dma */
-	[IRQ_CCERRINT]		= 5,	/* dma */
-	[IRQ_TCERRINT0]		= 5,	/* dma */
-	[IRQ_TCERRINT]		= 5,	/* dma */
-	[IRQ_PSCIN]		= 7,
-	[21]			= 7,
-	[IRQ_IDE]		= 4,
-	[23]			= 7,
-	[IRQ_MBXINT]		= 7,
-	[IRQ_MBRINT]		= 7,
-	[IRQ_MMCINT]		= 7,
-	[IRQ_SDIOINT]		= 7,
-	[28]			= 7,
-	[IRQ_DDRINT]		= 7,
-	[IRQ_AEMIFINT]		= 7,
-	[IRQ_VLQINT]		= 4,
-	[IRQ_TINT0_TINT12]	= 2,	/* clockevent */
-	[IRQ_TINT0_TINT34]	= 2,	/* clocksource */
-	[IRQ_TINT1_TINT12]	= 7,	/* DSP timer */
-	[IRQ_TINT1_TINT34]	= 7,	/* system tick */
-	[IRQ_PWMINT0]		= 7,
-	[IRQ_PWMINT1]		= 7,
-	[IRQ_PWMINT2]		= 7,
-	[IRQ_I2C]		= 3,
-	[IRQ_UARTINT0]		= 3,
-	[IRQ_UARTINT1]		= 3,
-	[IRQ_UARTINT2]		= 3,
-	[IRQ_SPINT0]		= 3,
-	[IRQ_SPINT1]		= 3,
-	[45]			= 7,
-	[IRQ_DSP2ARM0]		= 4,
-	[IRQ_DSP2ARM1]		= 4,
-	[IRQ_GPIO0]		= 7,
-	[IRQ_GPIO1]		= 7,
-	[IRQ_GPIO2]		= 7,
-	[IRQ_GPIO3]		= 7,
-	[IRQ_GPIO4]		= 7,
-	[IRQ_GPIO5]		= 7,
-	[IRQ_GPIO6]		= 7,
-	[IRQ_GPIO7]		= 7,
-	[IRQ_GPIOBNK0]		= 7,
-	[IRQ_GPIOBNK1]		= 7,
-	[IRQ_GPIOBNK2]		= 7,
-	[IRQ_GPIOBNK3]		= 7,
-	[IRQ_GPIOBNK4]		= 7,
-	[IRQ_COMMTX]		= 7,
-	[IRQ_COMMRX]		= 7,
-	[IRQ_EMUINT]		= 7,
-};
-
-static const u8 dm646x_default_priorities[DAVINCI_N_AINTC_IRQ] = {
-	[IRQ_DM646X_VP_VERTINT0]        = 7,
-	[IRQ_DM646X_VP_VERTINT1]        = 7,
-	[IRQ_DM646X_VP_VERTINT2]        = 7,
-	[IRQ_DM646X_VP_VERTINT3]        = 7,
-	[IRQ_DM646X_VP_ERRINT]          = 7,
-	[IRQ_DM646X_RESERVED_1]         = 7,
-	[IRQ_DM646X_RESERVED_2]         = 7,
-	[IRQ_DM646X_WDINT]              = 7,
-	[IRQ_DM646X_CRGENINT0]          = 7,
-	[IRQ_DM646X_CRGENINT1]          = 7,
-	[IRQ_DM646X_TSIFINT0]           = 7,
-	[IRQ_DM646X_TSIFINT1]           = 7,
-	[IRQ_DM646X_VDCEINT]            = 7,
-	[IRQ_DM646X_USBINT]             = 7,
-	[IRQ_DM646X_USBDMAINT]          = 7,
-	[IRQ_DM646X_PCIINT]             = 7,
-	[IRQ_CCINT0]                    = 7,    /* dma */
-	[IRQ_CCERRINT]                  = 7,    /* dma */
-	[IRQ_TCERRINT0]                 = 7,    /* dma */
-	[IRQ_TCERRINT]                  = 7,    /* dma */
-	[IRQ_DM646X_TCERRINT2]          = 7,
-	[IRQ_DM646X_TCERRINT3]          = 7,
-	[IRQ_DM646X_IDE]                = 7,
-	[IRQ_DM646X_HPIINT]             = 7,
-	[IRQ_DM646X_EMACRXTHINT]        = 7,
-	[IRQ_DM646X_EMACRXINT]          = 7,
-	[IRQ_DM646X_EMACTXINT]          = 7,
-	[IRQ_DM646X_EMACMISCINT]        = 7,
-	[IRQ_DM646X_MCASP0TXINT]        = 7,
-	[IRQ_DM646X_MCASP0RXINT]        = 7,
-	[IRQ_AEMIFINT]                  = 7,
-	[IRQ_DM646X_RESERVED_3]         = 7,
-	[IRQ_DM646X_MCASP1TXINT]        = 7,    /* clockevent */
-	[IRQ_TINT0_TINT34]              = 7,    /* clocksource */
-	[IRQ_TINT1_TINT12]              = 7,    /* DSP timer */
-	[IRQ_TINT1_TINT34]              = 7,    /* system tick */
-	[IRQ_PWMINT0]                   = 7,
-	[IRQ_PWMINT1]                   = 7,
-	[IRQ_DM646X_VLQINT]             = 7,
-	[IRQ_I2C]                       = 7,
-	[IRQ_UARTINT0]                  = 7,
-	[IRQ_UARTINT1]                  = 7,
-	[IRQ_DM646X_UARTINT2]           = 7,
-	[IRQ_DM646X_SPINT0]             = 7,
-	[IRQ_DM646X_SPINT1]             = 7,
-	[IRQ_DM646X_DSP2ARMINT]         = 7,
-	[IRQ_DM646X_RESERVED_4]         = 7,
-	[IRQ_DM646X_PSCINT]             = 7,
-	[IRQ_DM646X_GPIO0]              = 7,
-	[IRQ_DM646X_GPIO1]              = 7,
-	[IRQ_DM646X_GPIO2]              = 7,
-	[IRQ_DM646X_GPIO3]              = 7,
-	[IRQ_DM646X_GPIO4]              = 7,
-	[IRQ_DM646X_GPIO5]              = 7,
-	[IRQ_DM646X_GPIO6]              = 7,
-	[IRQ_DM646X_GPIO7]              = 7,
-	[IRQ_DM646X_GPIOBNK0]           = 7,
-	[IRQ_DM646X_GPIOBNK1]           = 7,
-	[IRQ_DM646X_GPIOBNK2]           = 7,
-	[IRQ_DM646X_DDRINT]             = 7,
-	[IRQ_DM646X_AEMIFINT]           = 7,
-	[IRQ_COMMTX]                    = 7,
-	[IRQ_COMMRX]                    = 7,
-	[IRQ_EMUINT]                    = 7,
-};
-
-static const u8 dm355_default_priorities[DAVINCI_N_AINTC_IRQ] = {
-	[IRQ_DM355_CCDC_VDINT0]		= 2,
-	[IRQ_DM355_CCDC_VDINT1]		= 6,
-	[IRQ_DM355_CCDC_VDINT2]		= 6,
-	[IRQ_DM355_IPIPE_HST]		= 6,
-	[IRQ_DM355_H3AINT]		= 6,
-	[IRQ_DM355_IPIPE_SDR]		= 6,
-	[IRQ_DM355_IPIPEIFINT]		= 6,
-	[IRQ_DM355_OSDINT]		= 7,
-	[IRQ_DM355_VENCINT]		= 6,
-	[IRQ_ASQINT]			= 6,
-	[IRQ_IMXINT]			= 6,
-	[IRQ_USBINT]			= 4,
-	[IRQ_DM355_RTOINT]		= 4,
-	[IRQ_DM355_UARTINT2]		= 7,
-	[IRQ_DM355_TINT6]		= 7,
-	[IRQ_CCINT0]			= 5,	/* dma */
-	[IRQ_CCERRINT]			= 5,	/* dma */
-	[IRQ_TCERRINT0]			= 5,	/* dma */
-	[IRQ_TCERRINT]			= 5,	/* dma */
-	[IRQ_DM355_SPINT2_1]		= 7,
-	[IRQ_DM355_TINT7]		= 4,
-	[IRQ_DM355_SDIOINT0]		= 7,
-	[IRQ_MBXINT]			= 7,
-	[IRQ_MBRINT]			= 7,
-	[IRQ_MMCINT]			= 7,
-	[IRQ_DM355_MMCINT1]		= 7,
-	[IRQ_DM355_PWMINT3]		= 7,
-	[IRQ_DDRINT]			= 7,
-	[IRQ_AEMIFINT]			= 7,
-	[IRQ_DM355_SDIOINT1]		= 4,
-	[IRQ_TINT0_TINT12]		= 2,	/* clockevent */
-	[IRQ_TINT0_TINT34]		= 2,	/* clocksource */
-	[IRQ_TINT1_TINT12]		= 7,	/* DSP timer */
-	[IRQ_TINT1_TINT34]		= 7,	/* system tick */
-	[IRQ_PWMINT0]			= 7,
-	[IRQ_PWMINT1]			= 7,
-	[IRQ_PWMINT2]			= 7,
-	[IRQ_I2C]			= 3,
-	[IRQ_UARTINT0]			= 3,
-	[IRQ_UARTINT1]			= 3,
-	[IRQ_DM355_SPINT0_0]		= 3,
-	[IRQ_DM355_SPINT0_1]		= 3,
-	[IRQ_DM355_GPIO0]		= 3,
-	[IRQ_DM355_GPIO1]		= 7,
-	[IRQ_DM355_GPIO2]		= 4,
-	[IRQ_DM355_GPIO3]		= 4,
-	[IRQ_DM355_GPIO4]		= 7,
-	[IRQ_DM355_GPIO5]		= 7,
-	[IRQ_DM355_GPIO6]		= 7,
-	[IRQ_DM355_GPIO7]		= 7,
-	[IRQ_DM355_GPIO8]		= 7,
-	[IRQ_DM355_GPIO9]		= 7,
-	[IRQ_DM355_GPIOBNK0]		= 7,
-	[IRQ_DM355_GPIOBNK1]		= 7,
-	[IRQ_DM355_GPIOBNK2]		= 7,
-	[IRQ_DM355_GPIOBNK3]		= 7,
-	[IRQ_DM355_GPIOBNK4]		= 7,
-	[IRQ_DM355_GPIOBNK5]		= 7,
-	[IRQ_DM355_GPIOBNK6]		= 7,
-	[IRQ_COMMTX]			= 7,
-	[IRQ_COMMRX]			= 7,
-	[IRQ_EMUINT]			= 7,
-};
-
 /* ARM Interrupt Controller Initialization */
 void __init davinci_irq_init(void)
 {
 	unsigned i;
-
-	if (cpu_is_davinci_dm644x())
-		davinci_def_priorities = dm644x_default_priorities;
-	else if (cpu_is_davinci_dm646x())
-		davinci_def_priorities = dm646x_default_priorities;
-	else if (cpu_is_davinci_dm355())
-		davinci_def_priorities = dm355_default_priorities;
+	const u8 *davinci_def_priorities = davinci_soc_info.intc_irq_prios;
 
 	/* Clear all interrupt requests */
 	davinci_irq_writel(~0x0, FIQ_REG0_OFFSET);
diff --git a/arch/arm/mach-davinci/mux.c b/arch/arm/mach-davinci/mux.c
index bbba0b2..d310f57 100644
--- a/arch/arm/mach-davinci/mux.c
+++ b/arch/arm/mach-davinci/mux.c
@@ -21,18 +21,7 @@
 
 #include <mach/hardware.h>
 #include <mach/mux.h>
-
-static const struct mux_config *mux_table;
-static unsigned long pin_table_sz;
-
-int __init davinci_mux_register(const struct mux_config *pins,
-				unsigned long size)
-{
-	mux_table = pins;
-	pin_table_sz = size;
-
-	return 0;
-}
+#include <mach/common.h>
 
 /*
  * Sets the DAVINCI MUX register based on the table
@@ -40,23 +29,24 @@
 int __init_or_module davinci_cfg_reg(const unsigned long index)
 {
 	static DEFINE_SPINLOCK(mux_spin_lock);
-	void __iomem *base = IO_ADDRESS(DAVINCI_SYSTEM_MODULE_BASE);
+	struct davinci_soc_info *soc_info = &davinci_soc_info;
+	void __iomem *base = soc_info->pinmux_base;
 	unsigned long flags;
 	const struct mux_config *cfg;
 	unsigned int reg_orig = 0, reg = 0;
 	unsigned int mask, warn = 0;
 
-	if (!mux_table)
+	if (!soc_info->pinmux_pins)
 		BUG();
 
-	if (index >= pin_table_sz) {
+	if (index >= soc_info->pinmux_pins_num) {
 		printk(KERN_ERR "Invalid pin mux index: %lu (%lu)\n",
-		       index, pin_table_sz);
+		       index, soc_info->pinmux_pins_num);
 		dump_stack();
 		return -ENODEV;
 	}
 
-	cfg = &mux_table[index];
+	cfg = &soc_info->pinmux_pins[index];
 
 	if (cfg->name == NULL) {
 		printk(KERN_ERR "No entry for the specified index\n");
diff --git a/arch/arm/mach-davinci/psc.c b/arch/arm/mach-davinci/psc.c
index 84171ab..a78b657 100644
--- a/arch/arm/mach-davinci/psc.c
+++ b/arch/arm/mach-davinci/psc.c
@@ -28,8 +28,6 @@
 #include <mach/psc.h>
 #include <mach/mux.h>
 
-#define DAVINCI_PWR_SLEEP_CNTRL_BASE 0x01C41000
-
 /* PSC register offsets */
 #define EPCPR		0x070
 #define PTCMD		0x120
@@ -42,22 +40,42 @@
 #define MDSTAT_STATE_MASK 0x1f
 
 /* Return nonzero iff the domain's clock is active */
-int __init davinci_psc_is_clk_active(unsigned int id)
+int __init davinci_psc_is_clk_active(unsigned int ctlr, unsigned int id)
 {
-	void __iomem *psc_base = IO_ADDRESS(DAVINCI_PWR_SLEEP_CNTRL_BASE);
-	u32 mdstat = __raw_readl(psc_base + MDSTAT + 4 * id);
+	void __iomem *psc_base;
+	u32 mdstat;
+	struct davinci_soc_info *soc_info = &davinci_soc_info;
+
+	if (!soc_info->psc_bases || (ctlr >= soc_info->psc_bases_num)) {
+		pr_warning("PSC: Bad psc data: 0x%x[%d]\n",
+				(int)soc_info->psc_bases, ctlr);
+		return 0;
+	}
+
+	psc_base = soc_info->psc_bases[ctlr];
+	mdstat = __raw_readl(psc_base + MDSTAT + 4 * id);
 
 	/* if clocked, state can be "Enable" or "SyncReset" */
 	return mdstat & BIT(12);
 }
 
 /* Enable or disable a PSC domain */
-void davinci_psc_config(unsigned int domain, unsigned int id, char enable)
+void davinci_psc_config(unsigned int domain, unsigned int ctlr,
+		unsigned int id, char enable)
 {
 	u32 epcpr, ptcmd, ptstat, pdstat, pdctl1, mdstat, mdctl;
-	void __iomem *psc_base = IO_ADDRESS(DAVINCI_PWR_SLEEP_CNTRL_BASE);
+	void __iomem *psc_base;
+	struct davinci_soc_info *soc_info = &davinci_soc_info;
 	u32 next_state = enable ? 0x3 : 0x2; /* 0x3 enables, 0x2 disables */
 
+	if (!soc_info->psc_bases || (ctlr >= soc_info->psc_bases_num)) {
+		pr_warning("PSC: Bad psc data: 0x%x[%d]\n",
+				(int)soc_info->psc_bases, ctlr);
+		return;
+	}
+
+	psc_base = soc_info->psc_bases[ctlr];
+
 	mdctl = __raw_readl(psc_base + MDCTL + 4 * id);
 	mdctl &= ~MDSTAT_STATE_MASK;
 	mdctl |= next_state;
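/*
 * Illustrative sketch, not part of the patch: how clock code is
 * expected to call the reworked helpers above, passing the PSC
 * controller index alongside the module id.  DM646X_LPSC_UART0 and
 * DAVINCI_GPSC_ARMDOMAIN come from <mach/psc.h>; treating controller 0
 * as the only PSC is an assumption of this example.
 */
#include <mach/psc.h>

static void example_enable_uart0_psc(void)
{
	if (!davinci_psc_is_clk_active(0, DM646X_LPSC_UART0))
		davinci_psc_config(DAVINCI_GPSC_ARMDOMAIN, 0,
				   DM646X_LPSC_UART0, 1);
}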
diff --git a/arch/arm/mach-davinci/serial.c b/arch/arm/mach-davinci/serial.c
index 6950757..c530c73 100644
--- a/arch/arm/mach-davinci/serial.c
+++ b/arch/arm/mach-davinci/serial.c
@@ -33,6 +33,8 @@
 #include <mach/serial.h>
 #include <mach/irqs.h>
 #include <mach/cputype.h>
+#include <mach/common.h>
+
 #include "clock.h"
 
 static inline unsigned int serial_read_reg(struct plat_serial8250_port *up,
@@ -49,44 +51,6 @@
 	__raw_writel(value, IO_ADDRESS(p->mapbase) + offset);
 }
 
-static struct plat_serial8250_port serial_platform_data[] = {
-	{
-		.mapbase	= DAVINCI_UART0_BASE,
-		.irq		= IRQ_UARTINT0,
-		.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
-				  UPF_IOREMAP,
-		.iotype		= UPIO_MEM,
-		.regshift	= 2,
-	},
-	{
-		.mapbase	= DAVINCI_UART1_BASE,
-		.irq		= IRQ_UARTINT1,
-		.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
-				  UPF_IOREMAP,
-		.iotype		= UPIO_MEM,
-		.regshift	= 2,
-	},
-	{
-		.mapbase	= DAVINCI_UART2_BASE,
-		.irq		= IRQ_UARTINT2,
-		.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
-				  UPF_IOREMAP,
-		.iotype		= UPIO_MEM,
-		.regshift	= 2,
-	},
-	{
-		.flags		= 0
-	},
-};
-
-static struct platform_device serial_device = {
-	.name			= "serial8250",
-	.id			= PLAT8250_DEV_PLATFORM,
-	.dev			= {
-		.platform_data	= serial_platform_data,
-	},
-};
-
 static void __init davinci_serial_reset(struct plat_serial8250_port *p)
 {
 	unsigned int pwremu = 0;
@@ -106,35 +70,22 @@
 				 UART_DM646X_SCR_TX_WATERMARK);
 }
 
-void __init davinci_serial_init(struct davinci_uart_config *info)
+int __init davinci_serial_init(struct davinci_uart_config *info)
 {
 	int i;
 	char name[16];
 	struct clk *uart_clk;
-	struct device *dev = &serial_device.dev;
+	struct davinci_soc_info *soc_info = &davinci_soc_info;
+	struct device *dev = &soc_info->serial_dev->dev;
+	struct plat_serial8250_port *p = dev->platform_data;
 
 	/*
 	 * Make sure the serial ports are muxed on at this point.
-	 * You have to mux them off in device drivers later on
-	 * if not needed.
+	 * You have to mux them off in device drivers later on if not needed.
 	 */
-	for (i = 0; i < DAVINCI_MAX_NR_UARTS; i++) {
-		struct plat_serial8250_port *p = serial_platform_data + i;
-
-		if (!(info->enabled_uarts & (1 << i))) {
-			p->flags = 0;
+	for (i = 0; i < DAVINCI_MAX_NR_UARTS; i++, p++) {
+		if (!(info->enabled_uarts & (1 << i)))
 			continue;
-		}
-
-		if (cpu_is_davinci_dm646x())
-			p->iotype = UPIO_MEM32;
-
-		if (cpu_is_davinci_dm355()) {
-			if (i == 2) {
-				p->mapbase = (unsigned long)DM355_UART2_BASE;
-				p->irq = IRQ_DM355_UARTINT2;
-			}
-		}
 
 		sprintf(name, "uart%d", i);
 		uart_clk = clk_get(dev, name);
@@ -147,11 +98,6 @@
 			davinci_serial_reset(p);
 		}
 	}
-}
 
-static int __init davinci_init(void)
-{
-	return platform_device_register(&serial_device);
+	return platform_device_register(soc_info->serial_dev);
 }
-
-arch_initcall(davinci_init);
diff --git a/arch/arm/mach-davinci/sram.c b/arch/arm/mach-davinci/sram.c
new file mode 100644
index 0000000..db54b2a
--- /dev/null
+++ b/arch/arm/mach-davinci/sram.c
@@ -0,0 +1,74 @@
+/*
+ * mach-davinci/sram.c - DaVinci simple SRAM allocator
+ *
+ * Copyright (C) 2009 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/genalloc.h>
+
+#include <mach/common.h>
+#include <mach/memory.h>
+#include <mach/sram.h>
+
+
+static struct gen_pool *sram_pool;
+
+void *sram_alloc(size_t len, dma_addr_t *dma)
+{
+	unsigned long vaddr;
+	dma_addr_t dma_base = davinci_soc_info.sram_dma;
+
+	if (dma)
+		*dma = 0;
+	if (!sram_pool || (dma && !dma_base))
+		return NULL;
+
+	vaddr = gen_pool_alloc(sram_pool, len);
+	if (!vaddr)
+		return NULL;
+
+	if (dma)
+		*dma = dma_base + (vaddr - SRAM_VIRT);
+	return (void *)vaddr;
+
+}
+EXPORT_SYMBOL(sram_alloc);
+
+void sram_free(void *addr, size_t len)
+{
+	gen_pool_free(sram_pool, (unsigned long) addr, len);
+}
+EXPORT_SYMBOL(sram_free);
+
+
+/*
+ * REVISIT This supports CPU and DMA access to/from SRAM, but it
+ * doesn't (yet?) support some other notable uses of SRAM:  as TCM
+ * for data and/or instructions; and holding code needed to enter
+ * and exit suspend states (while DRAM can't be used).
+ */
+static int __init sram_init(void)
+{
+	unsigned len = davinci_soc_info.sram_len;
+	int status = 0;
+
+	if (len) {
+		len = min(len, SRAM_SIZE);
+		sram_pool = gen_pool_create(ilog2(SRAM_GRANULARITY), -1);
+		if (!sram_pool)
+			status = -ENOMEM;
+	}
+	if (sram_pool)
+		status = gen_pool_add(sram_pool, SRAM_VIRT, len, -1);
+	WARN_ON(status < 0);
+	return status;
+}
+core_initcall(sram_init);
+
diff --git a/arch/arm/mach-davinci/time.c b/arch/arm/mach-davinci/time.c
index 494e01b..0884ca5 100644
--- a/arch/arm/mach-davinci/time.c
+++ b/arch/arm/mach-davinci/time.c
@@ -19,6 +19,7 @@
 #include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/device.h>
+#include <linux/platform_device.h>
 
 #include <mach/hardware.h>
 #include <asm/system.h>
@@ -28,52 +29,41 @@
 #include <asm/errno.h>
 #include <mach/io.h>
 #include <mach/cputype.h>
+#include <mach/time.h>
 #include "clock.h"
 
 static struct clock_event_device clockevent_davinci;
 static unsigned int davinci_clock_tick_rate;
 
-#define DAVINCI_TIMER0_BASE (IO_PHYS + 0x21400)
-#define DAVINCI_TIMER1_BASE (IO_PHYS + 0x21800)
-#define DAVINCI_WDOG_BASE   (IO_PHYS + 0x21C00)
-
-enum {
-	T0_BOT = 0, T0_TOP, T1_BOT, T1_TOP, NUM_TIMERS,
-};
-
-#define IS_TIMER1(id)    (id & 0x2)
-#define IS_TIMER0(id)    (!IS_TIMER1(id))
-#define IS_TIMER_TOP(id) ((id & 0x1))
-#define IS_TIMER_BOT(id) (!IS_TIMER_TOP(id))
-
-static int timer_irqs[NUM_TIMERS] = {
-	IRQ_TINT0_TINT12,
-	IRQ_TINT0_TINT34,
-	IRQ_TINT1_TINT12,
-	IRQ_TINT1_TINT34,
-};
-
 /*
  * This driver configures the 2 64-bit count-up timers as 4 independent
  * 32-bit count-up timers used as follows:
- *
- * T0_BOT: Timer 0, bottom:  clockevent source for hrtimers
- * T0_TOP: Timer 0, top   :  clocksource for generic timekeeping
- * T1_BOT: Timer 1, bottom:  (used by DSP in TI DSPLink code)
- * T1_TOP: Timer 1, top   :  <unused>
  */
-#define TID_CLOCKEVENT  T0_BOT
-#define TID_CLOCKSOURCE T0_TOP
+
+enum {
+	TID_CLOCKEVENT,
+	TID_CLOCKSOURCE,
+};
 
 /* Timer register offsets */
-#define PID12                        0x0
-#define TIM12                        0x10
-#define TIM34                        0x14
-#define PRD12                        0x18
-#define PRD34                        0x1c
-#define TCR                          0x20
-#define TGCR                         0x24
-#define WDTCR                        0x28
+#define PID12			0x0
+#define TIM12			0x10
+#define TIM34			0x14
+#define PRD12			0x18
+#define PRD34			0x1c
+#define TCR			0x20
+#define TGCR			0x24
+#define WDTCR			0x28
+
+/* Offsets of the 8 compare registers */
+#define	CMP12_0			0x60
+#define	CMP12_1			0x64
+#define	CMP12_2			0x68
+#define	CMP12_3			0x6c
+#define	CMP12_4			0x70
+#define	CMP12_5			0x74
+#define	CMP12_6			0x78
+#define	CMP12_7			0x7c
 
 /* Timer register bitfields */
 #define TCR_ENAMODE_DISABLE          0x0
@@ -105,6 +95,7 @@
 	unsigned int id;
 	unsigned long period;
 	unsigned long opts;
+	unsigned long flags;
 	void __iomem *base;
 	unsigned long tim_off;
 	unsigned long prd_off;
@@ -114,30 +105,58 @@
 static struct timer_s timers[];
 
 /* values for 'opts' field of struct timer_s */
-#define TIMER_OPTS_DISABLED   0x00
-#define TIMER_OPTS_ONESHOT    0x01
-#define TIMER_OPTS_PERIODIC   0x02
+#define TIMER_OPTS_DISABLED		0x01
+#define TIMER_OPTS_ONESHOT		0x02
+#define TIMER_OPTS_PERIODIC		0x04
+#define TIMER_OPTS_STATE_MASK		0x07
+
+#define TIMER_OPTS_USE_COMPARE		0x80000000
+#define USING_COMPARE(t)		((t)->opts & TIMER_OPTS_USE_COMPARE)
+
+static char *id_to_name[] = {
+	[T0_BOT]	= "timer0_0",
+	[T0_TOP]	= "timer0_1",
+	[T1_BOT]	= "timer1_0",
+	[T1_TOP]	= "timer1_1",
+};
 
 static int timer32_config(struct timer_s *t)
 {
-	u32 tcr = __raw_readl(t->base + TCR);
+	u32 tcr;
+	struct davinci_soc_info *soc_info = &davinci_soc_info;
 
-	/* disable timer */
-	tcr &= ~(TCR_ENAMODE_MASK << t->enamode_shift);
-	__raw_writel(tcr, t->base + TCR);
+	if (USING_COMPARE(t)) {
+		struct davinci_timer_instance *dtip =
+				soc_info->timer_info->timers;
+		int event_timer = ID_TO_TIMER(timers[TID_CLOCKEVENT].id);
 
-	/* reset counter to zero, set new period */
-	__raw_writel(0, t->base + t->tim_off);
-	__raw_writel(t->period, t->base + t->prd_off);
+		/*
+		 * Next interrupt should be the current time reg value plus
+		 * the new period (using 32-bit unsigned addition/wrapping
+		 * to 0 on overflow).  This assumes that the clocksource
+		 * is setup to count to 2^32-1 before wrapping around to 0.
+		 */
+		__raw_writel(__raw_readl(t->base + t->tim_off) + t->period,
+			t->base + dtip[event_timer].cmp_off);
+	} else {
+		tcr = __raw_readl(t->base + TCR);
 
-	/* Set enable mode */
-	if (t->opts & TIMER_OPTS_ONESHOT) {
-		tcr |= TCR_ENAMODE_ONESHOT << t->enamode_shift;
-	} else if (t->opts & TIMER_OPTS_PERIODIC) {
-		tcr |= TCR_ENAMODE_PERIODIC << t->enamode_shift;
+		/* disable timer */
+		tcr &= ~(TCR_ENAMODE_MASK << t->enamode_shift);
+		__raw_writel(tcr, t->base + TCR);
+
+		/* reset counter to zero, set new period */
+		__raw_writel(0, t->base + t->tim_off);
+		__raw_writel(t->period, t->base + t->prd_off);
+
+		/* Set enable mode */
+		if (t->opts & TIMER_OPTS_ONESHOT)
+			tcr |= TCR_ENAMODE_ONESHOT << t->enamode_shift;
+		else if (t->opts & TIMER_OPTS_PERIODIC)
+			tcr |= TCR_ENAMODE_PERIODIC << t->enamode_shift;
+
+		__raw_writel(tcr, t->base + TCR);
 	}
-
-	__raw_writel(tcr, t->base + TCR);
 	return 0;
 }
 
@@ -182,13 +201,14 @@
 
 static void __init timer_init(void)
 {
-	u32 phys_bases[] = {DAVINCI_TIMER0_BASE, DAVINCI_TIMER1_BASE};
+	struct davinci_soc_info *soc_info = &davinci_soc_info;
+	struct davinci_timer_instance *dtip = soc_info->timer_info->timers;
 	int i;
 
 	/* Global init of each 64-bit timer as a whole */
 	for(i=0; i<2; i++) {
 		u32 tgcr;
-		void __iomem *base = IO_ADDRESS(phys_bases[i]);
+		void __iomem *base = dtip[i].base;
 
 		/* Disabled, Internal clock source */
 		__raw_writel(0, base + TCR);
@@ -214,33 +234,33 @@
 	/* Init of each timer as a 32-bit timer */
 	for (i=0; i< ARRAY_SIZE(timers); i++) {
 		struct timer_s *t = &timers[i];
-		u32 phys_base;
+		int timer = ID_TO_TIMER(t->id);
+		u32 irq;
 
-		if (t->name) {
-			t->id = i;
-			phys_base = (IS_TIMER1(t->id) ?
-			       DAVINCI_TIMER1_BASE : DAVINCI_TIMER0_BASE);
-			t->base = IO_ADDRESS(phys_base);
+		t->base = dtip[timer].base;
 
-			if (IS_TIMER_BOT(t->id)) {
-				t->enamode_shift = 6;
-				t->tim_off = TIM12;
-				t->prd_off = PRD12;
-			} else {
-				t->enamode_shift = 22;
-				t->tim_off = TIM34;
-				t->prd_off = PRD34;
-			}
-
-			/* Register interrupt */
-			t->irqaction.name = t->name;
-			t->irqaction.dev_id = (void *)t;
-			if (t->irqaction.handler != NULL) {
-				setup_irq(timer_irqs[t->id], &t->irqaction);
-			}
-
-			timer32_config(&timers[i]);
+		if (IS_TIMER_BOT(t->id)) {
+			t->enamode_shift = 6;
+			t->tim_off = TIM12;
+			t->prd_off = PRD12;
+			irq = dtip[timer].bottom_irq;
+		} else {
+			t->enamode_shift = 22;
+			t->tim_off = TIM34;
+			t->prd_off = PRD34;
+			irq = dtip[timer].top_irq;
 		}
+
+		/* Register interrupt */
+		t->irqaction.name = t->name;
+		t->irqaction.dev_id = (void *)t;
+
+		if (t->irqaction.handler != NULL) {
+			irq = USING_COMPARE(t) ? dtip[i].cmp_irq : irq;
+			setup_irq(irq, &t->irqaction);
+		}
+
+		timer32_config(&timers[i]);
 	}
 }
 
@@ -255,7 +275,6 @@
 }
 
 static struct clocksource clocksource_davinci = {
-	.name		= "timer0_1",
 	.rating		= 300,
 	.read		= read_cycles,
 	.mask		= CLOCKSOURCE_MASK(32),
@@ -284,15 +303,18 @@
 	switch (mode) {
 	case CLOCK_EVT_MODE_PERIODIC:
 		t->period = davinci_clock_tick_rate / (HZ);
-		t->opts = TIMER_OPTS_PERIODIC;
+		t->opts &= ~TIMER_OPTS_STATE_MASK;
+		t->opts |= TIMER_OPTS_PERIODIC;
 		timer32_config(t);
 		break;
 	case CLOCK_EVT_MODE_ONESHOT:
-		t->opts = TIMER_OPTS_ONESHOT;
+		t->opts &= ~TIMER_OPTS_STATE_MASK;
+		t->opts |= TIMER_OPTS_ONESHOT;
 		break;
 	case CLOCK_EVT_MODE_UNUSED:
 	case CLOCK_EVT_MODE_SHUTDOWN:
-		t->opts = TIMER_OPTS_DISABLED;
+		t->opts &= ~TIMER_OPTS_STATE_MASK;
+		t->opts |= TIMER_OPTS_DISABLED;
 		break;
 	case CLOCK_EVT_MODE_RESUME:
 		break;
@@ -300,7 +322,6 @@
 }
 
 static struct clock_event_device clockevent_davinci = {
-	.name		= "timer0_0",
 	.features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
 	.shift		= 32,
 	.set_next_event	= davinci_set_next_event,
@@ -311,10 +332,42 @@
 static void __init davinci_timer_init(void)
 {
 	struct clk *timer_clk;
-
+	struct davinci_soc_info *soc_info = &davinci_soc_info;
+	unsigned int clockevent_id;
+	unsigned int clocksource_id;
 	static char err[] __initdata = KERN_ERR
 		"%s: can't register clocksource!\n";
 
+	clockevent_id = soc_info->timer_info->clockevent_id;
+	clocksource_id = soc_info->timer_info->clocksource_id;
+
+	timers[TID_CLOCKEVENT].id = clockevent_id;
+	timers[TID_CLOCKSOURCE].id = clocksource_id;
+
+	/*
+	 * If using the same timer for both clock events & clocksource,
+	 * a compare register must be used to generate an event interrupt.
+	 * This is equivalent to a oneshot timer only (not periodic).
+	 */
+	if (clockevent_id == clocksource_id) {
+		struct davinci_timer_instance *dtip =
+				soc_info->timer_info->timers;
+		int event_timer = ID_TO_TIMER(clockevent_id);
+
+		/* Only bottom timers can use compare regs */
+		if (IS_TIMER_TOP(clockevent_id))
+			pr_warning("davinci_timer_init: Invalid use"
+				" of system timers.  Results unpredictable.\n");
+		else if ((dtip[event_timer].cmp_off == 0)
+				|| (dtip[event_timer].cmp_irq == 0))
+			pr_warning("davinci_timer_init:  Invalid timer instance"
+				" setup.  Results unpredictable.\n");
+		else {
+			timers[TID_CLOCKEVENT].opts |= TIMER_OPTS_USE_COMPARE;
+			clockevent_davinci.features = CLOCK_EVT_FEAT_ONESHOT;
+		}
+	}
+
 	/* init timer hw */
 	timer_init();
 
@@ -325,6 +378,7 @@
 	davinci_clock_tick_rate = clk_get_rate(timer_clk);
 
 	/* setup clocksource */
+	clocksource_davinci.name = id_to_name[clocksource_id];
 	clocksource_davinci.mult =
 		clocksource_khz2mult(davinci_clock_tick_rate/1000,
 				     clocksource_davinci.shift);
@@ -332,12 +386,12 @@
 		printk(err, clocksource_davinci.name);
 
 	/* setup clockevent */
+	clockevent_davinci.name = id_to_name[timers[TID_CLOCKEVENT].id];
 	clockevent_davinci.mult = div_sc(davinci_clock_tick_rate, NSEC_PER_SEC,
 					 clockevent_davinci.shift);
 	clockevent_davinci.max_delta_ns =
 		clockevent_delta2ns(0xfffffffe, &clockevent_davinci);
-	clockevent_davinci.min_delta_ns =
-		clockevent_delta2ns(1, &clockevent_davinci);
+	clockevent_davinci.min_delta_ns = 50000; /* 50 usec */
 
 	clockevent_davinci.cpumask = cpumask_of(0);
 	clockevents_register_device(&clockevent_davinci);
@@ -349,15 +403,14 @@
 
 
 /* reset board using watchdog timer */
-void davinci_watchdog_reset(void) {
+void davinci_watchdog_reset(void)
+{
 	u32 tgcr, wdtcr;
-	void __iomem *base = IO_ADDRESS(DAVINCI_WDOG_BASE);
-	struct device dev;
+	struct davinci_soc_info *soc_info = &davinci_soc_info;
+	void __iomem *base = soc_info->wdt_base;
 	struct clk *wd_clk;
-	char *name = "watchdog";
 
-	dev_set_name(&dev, name);
-	wd_clk = clk_get(&dev, NULL);
+	wd_clk = clk_get(&davinci_wdt_device.dev, NULL);
 	if (WARN_ON(IS_ERR(wd_clk)))
 		return;
 	clk_enable(wd_clk);
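
For reference, a minimal standalone sketch (hypothetical values, not part of the patch) of the 32-bit wrap-around arithmetic that the compare-register path in timer32_config() relies on: the next compare value is simply the current count plus the period, wrapping modulo 2^32.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t now = 0xfffffff0u;	/* current TIM12 counter value */
	uint32_t period = 0x40u;	/* ticks until the next event */
	uint32_t next = now + period;	/* wraps modulo 2^32, as the comment assumes */

	printf("next compare value: 0x%08x\n", (unsigned int)next);	/* 0x00000030 */
	return 0;
}
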
diff --git a/arch/arm/mach-ep93xx/clock.c b/arch/arm/mach-ep93xx/clock.c
index e8ebeae..b2eede5 100644
--- a/arch/arm/mach-ep93xx/clock.c
+++ b/arch/arm/mach-ep93xx/clock.c
@@ -21,15 +21,50 @@
 #include <asm/div64.h>
 #include <mach/hardware.h>
 
+
+/*
+ * The EP93xx has two external crystal oscillators.  To generate the
+ * required high-frequency clocks, the processor uses two phase-locked-
+ * loops (PLLs) to multiply the incoming external clock signal to much
+ * higher frequencies that are then divided down by programmable dividers
+ * to produce the needed clocks.  The PLLs operate independently of one
+ * another.
+ */
+#define EP93XX_EXT_CLK_RATE	14745600
+#define EP93XX_EXT_RTC_RATE	32768
+
+
 struct clk {
 	unsigned long	rate;
 	int		users;
+	int		sw_locked;
 	u32		enable_reg;
 	u32		enable_mask;
+
+	unsigned long	(*get_rate)(struct clk *clk);
 };
 
-static struct clk clk_uart = {
-	.rate		= 14745600,
+
+static unsigned long get_uart_rate(struct clk *clk);
+
+
+static struct clk clk_uart1 = {
+	.sw_locked	= 1,
+	.enable_reg	= EP93XX_SYSCON_DEVICE_CONFIG,
+	.enable_mask	= EP93XX_SYSCON_DEVICE_CONFIG_U1EN,
+	.get_rate	= get_uart_rate,
+};
+static struct clk clk_uart2 = {
+	.sw_locked	= 1,
+	.enable_reg	= EP93XX_SYSCON_DEVICE_CONFIG,
+	.enable_mask	= EP93XX_SYSCON_DEVICE_CONFIG_U2EN,
+	.get_rate	= get_uart_rate,
+};
+static struct clk clk_uart3 = {
+	.sw_locked	= 1,
+	.enable_reg	= EP93XX_SYSCON_DEVICE_CONFIG,
+	.enable_mask	= EP93XX_SYSCON_DEVICE_CONFIG_U3EN,
+	.get_rate	= get_uart_rate,
 };
 static struct clk clk_pll1;
 static struct clk clk_f;
@@ -95,9 +130,9 @@
 	{ .dev_id = dev, .con_id = con, .clk = ck }
 
 static struct clk_lookup clocks[] = {
-	INIT_CK("apb:uart1", NULL, &clk_uart),
-	INIT_CK("apb:uart2", NULL, &clk_uart),
-	INIT_CK("apb:uart3", NULL, &clk_uart),
+	INIT_CK("apb:uart1", NULL, &clk_uart1),
+	INIT_CK("apb:uart2", NULL, &clk_uart2),
+	INIT_CK("apb:uart3", NULL, &clk_uart3),
 	INIT_CK(NULL, "pll1", &clk_pll1),
 	INIT_CK(NULL, "fclk", &clk_f),
 	INIT_CK(NULL, "hclk", &clk_h),
@@ -125,6 +160,8 @@
 		u32 value;
 
 		value = __raw_readl(clk->enable_reg);
+		if (clk->sw_locked)
+			__raw_writel(0xaa, EP93XX_SYSCON_SWLOCK);
 		__raw_writel(value | clk->enable_mask, clk->enable_reg);
 	}
 
@@ -138,13 +175,29 @@
 		u32 value;
 
 		value = __raw_readl(clk->enable_reg);
+		if (clk->sw_locked)
+			__raw_writel(0xaa, EP93XX_SYSCON_SWLOCK);
 		__raw_writel(value & ~clk->enable_mask, clk->enable_reg);
 	}
 }
 EXPORT_SYMBOL(clk_disable);
 
+static unsigned long get_uart_rate(struct clk *clk)
+{
+	u32 value;
+
+	value = __raw_readl(EP93XX_SYSCON_CLOCK_CONTROL);
+	if (value & EP93XX_SYSCON_CLOCK_UARTBAUD)
+		return EP93XX_EXT_CLK_RATE;
+	else
+		return EP93XX_EXT_CLK_RATE / 2;
+}
+
 unsigned long clk_get_rate(struct clk *clk)
 {
+	if (clk->get_rate)
+		return clk->get_rate(clk);
+
 	return clk->rate;
 }
 EXPORT_SYMBOL(clk_get_rate);
@@ -162,7 +215,7 @@
 	unsigned long long rate;
 	int i;
 
-	rate = 14745600;
+	rate = EP93XX_EXT_CLK_RATE;
 	rate *= ((config_word >> 11) & 0x1f) + 1;		/* X1FBD */
 	rate *= ((config_word >> 5) & 0x3f) + 1;		/* X2FBD */
 	do_div(rate, (config_word & 0x1f) + 1);			/* X2IPD */
@@ -195,7 +248,7 @@
 
 	value = __raw_readl(EP93XX_SYSCON_CLOCK_SET1);
 	if (!(value & 0x00800000)) {			/* PLL1 bypassed?  */
-		clk_pll1.rate = 14745600;
+		clk_pll1.rate = EP93XX_EXT_CLK_RATE;
 	} else {
 		clk_pll1.rate = calc_pll_rate(value);
 	}
@@ -206,7 +259,7 @@
 
 	value = __raw_readl(EP93XX_SYSCON_CLOCK_SET2);
 	if (!(value & 0x00080000)) {			/* PLL2 bypassed?  */
-		clk_pll2.rate = 14745600;
+		clk_pll2.rate = EP93XX_EXT_CLK_RATE;
 	} else if (value & 0x00040000) {		/* PLL2 enabled?  */
 		clk_pll2.rate = calc_pll_rate(value);
 	} else {
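
The new comment and calc_pll_rate() above describe how a PLL output is derived from the external clock and the X1FBD/X2FBD/X2IPD fields of the clock-set register. A userspace sketch of the same arithmetic, using a made-up config word and assuming the field layout shown in the hunk:

#include <stdint.h>
#include <stdio.h>

#define EXT_CLK_RATE	14745600ULL

static uint64_t pll_rate(uint32_t config_word)
{
	uint64_t rate = EXT_CLK_RATE;

	rate *= ((config_word >> 11) & 0x1f) + 1;	/* X1FBD */
	rate *= ((config_word >> 5) & 0x3f) + 1;	/* X2FBD */
	rate /= (config_word & 0x1f) + 1;		/* X2IPD */
	return rate;
}

int main(void)
{
	/* hypothetical word: X1FBD = 1, X2FBD = 24, X2IPD = 1 */
	uint32_t word = (1 << 11) | (24 << 5) | 1;

	printf("PLL rate: %llu Hz\n", (unsigned long long)pll_rate(word));
	return 0;
}
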
diff --git a/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h b/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h
index 78ac1bd..420f71b 100644
--- a/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h
+++ b/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h
@@ -160,7 +160,10 @@
 #define EP93XX_SYSCON_CLOCK_SET1	EP93XX_SYSCON_REG(0x20)
 #define EP93XX_SYSCON_CLOCK_SET2	EP93XX_SYSCON_REG(0x24)
 #define EP93XX_SYSCON_DEVICE_CONFIG	EP93XX_SYSCON_REG(0x80)
-#define EP93XX_SYSCON_DEVICE_CONFIG_CRUNCH_ENABLE	0x00800000
+#define EP93XX_SYSCON_DEVICE_CONFIG_U3EN		(1<<24)
+#define EP93XX_SYSCON_DEVICE_CONFIG_CRUNCH_ENABLE	(1<<23)
+#define EP93XX_SYSCON_DEVICE_CONFIG_U2EN		(1<<20)
+#define EP93XX_SYSCON_DEVICE_CONFIG_U1EN		(1<<18)
 #define EP93XX_SYSCON_SWLOCK		EP93XX_SYSCON_REG(0xc0)
 
 #define EP93XX_WATCHDOG_BASE		(EP93XX_APB_VIRT_BASE + 0x00140000)
diff --git a/arch/arm/mach-integrator/core.c b/arch/arm/mach-integrator/core.c
index 6f88729..a0f60e5 100644
--- a/arch/arm/mach-integrator/core.c
+++ b/arch/arm/mach-integrator/core.c
@@ -121,7 +121,7 @@
 	.rate	= 14745600,
 };
 
-static struct clk_lookup lookups[] __initdata = {
+static struct clk_lookup lookups[] = {
 	{	/* UART0 */
 		.dev_id		= "mb:16",
 		.clk		= &uartclk,
diff --git a/arch/arm/mach-l7200/include/mach/sys-clock.h b/arch/arm/mach-l7200/include/mach/sys-clock.h
index 2d7722b..e9729a3 100644
--- a/arch/arm/mach-l7200/include/mach/sys-clock.h
+++ b/arch/arm/mach-l7200/include/mach/sys-clock.h
@@ -18,7 +18,7 @@
 
 /* IO_START and IO_BASE are defined in hardware.h */
 
-#define SYS_CLOCK_START (IO_START + SYS_CLCOK_OFF)  /* Physical address */
+#define SYS_CLOCK_START (IO_START + SYS_CLOCK_OFF)  /* Physical address */
 #define SYS_CLOCK_BASE  (IO_BASE  + SYS_CLOCK_OFF)  /* Virtual address  */
 
 /* Define the interface to the SYS_CLOCK */
diff --git a/arch/arm/mach-omap2/clock24xx.c b/arch/arm/mach-omap2/clock24xx.c
index efc59c4..e4cef33 100644
--- a/arch/arm/mach-omap2/clock24xx.c
+++ b/arch/arm/mach-omap2/clock24xx.c
@@ -103,10 +103,10 @@
 	CLK(NULL,	"mdm_ick",	&mdm_ick,	CK_243X),
 	CLK(NULL,	"mdm_osc_ck",	&mdm_osc_ck,	CK_243X),
 	/* DSS domain clocks */
-	CLK(NULL,	"dss_ick",	&dss_ick,	CK_243X | CK_242X),
-	CLK(NULL,	"dss1_fck",	&dss1_fck,	CK_243X | CK_242X),
-	CLK(NULL,	"dss2_fck",	&dss2_fck,	CK_243X | CK_242X),
-	CLK(NULL,	"dss_54m_fck",	&dss_54m_fck,	CK_243X | CK_242X),
+	CLK("omapfb",	"ick",		&dss_ick,	CK_243X | CK_242X),
+	CLK("omapfb",	"dss1_fck",	&dss1_fck,	CK_243X | CK_242X),
+	CLK("omapfb",	"dss2_fck",	&dss2_fck,	CK_243X | CK_242X),
+	CLK("omapfb",	"tv_fck",	&dss_54m_fck,	CK_243X | CK_242X),
 	/* L3 domain clocks */
 	CLK(NULL,	"core_l3_ck",	&core_l3_ck,	CK_243X | CK_242X),
 	CLK(NULL,	"ssi_fck",	&ssi_ssr_sst_fck, CK_243X | CK_242X),
@@ -206,7 +206,7 @@
 	CLK(NULL,	"aes_ick",	&aes_ick,	CK_243X | CK_242X),
 	CLK(NULL,	"pka_ick",	&pka_ick,	CK_243X | CK_242X),
 	CLK(NULL,	"usb_fck",	&usb_fck,	CK_243X | CK_242X),
-	CLK(NULL,	"usbhs_ick",	&usbhs_ick,	CK_243X),
+	CLK("musb_hdrc",	"ick",	&usbhs_ick,	CK_243X),
 	CLK("mmci-omap-hs.0", "ick",	&mmchs1_ick,	CK_243X),
 	CLK("mmci-omap-hs.0", "fck",	&mmchs1_fck,	CK_243X),
 	CLK("mmci-omap-hs.1", "ick",	&mmchs2_ick,	CK_243X),
diff --git a/arch/arm/mach-omap2/clock34xx.c b/arch/arm/mach-omap2/clock34xx.c
index 0a14dca..ba05aa4 100644
--- a/arch/arm/mach-omap2/clock34xx.c
+++ b/arch/arm/mach-omap2/clock34xx.c
@@ -157,7 +157,7 @@
 	CLK(NULL,	"ssi_ssr_fck",	&ssi_ssr_fck,	CK_343X),
 	CLK(NULL,	"ssi_sst_fck",	&ssi_sst_fck,	CK_343X),
 	CLK(NULL,	"core_l3_ick",	&core_l3_ick,	CK_343X),
-	CLK(NULL,	"hsotgusb_ick",	&hsotgusb_ick,	CK_343X),
+	CLK("musb_hdrc",	"ick",	&hsotgusb_ick,	CK_343X),
 	CLK(NULL,	"sdrc_ick",	&sdrc_ick,	CK_343X),
 	CLK(NULL,	"gpmc_fck",	&gpmc_fck,	CK_343X),
 	CLK(NULL,	"security_l3_ick", &security_l3_ick, CK_343X),
@@ -197,11 +197,11 @@
 	CLK("omap_rng",	"ick",		&rng_ick,	CK_343X),
 	CLK(NULL,	"sha11_ick",	&sha11_ick,	CK_343X),
 	CLK(NULL,	"des1_ick",	&des1_ick,	CK_343X),
-	CLK(NULL,	"dss1_alwon_fck", &dss1_alwon_fck, CK_343X),
-	CLK(NULL,	"dss_tv_fck",	&dss_tv_fck,	CK_343X),
-	CLK(NULL,	"dss_96m_fck",	&dss_96m_fck,	CK_343X),
-	CLK(NULL,	"dss2_alwon_fck", &dss2_alwon_fck, CK_343X),
-	CLK(NULL,	"dss_ick",	&dss_ick,	CK_343X),
+	CLK("omapfb",	"dss1_fck",	&dss1_alwon_fck, CK_343X),
+	CLK("omapfb",	"tv_fck",	&dss_tv_fck,	CK_343X),
+	CLK("omapfb",	"video_fck",	&dss_96m_fck,	CK_343X),
+	CLK("omapfb",	"dss2_fck",	&dss2_alwon_fck, CK_343X),
+	CLK("omapfb",	"ick",		&dss_ick,	CK_343X),
 	CLK(NULL,	"cam_mclk",	&cam_mclk,	CK_343X),
 	CLK(NULL,	"cam_ick",	&cam_ick,	CK_343X),
 	CLK(NULL,	"csi2_96m_fck",	&csi2_96m_fck,	CK_343X),
diff --git a/arch/arm/mach-omap2/clock34xx.h b/arch/arm/mach-omap2/clock34xx.h
index 6763b8f..017a30e 100644
--- a/arch/arm/mach-omap2/clock34xx.h
+++ b/arch/arm/mach-omap2/clock34xx.h
@@ -2182,7 +2182,7 @@
 
 static struct clk gpio1_dbck = {
 	.name		= "gpio1_dbck",
-	.ops		= &clkops_omap2_dflt_wait,
+	.ops		= &clkops_omap2_dflt,
 	.parent		= &wkup_32k_fck,
 	.enable_reg	= OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
 	.enable_bit	= OMAP3430_EN_GPIO1_SHIFT,
@@ -2427,7 +2427,7 @@
 
 static struct clk gpio6_dbck = {
 	.name		= "gpio6_dbck",
-	.ops		= &clkops_omap2_dflt_wait,
+	.ops		= &clkops_omap2_dflt,
 	.parent		= &per_32k_alwon_fck,
 	.enable_reg	= OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
 	.enable_bit	= OMAP3430_EN_GPIO6_SHIFT,
@@ -2437,7 +2437,7 @@
 
 static struct clk gpio5_dbck = {
 	.name		= "gpio5_dbck",
-	.ops		= &clkops_omap2_dflt_wait,
+	.ops		= &clkops_omap2_dflt,
 	.parent		= &per_32k_alwon_fck,
 	.enable_reg	= OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
 	.enable_bit	= OMAP3430_EN_GPIO5_SHIFT,
@@ -2447,7 +2447,7 @@
 
 static struct clk gpio4_dbck = {
 	.name		= "gpio4_dbck",
-	.ops		= &clkops_omap2_dflt_wait,
+	.ops		= &clkops_omap2_dflt,
 	.parent		= &per_32k_alwon_fck,
 	.enable_reg	= OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
 	.enable_bit	= OMAP3430_EN_GPIO4_SHIFT,
@@ -2457,7 +2457,7 @@
 
 static struct clk gpio3_dbck = {
 	.name		= "gpio3_dbck",
-	.ops		= &clkops_omap2_dflt_wait,
+	.ops		= &clkops_omap2_dflt,
 	.parent		= &per_32k_alwon_fck,
 	.enable_reg	= OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
 	.enable_bit	= OMAP3430_EN_GPIO3_SHIFT,
@@ -2467,7 +2467,7 @@
 
 static struct clk gpio2_dbck = {
 	.name		= "gpio2_dbck",
-	.ops		= &clkops_omap2_dflt_wait,
+	.ops		= &clkops_omap2_dflt,
 	.parent		= &per_32k_alwon_fck,
 	.enable_reg	= OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
 	.enable_bit	= OMAP3430_EN_GPIO2_SHIFT,
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index 496983a..894cc35 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -354,10 +354,12 @@
 	platform_device_register(&omap2_mcspi1);
 	platform_device_register(&omap2_mcspi2);
 #if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3)
-	platform_device_register(&omap2_mcspi3);
+	if (cpu_is_omap2430() || cpu_is_omap343x())
+		platform_device_register(&omap2_mcspi3);
 #endif
 #ifdef CONFIG_ARCH_OMAP3
-	platform_device_register(&omap2_mcspi4);
+	if (cpu_is_omap343x())
+		platform_device_register(&omap2_mcspi4);
 #endif
 }
 
diff --git a/arch/arm/mach-omap2/prm-regbits-34xx.h b/arch/arm/mach-omap2/prm-regbits-34xx.h
index c6a7940..9fd03a2 100644
--- a/arch/arm/mach-omap2/prm-regbits-34xx.h
+++ b/arch/arm/mach-omap2/prm-regbits-34xx.h
@@ -409,7 +409,7 @@
 /* PM_PREPWSTST_CAM specific bits */
 
 /* PM_PWSTCTRL_USBHOST specific bits */
-#define OMAP3430ES2_SAVEANDRESTORE_SHIFT		(1 << 4)
+#define OMAP3430ES2_SAVEANDRESTORE_SHIFT		4
 
 /* RM_RSTST_PER specific bits */
 
diff --git a/arch/arm/mach-omap2/usb-tusb6010.c b/arch/arm/mach-omap2/usb-tusb6010.c
index 8df55f4..8622c24 100644
--- a/arch/arm/mach-omap2/usb-tusb6010.c
+++ b/arch/arm/mach-omap2/usb-tusb6010.c
@@ -187,7 +187,7 @@
 	unsigned	sysclk_ps;
 	int		status;
 
-	if (!refclk_psec || sysclk_ps == 0)
+	if (!refclk_psec || fclk_ps == 0)
 		return -ENODEV;
 
 	sysclk_ps = is_refclk ? refclk_psec : TUSB6010_OSCCLK_60;
diff --git a/arch/arm/mach-pxa/viper.c b/arch/arm/mach-pxa/viper.c
index 0e65344..dd031cc 100644
--- a/arch/arm/mach-pxa/viper.c
+++ b/arch/arm/mach-pxa/viper.c
@@ -46,6 +46,7 @@
 #include <mach/audio.h>
 #include <mach/pxafb.h>
 #include <mach/i2c.h>
+#include <mach/regs-uart.h>
 #include <mach/viper.h>
 
 #include <asm/setup.h>
diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c
index 942e1a7..076acbc 100644
--- a/arch/arm/mach-realview/core.c
+++ b/arch/arm/mach-realview/core.c
@@ -750,14 +750,6 @@
 {
 	u32 val;
 
-#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
-	/*
-	 * The dummy clock device has to be registered before the main device
-	 * so that the latter will broadcast the clock events
-	 */
-	local_timer_setup();
-#endif
-
 	/* 
 	 * set clock frequency: 
 	 *	REALVIEW_REFCLK is 32KHz
diff --git a/arch/arm/mach-realview/include/mach/smp.h b/arch/arm/mach-realview/include/mach/smp.h
index 515819e..dd53892 100644
--- a/arch/arm/mach-realview/include/mach/smp.h
+++ b/arch/arm/mach-realview/include/mach/smp.h
@@ -15,16 +15,9 @@
 /*
  * We use IRQ1 as the IPI
  */
-static inline void smp_cross_call(cpumask_t callmap)
+static inline void smp_cross_call(const struct cpumask *mask)
 {
-	gic_raise_softirq(callmap, 1);
-}
-
-/*
- * Do nothing on MPcore.
- */
-static inline void smp_cross_call_done(cpumask_t callmap)
-{
+	gic_raise_softirq(mask, 1);
 }
 
 #endif
diff --git a/arch/arm/mach-realview/localtimer.c b/arch/arm/mach-realview/localtimer.c
index d0d39ad..1c01d13 100644
--- a/arch/arm/mach-realview/localtimer.c
+++ b/arch/arm/mach-realview/localtimer.c
@@ -189,8 +189,10 @@
 	struct clock_event_device *clk = &per_cpu(local_clockevent, cpu);
 
 	clk->name		= "dummy_timer";
-	clk->features		= CLOCK_EVT_FEAT_DUMMY;
-	clk->rating		= 200;
+	clk->features		= CLOCK_EVT_FEAT_ONESHOT |
+				  CLOCK_EVT_FEAT_PERIODIC |
+				  CLOCK_EVT_FEAT_DUMMY;
+	clk->rating		= 400;
 	clk->mult               = 1;
 	clk->set_mode		= dummy_timer_set_mode;
 	clk->broadcast		= smp_timer_broadcast;
diff --git a/arch/arm/mach-realview/platsmp.c b/arch/arm/mach-realview/platsmp.c
index ea3c755..30a9c68 100644
--- a/arch/arm/mach-realview/platsmp.c
+++ b/arch/arm/mach-realview/platsmp.c
@@ -78,13 +78,6 @@
 	trace_hardirqs_off();
 
 	/*
-	 * the primary core may have used a "cross call" soft interrupt
-	 * to get this processor out of WFI in the BootMonitor - make
-	 * sure that we are no longer being sent this soft interrupt
-	 */
-	smp_cross_call_done(cpumask_of_cpu(cpu));
-
-	/*
 	 * if any interrupts are already enabled for the primary
 	 * core (e.g. timer irq), then they will not have been enabled
 	 * for us: do so
@@ -136,7 +129,7 @@
 	 * Use smp_cross_call() for this, since there's little
 	 * point duplicating the code here
 	 */
-	smp_cross_call(cpumask_of_cpu(cpu));
+	smp_cross_call(cpumask_of(cpu));
 
 	timeout = jiffies + (1 * HZ);
 	while (time_before(jiffies, timeout)) {
@@ -224,11 +217,9 @@
 	if (max_cpus > ncores)
 		max_cpus = ncores;
 
-#ifdef CONFIG_LOCAL_TIMERS
+#if defined(CONFIG_LOCAL_TIMERS) || defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
 	/*
-	 * Enable the local timer for primary CPU. If the device is
-	 * dummy (!CONFIG_LOCAL_TIMERS), it was already registers in
-	 * realview_timer_init
+	 * Enable the local timer or broadcast device for the boot CPU.
 	 */
 	local_timer_setup();
 #endif
diff --git a/arch/arm/mach-s3c2410/mach-bast.c b/arch/arm/mach-s3c2410/mach-bast.c
index 4389c16..8637dea 100644
--- a/arch/arm/mach-s3c2410/mach-bast.c
+++ b/arch/arm/mach-s3c2410/mach-bast.c
@@ -588,8 +588,6 @@
 
 	s3c_device_nand.dev.platform_data = &bast_nand_info;
 
-	s3c_i2c0_set_platdata(&bast_i2c_info);
-
 	s3c24xx_init_io(bast_iodesc, ARRAY_SIZE(bast_iodesc));
 	s3c24xx_init_clocks(0);
 	s3c24xx_init_uarts(bast_uartcfgs, ARRAY_SIZE(bast_uartcfgs));
@@ -602,6 +600,7 @@
 	sysdev_class_register(&bast_pm_sysclass);
 	sysdev_register(&bast_pm_sysdev);
 
+	s3c_i2c0_set_platdata(&bast_i2c_info);
 	s3c24xx_fb_set_platdata(&bast_fb_info);
 	platform_add_devices(bast_devices, ARRAY_SIZE(bast_devices));
 
diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c
index 1f929c3..b3bebcc 100644
--- a/arch/arm/mach-versatile/core.c
+++ b/arch/arm/mach-versatile/core.c
@@ -413,7 +413,7 @@
 	.rate	= 24000000,
 };
 
-static struct clk_lookup lookups[] __initdata = {
+static struct clk_lookup lookups[] = {
 	{	/* UART0 */
 		.dev_id		= "dev:f1",
 		.clk		= &ref24_clk,
diff --git a/arch/arm/nwfpe/fpa11.h b/arch/arm/nwfpe/fpa11.h
index 386cbd1..d3a6f92 100644
--- a/arch/arm/nwfpe/fpa11.h
+++ b/arch/arm/nwfpe/fpa11.h
@@ -114,4 +114,8 @@
 extern unsigned int DoubleCPDO(struct roundingData *roundData,
 			       const unsigned int opcode, FPREG * rFd);
 
+/* extended_cpdo.c */
+extern unsigned int ExtendedCPDO(struct roundingData *roundData,
+				 const unsigned int opcode, FPREG * rFd);
+
 #endif
diff --git a/arch/arm/nwfpe/fpa11_cprt.c b/arch/arm/nwfpe/fpa11_cprt.c
index 9843dc5..31c4eee 100644
--- a/arch/arm/nwfpe/fpa11_cprt.c
+++ b/arch/arm/nwfpe/fpa11_cprt.c
@@ -27,10 +27,6 @@
 #include "fpmodule.inl"
 #include "softfloat.h"
 
-#ifdef CONFIG_FPE_NWFPE_XP
-extern flag floatx80_is_nan(floatx80);
-#endif
-
 unsigned int PerformFLT(const unsigned int opcode);
 unsigned int PerformFIX(const unsigned int opcode);
 
diff --git a/arch/arm/nwfpe/softfloat.h b/arch/arm/nwfpe/softfloat.h
index 260fe29..13e479c 100644
--- a/arch/arm/nwfpe/softfloat.h
+++ b/arch/arm/nwfpe/softfloat.h
@@ -226,6 +226,8 @@
 char floatx80_lt_quiet( floatx80, floatx80 );
 char floatx80_is_signaling_nan( floatx80 );
 
+extern flag floatx80_is_nan(floatx80);
+
 #endif
 
 static inline flag extractFloat32Sign(float32 a)
diff --git a/arch/arm/plat-omap/fb.c b/arch/arm/plat-omap/fb.c
index ce6b4ba..3746222 100644
--- a/arch/arm/plat-omap/fb.c
+++ b/arch/arm/plat-omap/fb.c
@@ -206,9 +206,10 @@
 			config_invalid = 1;
 			return;
 		}
-		if (rg.paddr)
+		if (rg.paddr) {
 			reserve_bootmem(rg.paddr, rg.size, BOOTMEM_DEFAULT);
-		reserved += rg.size;
+			reserved += rg.size;
+		}
 		omapfb_config.mem_desc.region[i] = rg;
 		configured_regions++;
 	}
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c
index 17d7afe..ee0b21f 100644
--- a/arch/arm/plat-omap/gpio.c
+++ b/arch/arm/plat-omap/gpio.c
@@ -307,7 +307,7 @@
 		return 0;
 	if (cpu_is_omap24xx() && gpio < 128)
 		return 0;
-	if (cpu_is_omap34xx() && gpio < 160)
+	if (cpu_is_omap34xx() && gpio < 192)
 		return 0;
 	return -1;
 }
diff --git a/arch/arm/plat-s3c/clock.c b/arch/arm/plat-s3c/clock.c
index b6be76e..4d01ef1 100644
--- a/arch/arm/plat-s3c/clock.c
+++ b/arch/arm/plat-s3c/clock.c
@@ -306,8 +306,6 @@
 
 int s3c24xx_register_clock(struct clk *clk)
 {
-	clk->owner = THIS_MODULE;
-
 	if (clk->enable == NULL)
 		clk->enable = clk_null_enable;
 
diff --git a/arch/arm/plat-s3c24xx/dma.c b/arch/arm/plat-s3c24xx/dma.c
index aee2aeb..07326f6 100644
--- a/arch/arm/plat-s3c24xx/dma.c
+++ b/arch/arm/plat-s3c24xx/dma.c
@@ -1235,7 +1235,7 @@
 
 EXPORT_SYMBOL(s3c2410_dma_getposition);
 
-static struct s3c2410_dma_chan *to_dma_chan(struct sys_device *dev)
+static inline struct s3c2410_dma_chan *to_dma_chan(struct sys_device *dev)
 {
 	return container_of(dev, struct s3c2410_dma_chan, dev);
 }
diff --git a/arch/arm/plat-s3c64xx/gpiolib.c b/arch/arm/plat-s3c64xx/gpiolib.c
index ee9188a..78ee52c 100644
--- a/arch/arm/plat-s3c64xx/gpiolib.c
+++ b/arch/arm/plat-s3c64xx/gpiolib.c
@@ -57,7 +57,7 @@
 #if 1
 #define gpio_dbg(x...) do { } while(0)
 #else
-#define gpio_dbg(x...) printk(KERN_DEBUG ## x)
+#define gpio_dbg(x...) printk(KERN_DEBUG x)
 #endif
 
 /* The s3c64xx_gpiolib_4bit routines are to control the gpio banks where
diff --git a/arch/arm/plat-s3c64xx/include/plat/gpio-bank-h.h b/arch/arm/plat-s3c64xx/include/plat/gpio-bank-h.h
index 8154951..2ba1767 100644
--- a/arch/arm/plat-s3c64xx/include/plat/gpio-bank-h.h
+++ b/arch/arm/plat-s3c64xx/include/plat/gpio-bank-h.h
@@ -61,14 +61,14 @@
 #define S3C64XX_GPH7_ADDR_CF1		(0x06 << 28)
 #define S3C64XX_GPH7_EINT_G6_7		(0x07 << 28)
 
-#define S3C64XX_GPH8_MMC1_DATA6		(0x02 << 32)
-#define S3C64XX_GPH8_MMC2_DATA2		(0x03 << 32)
-#define S3C64XX_GPH8_I2S_V40_LRCLK	(0x05 << 32)
-#define S3C64XX_GPH8_ADDR_CF2		(0x06 << 32)
-#define S3C64XX_GPH8_EINT_G6_8		(0x07 << 32)
+#define S3C64XX_GPH8_MMC1_DATA6		(0x02 <<  0)
+#define S3C64XX_GPH8_MMC2_DATA2		(0x03 <<  0)
+#define S3C64XX_GPH8_I2S_V40_LRCLK	(0x05 <<  0)
+#define S3C64XX_GPH8_ADDR_CF2		(0x06 <<  0)
+#define S3C64XX_GPH8_EINT_G6_8		(0x07 <<  0)
 
-#define S3C64XX_GPH9_MMC1_DATA7		(0x02 << 36)
-#define S3C64XX_GPH9_MMC2_DATA3		(0x03 << 36)
-#define S3C64XX_GPH9_I2S_V40_DI		(0x05 << 36)
-#define S3C64XX_GPH9_EINT_G6_9		(0x07 << 36)
-
+#define S3C64XX_GPH9_OUTPUT		(0x01 <<  4)
+#define S3C64XX_GPH9_MMC1_DATA7		(0x02 <<  4)
+#define S3C64XX_GPH9_MMC2_DATA3		(0x03 <<  4)
+#define S3C64XX_GPH9_I2S_V40_DI		(0x05 <<  4)
+#define S3C64XX_GPH9_EINT_G6_9		(0x07 <<  4)
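
The shift fix above reflects that GPH8/GPH9 are configured in the bank's second control register, so their 4-bit function fields start again at bit 0. A small sketch of that register/shift selection, assuming 4-bit fields with pins 0-7 in the first control register and pin 8 upward in the second:

#include <stdio.h>

/* Hypothetical helper: 4-bit function fields, pins 0-7 in CON0, 8+ in CON1. */
static void gph_field(unsigned int pin, unsigned int *reg, unsigned int *shift)
{
	*reg = pin < 8 ? 0 : 1;
	*shift = (pin % 8) * 4;
}

int main(void)
{
	unsigned int reg, shift;

	gph_field(9, &reg, &shift);
	printf("GPH9: GPHCON%u, shift %u\n", reg, shift);	/* GPHCON1, shift 4 */
	return 0;
}
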
diff --git a/arch/microblaze/configs/nommu_defconfig b/arch/microblaze/configs/nommu_defconfig
index beb7ecd..4ef6af0 100644
--- a/arch/microblaze/configs/nommu_defconfig
+++ b/arch/microblaze/configs/nommu_defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.29
-# Tue Mar 24 10:23:20 2009
+# Linux kernel version: 2.6.30-rc5
+# Mon May 11 09:01:02 2009
 #
 CONFIG_MICROBLAZE=y
 # CONFIG_SWAP is not set
@@ -32,6 +32,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_SYSVIPC_SYSCTL=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 # CONFIG_TASKSTATS is not set
@@ -63,6 +64,7 @@
 CONFIG_KALLSYMS=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
+# CONFIG_STRIP_ASM_SYMS is not set
 # CONFIG_HOTPLUG is not set
 CONFIG_PRINTK=y
 CONFIG_BUG=y
@@ -80,6 +82,8 @@
 # CONFIG_SLUB is not set
 # CONFIG_SLOB is not set
 # CONFIG_PROFILING is not set
+# CONFIG_MARKERS is not set
+# CONFIG_SLOW_WORK is not set
 # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
 CONFIG_SLABINFO=y
 CONFIG_RT_MUTEXES=y
@@ -92,7 +96,6 @@
 # CONFIG_MODULE_SRCVERSION_ALL is not set
 CONFIG_BLOCK=y
 # CONFIG_LBD is not set
-# CONFIG_BLK_DEV_IO_TRACE is not set
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_BLK_DEV_INTEGRITY is not set
 
@@ -166,6 +169,8 @@
 # CONFIG_PHYS_ADDR_T_64BIT is not set
 CONFIG_ZONE_DMA_FLAG=0
 CONFIG_VIRT_TO_BUS=y
+CONFIG_UNEVICTABLE_LRU=y
+CONFIG_NOMMU_INITIAL_TRIM_EXCESS=1
 
 #
 # Executable file formats
@@ -180,7 +185,6 @@
 #
 # Networking options
 #
-CONFIG_COMPAT_NET_DEV_OPS=y
 CONFIG_PACKET=y
 # CONFIG_PACKET_MMAP is not set
 CONFIG_UNIX=y
@@ -232,6 +236,7 @@
 # CONFIG_LAPB is not set
 # CONFIG_ECONET is not set
 # CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
 # CONFIG_NET_SCHED is not set
 # CONFIG_DCB is not set
 
@@ -244,7 +249,6 @@
 # CONFIG_IRDA is not set
 # CONFIG_BT is not set
 # CONFIG_AF_RXRPC is not set
-# CONFIG_PHONET is not set
 CONFIG_WIRELESS=y
 # CONFIG_CFG80211 is not set
 CONFIG_WIRELESS_OLD_REGULATORY=y
@@ -379,6 +383,7 @@
 # CONFIG_ATA is not set
 # CONFIG_MD is not set
 CONFIG_NETDEVICES=y
+CONFIG_COMPAT_NET_DEV_OPS=y
 # CONFIG_DUMMY is not set
 # CONFIG_BONDING is not set
 # CONFIG_MACVLAN is not set
@@ -388,6 +393,7 @@
 # CONFIG_PHYLIB is not set
 CONFIG_NET_ETHERNET=y
 # CONFIG_MII is not set
+# CONFIG_ETHOC is not set
 # CONFIG_DNET is not set
 # CONFIG_IBM_NEW_EMAC_ZMII is not set
 # CONFIG_IBM_NEW_EMAC_RGMII is not set
@@ -405,7 +411,6 @@
 #
 # CONFIG_WLAN_PRE80211 is not set
 # CONFIG_WLAN_80211 is not set
-# CONFIG_IWLWIFI_LEDS is not set
 
 #
 # Enable WiMAX (Networking options) to see the WiMAX drivers
@@ -455,6 +460,7 @@
 CONFIG_LEGACY_PTY_COUNT=256
 # CONFIG_IPMI_HANDLER is not set
 CONFIG_HW_RANDOM=y
+# CONFIG_HW_RANDOM_TIMERIOMEM is not set
 # CONFIG_RTC is not set
 # CONFIG_GEN_RTC is not set
 # CONFIG_R3964 is not set
@@ -525,7 +531,7 @@
 #
 
 #
-# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed;
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
 #
 # CONFIG_USB_GADGET is not set
 
@@ -538,6 +544,7 @@
 # CONFIG_ACCESSIBILITY is not set
 # CONFIG_RTC_CLASS is not set
 # CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
 # CONFIG_UIO is not set
 # CONFIG_STAGING is not set
 
@@ -563,6 +570,11 @@
 # CONFIG_FUSE_FS is not set
 
 #
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
 # CD-ROM/DVD Filesystems
 #
 # CONFIG_ISO9660_FS is not set
@@ -601,8 +613,13 @@
 # CONFIG_HPFS_FS is not set
 # CONFIG_QNX4FS_FS is not set
 CONFIG_ROMFS_FS=y
+CONFIG_ROMFS_BACKED_BY_BLOCK=y
+# CONFIG_ROMFS_BACKED_BY_MTD is not set
+# CONFIG_ROMFS_BACKED_BY_BOTH is not set
+CONFIG_ROMFS_ON_BLOCK=y
 # CONFIG_SYSV_FS is not set
 # CONFIG_UFS_FS is not set
+# CONFIG_NILFS2_FS is not set
 CONFIG_NETWORK_FILESYSTEMS=y
 CONFIG_NFS_FS=y
 CONFIG_NFS_V3=y
@@ -614,7 +631,6 @@
 CONFIG_NFS_ACL_SUPPORT=y
 CONFIG_NFS_COMMON=y
 CONFIG_SUNRPC=y
-# CONFIG_SUNRPC_REGISTER_V4 is not set
 # CONFIG_RPCSEC_GSS_KRB5 is not set
 # CONFIG_RPCSEC_GSS_SPKM3 is not set
 # CONFIG_SMB_FS is not set
@@ -647,6 +663,9 @@
 CONFIG_DETECT_SOFTLOCKUP=y
 CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
 CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=1
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
 CONFIG_SCHED_DEBUG=y
 CONFIG_SCHEDSTATS=y
 CONFIG_TIMER_STATS=y
@@ -678,15 +697,8 @@
 # CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
 # CONFIG_FAULT_INJECTION is not set
 CONFIG_SYSCTL_SYSCALL_CHECK=y
-
-#
-# Tracers
-#
-# CONFIG_SCHED_TRACER is not set
-# CONFIG_CONTEXT_SWITCH_TRACER is not set
-# CONFIG_BOOT_TRACER is not set
-# CONFIG_TRACE_BRANCH_PROFILING is not set
-# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
+# CONFIG_PAGE_POISONING is not set
+# CONFIG_DYNAMIC_DEBUG is not set
 # CONFIG_SAMPLES is not set
 CONFIG_EARLY_PRINTK=y
 CONFIG_HEART_BEAT=y
@@ -777,6 +789,7 @@
 # Compression
 #
 # CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_ZLIB is not set
 # CONFIG_CRYPTO_LZO is not set
 
 #
@@ -784,6 +797,7 @@
 #
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
 CONFIG_CRYPTO_HW=y
+# CONFIG_BINARY_PRINTF is not set
 
 #
 # Library routines
@@ -797,8 +811,8 @@
 # CONFIG_CRC7 is not set
 # CONFIG_LIBCRC32C is not set
 CONFIG_ZLIB_INFLATE=y
-CONFIG_PLIST=y
 CONFIG_HAS_IOMEM=y
 CONFIG_HAS_IOPORT=y
 CONFIG_HAS_DMA=y
 CONFIG_HAVE_LMB=y
+CONFIG_NLATTR=y
diff --git a/arch/microblaze/kernel/intc.c b/arch/microblaze/kernel/intc.c
index a69d3e3..b156052 100644
--- a/arch/microblaze/kernel/intc.c
+++ b/arch/microblaze/kernel/intc.c
@@ -137,8 +137,8 @@
 
 	intr_type =
 		*(int *) of_get_property(intc, "xlnx,kind-of-intr", NULL);
-	if (intr_type >= (1 << nr_irq))
-		printk(KERN_INFO " ERROR: Mishmash in king-of-intr param\n");
+	if (intr_type >= (1 << (nr_irq + 1)))
+		printk(KERN_INFO " ERROR: Mismatch in kind-of-intr param\n");
 
 #ifdef CONFIG_SELFMOD_INTC
 	selfmod_function((int *) arr_func, intc_baseaddr);
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 26947ab..c4cae9e 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -473,12 +473,12 @@
 # Simplified: what IP22 does at 128MB+ in ksegN, IP28 does at 512MB+ in xkphys
 #
 ifdef CONFIG_SGI_IP28
-  ifeq ($(call cc-option-yn,-mr10k-cache-barrier=1), n)
-      $(error gcc doesn't support needed option -mr10k-cache-barrier=1)
+  ifeq ($(call cc-option-yn,-mr10k-cache-barrier=store), n)
+      $(error gcc doesn't support needed option -mr10k-cache-barrier=store)
   endif
 endif
 core-$(CONFIG_SGI_IP28)		+= arch/mips/sgi-ip22/
-cflags-$(CONFIG_SGI_IP28)	+= -mr10k-cache-barrier=1 -I$(srctree)/arch/mips/include/asm/mach-ip28
+cflags-$(CONFIG_SGI_IP28)	+= -mr10k-cache-barrier=store -I$(srctree)/arch/mips/include/asm/mach-ip28
 load-$(CONFIG_SGI_IP28)		+= 0xa800000020004000
 
 #
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index 8de858f..c2d53c1 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -956,7 +956,7 @@
 	void __user * __cl_addr = (addr);				\
 	unsigned long __cl_size = (n);					\
 	if (__cl_size && access_ok(VERIFY_WRITE,			\
-		((unsigned long)(__cl_addr)), __cl_size))		\
+					__cl_addr, __cl_size))		\
 		__cl_size = __clear_user(__cl_addr, __cl_size);		\
 	__cl_size;							\
 })
diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c
index f0cf46a..1c0048a 100644
--- a/arch/mips/mm/tlb-r3k.c
+++ b/arch/mips/mm/tlb-r3k.c
@@ -82,8 +82,7 @@
 	int cpu = smp_processor_id();
 
 	if (cpu_context(cpu, mm) != 0) {
-		unsigned long flags;
-		int size;
+		unsigned long size, flags;
 
 #ifdef DEBUG_TLB
 		printk("[tlbrange<%lu,0x%08lx,0x%08lx>]",
@@ -121,8 +120,7 @@
 
 void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
-	unsigned long flags;
-	int size;
+	unsigned long size, flags;
 
 #ifdef DEBUG_TLB
 	printk("[tlbrange<%lu,0x%08lx,0x%08lx>]", start, end);
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 9619f66..892be42 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -117,8 +117,7 @@
 	int cpu = smp_processor_id();
 
 	if (cpu_context(cpu, mm) != 0) {
-		unsigned long flags;
-		int size;
+		unsigned long size, flags;
 
 		ENTER_CRITICAL(flags);
 		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
@@ -160,8 +159,7 @@
 
 void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
-	unsigned long flags;
-	int size;
+	unsigned long size, flags;
 
 	ENTER_CRITICAL(flags);
 	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
diff --git a/arch/mips/mm/tlb-r8k.c b/arch/mips/mm/tlb-r8k.c
index 4f01a3b..4ec95cc 100644
--- a/arch/mips/mm/tlb-r8k.c
+++ b/arch/mips/mm/tlb-r8k.c
@@ -111,8 +111,7 @@
 /* Usable for KV1 addresses only! */
 void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
-	unsigned long flags;
-	int size;
+	unsigned long size, flags;
 
 	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 	size = (size + 1) >> 1;
diff --git a/arch/mips/sgi-ip22/ip22-reset.c b/arch/mips/sgi-ip22/ip22-reset.c
index 4ad5c33..45b6694 100644
--- a/arch/mips/sgi-ip22/ip22-reset.c
+++ b/arch/mips/sgi-ip22/ip22-reset.c
@@ -148,7 +148,7 @@
 
 	if (sgint->istat1 & SGINT_ISTAT1_PWR) {
 		/* Wait until interrupt goes away */
-		disable_irq(SGI_PANEL_IRQ);
+		disable_irq_nosync(SGI_PANEL_IRQ);
 		init_timer(&debounce_timer);
 		debounce_timer.function = debounce;
 		debounce_timer.expires = jiffies + 5;
diff --git a/arch/mips/sgi-ip32/ip32-reset.c b/arch/mips/sgi-ip32/ip32-reset.c
index b6cab08..9b95d80 100644
--- a/arch/mips/sgi-ip32/ip32-reset.c
+++ b/arch/mips/sgi-ip32/ip32-reset.c
@@ -53,7 +53,7 @@
 
 static void ip32_machine_power_off(void)
 {
-	volatile unsigned char reg_a, xctrl_a, xctrl_b;
+	unsigned char reg_a, xctrl_a, xctrl_b;
 
 	disable_irq(MACEISA_RTC_IRQ);
 	reg_a = CMOS_READ(RTC_REG_A);
@@ -91,9 +91,10 @@
 
 static void debounce(unsigned long data)
 {
-	volatile unsigned char reg_a, reg_c, xctrl_a;
+	unsigned char reg_a, reg_c, xctrl_a;
 
 	reg_c = CMOS_READ(RTC_INTR_FLAGS);
+	reg_a = CMOS_READ(RTC_REG_A);
 	CMOS_WRITE(reg_a | DS_REGA_DV0, RTC_REG_A);
 	wbflush();
 	xctrl_a = CMOS_READ(DS_B1_XCTRL4A);
@@ -137,7 +138,7 @@
 
 static irqreturn_t ip32_rtc_int(int irq, void *dev_id)
 {
-	volatile unsigned char reg_c;
+	unsigned char reg_c;
 
 	reg_c = CMOS_READ(RTC_INTR_FLAGS);
 	if (!(reg_c & RTC_IRQF)) {
@@ -145,7 +146,7 @@
 			"%s: RTC IRQ without RTC_IRQF\n", __func__);
 	}
 	/* Wait until interrupt goes away */
-	disable_irq(MACEISA_RTC_IRQ);
+	disable_irq_nosync(MACEISA_RTC_IRQ);
 	init_timer(&debounce_timer);
 	debounce_timer.function = debounce;
 	debounce_timer.expires = jiffies + 50;
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
index ac14f52..e28e65e 100644
--- a/arch/powerpc/configs/ps3_defconfig
+++ b/arch/powerpc/configs/ps3_defconfig
@@ -1,13 +1,14 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.29-rc8
-# Fri Mar 13 09:28:45 2009
+# Linux kernel version: 2.6.30-rc5
+# Fri May 15 10:37:00 2009
 #
 CONFIG_PPC64=y
 
 #
 # Processor support
 #
+CONFIG_PPC_BOOK3S=y
 # CONFIG_POWER4_ONLY is not set
 CONFIG_POWER3=y
 CONFIG_POWER4=y
@@ -55,9 +56,11 @@
 # CONFIG_GENERIC_TBSYNC is not set
 CONFIG_AUDIT_ARCH=y
 CONFIG_GENERIC_BUG=y
+CONFIG_DTC=y
 # CONFIG_DEFAULT_UIMAGE is not set
 # CONFIG_PPC_DCR_NATIVE is not set
 # CONFIG_PPC_DCR_MMIO is not set
+CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
 CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
 
 #
@@ -72,6 +75,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_SYSVIPC_SYSCTL=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
 # CONFIG_BSD_PROCESS_ACCT is not set
 # CONFIG_TASKSTATS is not set
 # CONFIG_AUDIT is not set
@@ -88,8 +92,7 @@
 CONFIG_LOG_BUF_SHIFT=17
 # CONFIG_GROUP_SCHED is not set
 # CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
 # CONFIG_RELAY is not set
 CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
@@ -99,6 +102,9 @@
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_SYSCTL=y
 CONFIG_ANON_INODES=y
@@ -107,6 +113,7 @@
 CONFIG_KALLSYMS=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
+# CONFIG_STRIP_ASM_SYMS is not set
 CONFIG_HOTPLUG=y
 CONFIG_PRINTK=y
 CONFIG_BUG=y
@@ -138,6 +145,7 @@
 CONFIG_HAVE_ARCH_TRACEHOOK=y
 CONFIG_HAVE_DMA_ATTRS=y
 CONFIG_USE_GENERIC_SMP_HELPERS=y
+# CONFIG_SLOW_WORK is not set
 # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
 CONFIG_SLABINFO=y
 CONFIG_RT_MUTEXES=y
@@ -150,7 +158,6 @@
 # CONFIG_MODULE_SRCVERSION_ALL is not set
 CONFIG_STOP_MACHINE=y
 CONFIG_BLOCK=y
-# CONFIG_BLK_DEV_IO_TRACE is not set
 CONFIG_BLK_DEV_BSG=y
 # CONFIG_BLK_DEV_INTEGRITY is not set
 CONFIG_BLOCK_COMPAT=y
@@ -172,7 +179,6 @@
 #
 # Platform support
 #
-CONFIG_PPC_MULTIPLATFORM=y
 # CONFIG_PPC_PSERIES is not set
 # CONFIG_PPC_ISERIES is not set
 # CONFIG_PPC_PMAC is not set
@@ -209,6 +215,7 @@
 # CONFIG_SPU_TRACE is not set
 CONFIG_SPU_BASE=y
 # CONFIG_PQ2ADS is not set
+# CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set
 # CONFIG_IPIC is not set
 # CONFIG_MPIC is not set
 # CONFIG_MPIC_WEIRD is not set
@@ -279,11 +286,14 @@
 CONFIG_ZONE_DMA_FLAG=1
 CONFIG_BOUNCE=y
 CONFIG_UNEVICTABLE_LRU=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
 CONFIG_ARCH_MEMORY_PROBE=y
 CONFIG_PPC_HAS_HASH_64K=y
 CONFIG_PPC_4K_PAGES=y
 # CONFIG_PPC_16K_PAGES is not set
 # CONFIG_PPC_64K_PAGES is not set
+# CONFIG_PPC_256K_PAGES is not set
 CONFIG_FORCE_MAX_ZONEORDER=13
 CONFIG_SCHED_SMT=y
 CONFIG_PROC_DEVICETREE=y
@@ -316,7 +326,6 @@
 #
 # Networking options
 #
-CONFIG_COMPAT_NET_DEV_OPS=y
 CONFIG_PACKET=y
 CONFIG_PACKET_MMAP=y
 CONFIG_UNIX=y
@@ -389,6 +398,7 @@
 # CONFIG_LAPB is not set
 # CONFIG_ECONET is not set
 # CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
 # CONFIG_NET_SCHED is not set
 # CONFIG_DCB is not set
 
@@ -396,6 +406,7 @@
 # Network testing
 #
 # CONFIG_NET_PKTGEN is not set
+# CONFIG_NET_DROP_MONITOR is not set
 # CONFIG_HAMRADIO is not set
 # CONFIG_CAN is not set
 # CONFIG_IRDA is not set
@@ -419,11 +430,9 @@
 # CONFIG_BT_HCIBFUSB is not set
 # CONFIG_BT_HCIVHCI is not set
 # CONFIG_AF_RXRPC is not set
-# CONFIG_PHONET is not set
 CONFIG_WIRELESS=y
 CONFIG_CFG80211=m
 # CONFIG_CFG80211_REG_DEBUG is not set
-CONFIG_NL80211=y
 # CONFIG_WIRELESS_OLD_REGULATORY is not set
 CONFIG_WIRELESS_EXT=y
 # CONFIG_WIRELESS_EXT_SYSFS is not set
@@ -602,6 +611,7 @@
 # CONFIG_SCSI_SRP_ATTRS is not set
 # CONFIG_SCSI_LOWLEVEL is not set
 # CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
 # CONFIG_ATA is not set
 CONFIG_MD=y
 # CONFIG_BLK_DEV_MD is not set
@@ -616,6 +626,7 @@
 # CONFIG_DM_UEVENT is not set
 # CONFIG_MACINTOSH_DRIVERS is not set
 CONFIG_NETDEVICES=y
+CONFIG_COMPAT_NET_DEV_OPS=y
 # CONFIG_DUMMY is not set
 # CONFIG_BONDING is not set
 # CONFIG_MACVLAN is not set
@@ -625,6 +636,8 @@
 # CONFIG_PHYLIB is not set
 CONFIG_NET_ETHERNET=y
 CONFIG_MII=m
+# CONFIG_ETHOC is not set
+# CONFIG_DNET is not set
 # CONFIG_IBM_NEW_EMAC_ZMII is not set
 # CONFIG_IBM_NEW_EMAC_RGMII is not set
 # CONFIG_IBM_NEW_EMAC_TAH is not set
@@ -646,12 +659,13 @@
 CONFIG_WLAN_80211=y
 # CONFIG_LIBERTAS is not set
 # CONFIG_LIBERTAS_THINFIRM is not set
+# CONFIG_AT76C50X_USB is not set
 # CONFIG_USB_ZD1201 is not set
 # CONFIG_USB_NET_RNDIS_WLAN is not set
 # CONFIG_RTL8187 is not set
 # CONFIG_MAC80211_HWSIM is not set
 # CONFIG_P54_COMMON is not set
-# CONFIG_IWLWIFI_LEDS is not set
+# CONFIG_AR9170_USB is not set
 # CONFIG_HOSTAP is not set
 # CONFIG_B43 is not set
 # CONFIG_B43LEGACY is not set
@@ -673,6 +687,7 @@
 CONFIG_USB_USBNET=m
 CONFIG_USB_NET_AX8817X=m
 # CONFIG_USB_NET_CDCETHER is not set
+# CONFIG_USB_NET_CDC_EEM is not set
 # CONFIG_USB_NET_DM9601 is not set
 # CONFIG_USB_NET_SMSC95XX is not set
 # CONFIG_USB_NET_GL620A is not set
@@ -724,28 +739,7 @@
 #
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
-CONFIG_INPUT_JOYSTICK=y
-# CONFIG_JOYSTICK_ANALOG is not set
-# CONFIG_JOYSTICK_A3D is not set
-# CONFIG_JOYSTICK_ADI is not set
-# CONFIG_JOYSTICK_COBRA is not set
-# CONFIG_JOYSTICK_GF2K is not set
-# CONFIG_JOYSTICK_GRIP is not set
-# CONFIG_JOYSTICK_GRIP_MP is not set
-# CONFIG_JOYSTICK_GUILLEMOT is not set
-# CONFIG_JOYSTICK_INTERACT is not set
-# CONFIG_JOYSTICK_SIDEWINDER is not set
-# CONFIG_JOYSTICK_TMDC is not set
-# CONFIG_JOYSTICK_IFORCE is not set
-# CONFIG_JOYSTICK_WARRIOR is not set
-# CONFIG_JOYSTICK_MAGELLAN is not set
-# CONFIG_JOYSTICK_SPACEORB is not set
-# CONFIG_JOYSTICK_SPACEBALL is not set
-# CONFIG_JOYSTICK_STINGER is not set
-# CONFIG_JOYSTICK_TWIDJOY is not set
-# CONFIG_JOYSTICK_ZHENHUA is not set
-# CONFIG_JOYSTICK_JOYDUMP is not set
-# CONFIG_JOYSTICK_XPAD is not set
+# CONFIG_INPUT_JOYSTICK is not set
 # CONFIG_INPUT_TABLET is not set
 # CONFIG_INPUT_TOUCHSCREEN is not set
 # CONFIG_INPUT_MISC is not set
@@ -864,6 +858,7 @@
 # CONFIG_FB_VIRTUAL is not set
 # CONFIG_FB_METRONOME is not set
 # CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
 # CONFIG_BACKLIGHT_LCD_SUPPORT is not set
 
 #
@@ -934,15 +929,17 @@
 #
 # Special HID drivers
 #
-# CONFIG_HID_COMPAT is not set
 # CONFIG_HID_A4TECH is not set
 # CONFIG_HID_APPLE is not set
 # CONFIG_HID_BELKIN is not set
 # CONFIG_HID_CHERRY is not set
 # CONFIG_HID_CHICONY is not set
 # CONFIG_HID_CYPRESS is not set
+# CONFIG_DRAGONRISE_FF is not set
 # CONFIG_HID_EZKEY is not set
+# CONFIG_HID_KYE is not set
 # CONFIG_HID_GYRATION is not set
+# CONFIG_HID_KENSINGTON is not set
 # CONFIG_HID_LOGITECH is not set
 # CONFIG_HID_MICROSOFT is not set
 # CONFIG_HID_MONTEREY is not set
@@ -950,7 +947,7 @@
 # CONFIG_HID_PANTHERLORD is not set
 # CONFIG_HID_PETALYNX is not set
 # CONFIG_HID_SAMSUNG is not set
-# CONFIG_HID_SONY is not set
+CONFIG_HID_SONY=m
 # CONFIG_HID_SUNPLUS is not set
 # CONFIG_GREENASIA_FF is not set
 # CONFIG_HID_TOPSEED is not set
@@ -1012,11 +1009,11 @@
 # CONFIG_USB_TMC is not set
 
 #
-# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed;
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
 #
 
 #
-# see USB_STORAGE Help for more information
+# also be needed; see USB_STORAGE Help for more info
 #
 CONFIG_USB_STORAGE=m
 # CONFIG_USB_STORAGE_DEBUG is not set
@@ -1058,7 +1055,6 @@
 # CONFIG_USB_LED is not set
 # CONFIG_USB_CYPRESS_CY7C63 is not set
 # CONFIG_USB_CYTHERM is not set
-# CONFIG_USB_PHIDGET is not set
 # CONFIG_USB_IDMOUSE is not set
 # CONFIG_USB_FTDI_ELAN is not set
 # CONFIG_USB_APPLEDISPLAY is not set
@@ -1074,6 +1070,7 @@
 #
 # OTG and related infrastructure
 #
+# CONFIG_NOP_USB_XCEIV is not set
 # CONFIG_MMC is not set
 # CONFIG_MEMSTICK is not set
 # CONFIG_NEW_LEDS is not set
@@ -1113,8 +1110,10 @@
 #
 # on-CPU RTC drivers
 #
-CONFIG_RTC_DRV_PPC=m
+# CONFIG_RTC_DRV_GENERIC is not set
+CONFIG_RTC_DRV_PS3=m
 # CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
 # CONFIG_UIO is not set
 # CONFIG_STAGING is not set
 
@@ -1125,6 +1124,7 @@
 # CONFIG_EXT2_FS_XATTR is not set
 # CONFIG_EXT2_FS_XIP is not set
 CONFIG_EXT3_FS=m
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 CONFIG_EXT3_FS_XATTR=y
 # CONFIG_EXT3_FS_POSIX_ACL is not set
 # CONFIG_EXT3_FS_SECURITY is not set
@@ -1161,6 +1161,11 @@
 # CONFIG_FUSE_FS is not set
 
 #
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
 # CD-ROM/DVD Filesystems
 #
 CONFIG_ISO9660_FS=m
@@ -1211,6 +1216,7 @@
 # CONFIG_ROMFS_FS is not set
 # CONFIG_SYSV_FS is not set
 # CONFIG_UFS_FS is not set
+# CONFIG_NILFS2_FS is not set
 CONFIG_NETWORK_FILESYSTEMS=y
 CONFIG_NFS_FS=y
 CONFIG_NFS_V3=y
@@ -1223,7 +1229,6 @@
 CONFIG_NFS_COMMON=y
 CONFIG_SUNRPC=y
 CONFIG_SUNRPC_GSS=y
-# CONFIG_SUNRPC_REGISTER_V4 is not set
 CONFIG_RPCSEC_GSS_KRB5=y
 # CONFIG_RPCSEC_GSS_SPKM3 is not set
 # CONFIG_SMB_FS is not set
@@ -1283,6 +1288,7 @@
 # CONFIG_NLS_KOI8_U is not set
 # CONFIG_NLS_UTF8 is not set
 # CONFIG_DLM is not set
+CONFIG_BINARY_PRINTF=y
 
 #
 # Library routines
@@ -1296,15 +1302,16 @@
 CONFIG_CRC32=y
 # CONFIG_CRC7 is not set
 # CONFIG_LIBCRC32C is not set
-CONFIG_ZLIB_INFLATE=m
+CONFIG_ZLIB_INFLATE=y
 CONFIG_ZLIB_DEFLATE=m
 CONFIG_LZO_COMPRESS=m
 CONFIG_LZO_DECOMPRESS=m
-CONFIG_PLIST=y
+CONFIG_DECOMPRESS_GZIP=y
 CONFIG_HAS_IOMEM=y
 CONFIG_HAS_IOPORT=y
 CONFIG_HAS_DMA=y
 CONFIG_HAVE_LMB=y
+CONFIG_NLATTR=y
 
 #
 # Kernel hacking
@@ -1322,6 +1329,9 @@
 CONFIG_DETECT_SOFTLOCKUP=y
 # CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
 CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
 CONFIG_SCHED_DEBUG=y
 # CONFIG_SCHEDSTATS is not set
 # CONFIG_TIMER_STATS is not set
@@ -1357,12 +1367,15 @@
 # CONFIG_FAULT_INJECTION is not set
 # CONFIG_LATENCYTOP is not set
 CONFIG_SYSCTL_SYSCALL_CHECK=y
+# CONFIG_DEBUG_PAGEALLOC is not set
 CONFIG_NOP_TRACER=y
 CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
 CONFIG_HAVE_DYNAMIC_FTRACE=y
 CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
 CONFIG_RING_BUFFER=y
 CONFIG_TRACING=y
+CONFIG_TRACING_SUPPORT=y
 
 #
 # Tracers
@@ -1371,18 +1384,21 @@
 # CONFIG_IRQSOFF_TRACER is not set
 # CONFIG_SCHED_TRACER is not set
 # CONFIG_CONTEXT_SWITCH_TRACER is not set
+# CONFIG_EVENT_TRACER is not set
 # CONFIG_BOOT_TRACER is not set
 # CONFIG_TRACE_BRANCH_PROFILING is not set
 # CONFIG_STACK_TRACER is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
 # CONFIG_FTRACE_STARTUP_TEST is not set
-# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
+# CONFIG_DYNAMIC_DEBUG is not set
 # CONFIG_SAMPLES is not set
 CONFIG_HAVE_ARCH_KGDB=y
 # CONFIG_KGDB is not set
 CONFIG_PRINT_STACK_DEPTH=64
 CONFIG_DEBUG_STACKOVERFLOW=y
 # CONFIG_DEBUG_STACK_USAGE is not set
-# CONFIG_DEBUG_PAGEALLOC is not set
 # CONFIG_CODE_PATCHING_SELFTEST is not set
 # CONFIG_FTR_FIXUP_SELFTEST is not set
 # CONFIG_MSI_BITMAP_SELFTEST is not set
@@ -1415,10 +1431,12 @@
 CONFIG_CRYPTO_HASH2=y
 CONFIG_CRYPTO_RNG=m
 CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_MANAGER2=y
 CONFIG_CRYPTO_GF128MUL=m
 # CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_WORKQUEUE=y
 # CONFIG_CRYPTO_CRYPTD is not set
 # CONFIG_CRYPTO_AUTHENC is not set
 # CONFIG_CRYPTO_TEST is not set
@@ -1487,6 +1505,7 @@
 # Compression
 #
 # CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_ZLIB is not set
 CONFIG_CRYPTO_LZO=m
 
 #
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
index 70e2a736..2d182f1 100644
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -157,7 +157,7 @@
 	 * 0xe8, 0x4c, 0x00, 0x28,    ld      r2,40(r12)
 	 */
 
-	pr_debug("ip:%lx jumps to %lx r2: %lx", ip, tramp, mod->arch.toc);
+	pr_devel("ip:%lx jumps to %lx r2: %lx", ip, tramp, mod->arch.toc);
 
 	/* Find where the trampoline jumps to */
 	if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
@@ -165,7 +165,7 @@
 		return -EFAULT;
 	}
 
-	pr_debug(" %08x %08x", jmp[0], jmp[1]);
+	pr_devel(" %08x %08x", jmp[0], jmp[1]);
 
 	/* verify that this is what we expect it to be */
 	if (((jmp[0] & 0xffff0000) != 0x3d820000) ||
@@ -181,18 +181,18 @@
 	offset = ((unsigned)((unsigned short)jmp[0]) << 16) +
 		(int)((short)jmp[1]);
 
-	pr_debug(" %x ", offset);
+	pr_devel(" %x ", offset);
 
 	/* get the address this jumps too */
 	tramp = mod->arch.toc + offset + 32;
-	pr_debug("toc: %lx", tramp);
+	pr_devel("toc: %lx", tramp);
 
 	if (probe_kernel_read(jmp, (void *)tramp, 8)) {
 		printk(KERN_ERR "Failed to read %lx\n", tramp);
 		return -EFAULT;
 	}
 
-	pr_debug(" %08x %08x\n", jmp[0], jmp[1]);
+	pr_devel(" %08x %08x\n", jmp[0], jmp[1]);
 
 	ptr = ((unsigned long)jmp[0] << 32) + jmp[1];
 
@@ -269,7 +269,7 @@
 	 *  0x4e, 0x80, 0x04, 0x20  bctr
 	 */
 
-	pr_debug("ip:%lx jumps to %lx", ip, tramp);
+	pr_devel("ip:%lx jumps to %lx", ip, tramp);
 
 	/* Find where the trampoline jumps to */
 	if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
@@ -277,7 +277,7 @@
 		return -EFAULT;
 	}
 
-	pr_debug(" %08x %08x ", jmp[0], jmp[1]);
+	pr_devel(" %08x %08x ", jmp[0], jmp[1]);
 
 	/* verify that this is what we expect it to be */
 	if (((jmp[0] & 0xffff0000) != 0x3d600000) ||
@@ -293,7 +293,7 @@
 	if (tramp & 0x8000)
 		tramp -= 0x10000;
 
-	pr_debug(" %lx ", tramp);
+	pr_devel(" %lx ", tramp);
 
 	if (tramp != addr) {
 		printk(KERN_ERR
@@ -402,7 +402,7 @@
 	/* ld r2,40(r1) */
 	op[1] = 0xe8410028;
 
-	pr_debug("write to %lx\n", rec->ip);
+	pr_devel("write to %lx\n", rec->ip);
 
 	if (probe_kernel_write((void *)ip, op, MCOUNT_INSN_SIZE * 2))
 		return -EPERM;
@@ -442,7 +442,7 @@
 		return -EINVAL;
 	}
 
-	pr_debug("write to %lx\n", rec->ip);
+	pr_devel("write to %lx\n", rec->ip);
 
 	if (probe_kernel_write((void *)ip, &op, MCOUNT_INSN_SIZE))
 		return -EPERM;
@@ -594,7 +594,7 @@
 			PPC_LONG "2b,4b\n"
 		".previous"
 
-		: [old] "=r" (old), [faulted] "=r" (faulted)
+		: [old] "=&r" (old), [faulted] "=r" (faulted)
 		: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
 		: "memory"
 	);
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index a047a6c..8ef8a14 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -264,6 +264,7 @@
 		*(.data.page_aligned)
 	}
 
+	. = ALIGN(L1_CACHE_BYTES);
 	.data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
 		*(.data.cacheline_aligned)
 	}
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index f5c6fd4..ae1d67c 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -219,7 +219,8 @@
 		entry = do_dcache_icache_coherency(entry);
 	changed = !pte_same(*(ptep), entry);
 	if (changed) {
-		assert_pte_locked(vma->vm_mm, address);
+		if (!(vma->vm_flags & VM_HUGETLB))
+			assert_pte_locked(vma->vm_mm, address);
 		__ptep_set_access_flags(ptep, entry);
 		flush_tlb_page_nohash(vma, address);
 	}
diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c
index 3018552..04296fff 100644
--- a/arch/powerpc/platforms/maple/pci.c
+++ b/arch/powerpc/platforms/maple/pci.c
@@ -592,3 +592,17 @@
 	}
 	return irq;
 }
+
+static void __devinit quirk_ipr_msi(struct pci_dev *dev)
+{
+	/* Something prevents MSIs from the IPR from working on Bimini,
+	 * and the driver has no smarts to recover. So disable MSI
+	 * on it for now. */
+
+	if (machine_is(maple)) {
+		dev->no_msi = 1;
+		dev_info(&dev->dev, "Quirk disabled MSI\n");
+	}
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
+			quirk_ipr_msi);
diff --git a/arch/sh/boards/board-ap325rxa.c b/arch/sh/boards/board-ap325rxa.c
index 39e4691..f2a2964 100644
--- a/arch/sh/boards/board-ap325rxa.c
+++ b/arch/sh/boards/board-ap325rxa.c
@@ -263,6 +263,9 @@
 	struct i2c_msg msg;
 	int ret;
 
+	if (!a)
+		return -ENODEV;
+
 	camera_power(1);
 	msg.addr = 0x6e;
 	msg.buf = camera_ncm03j_magic;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index df9e885..a6efe0a 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -498,6 +498,19 @@
 	  over full virtualization.  However, when run without a hypervisor
 	  the kernel is theoretically slower and slightly larger.
 
+config PARAVIRT_SPINLOCKS
+	bool "Paravirtualization layer for spinlocks"
+	depends on PARAVIRT && SMP && EXPERIMENTAL
+	---help---
+	  Paravirtualized spinlocks allow a pvops backend to replace the
+	  spinlock implementation with something virtualization-friendly
+	  (for example, block the virtual CPU rather than spinning).
+
+	  Unfortunately the downside is an up to 5% performance hit on
+	  native kernels, with various workloads.
+
+	  If you are unsure how to answer this question, answer N.
+
 config PARAVIRT_CLOCK
 	bool
 	default n
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 378e369..a53da00 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -1443,7 +1443,7 @@
 
 #define paravirt_nop	((void *)_paravirt_nop)
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
 
 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
 {
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index aee103b..02ecb30 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -82,22 +82,22 @@
 	case 1:						\
 		asm(op "b %1,"__percpu_arg(0)		\
 		    : "+m" (var)			\
-		    : "ri" ((T__)val));			\
+		    : "qi" ((T__)(val)));		\
 		break;					\
 	case 2:						\
 		asm(op "w %1,"__percpu_arg(0)		\
 		    : "+m" (var)			\
-		    : "ri" ((T__)val));			\
+		    : "ri" ((T__)(val)));		\
 		break;					\
 	case 4:						\
 		asm(op "l %1,"__percpu_arg(0)		\
 		    : "+m" (var)			\
-		    : "ri" ((T__)val));			\
+		    : "ri" ((T__)(val)));		\
 		break;					\
 	case 8:						\
 		asm(op "q %1,"__percpu_arg(0)		\
 		    : "+m" (var)			\
-		    : "re" ((T__)val));			\
+		    : "re" ((T__)(val)));		\
 		break;					\
 	default: __bad_percpu_size();			\
 	}						\
@@ -109,7 +109,7 @@
 	switch (sizeof(var)) {				\
 	case 1:						\
 		asm(op "b "__percpu_arg(1)",%0"		\
-		    : "=r" (ret__)			\
+		    : "=q" (ret__)			\
 		    : "m" (var));			\
 		break;					\
 	case 2:						\
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index e304b66..624f133 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -187,14 +187,15 @@
 
 /*
  * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
- * when it traps.  So regs will be the current sp.
+ * when it traps.  The previous stack will be directly underneath the saved
+ * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'.
  *
  * This is valid only for kernel mode traps.
  */
-static inline unsigned long kernel_trap_sp(struct pt_regs *regs)
+static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
 {
 #ifdef CONFIG_X86_32
-	return (unsigned long)regs;
+	return (unsigned long)(&regs->sp);
 #else
 	return regs->sp;
 #endif
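
A hypothetical miniature of the layout the new comment describes (not the real pt_regs): on a same-privilege 32-bit trap the CPU pushes everything up to flags but not sp/ss, so the interrupted stack begins exactly where the sp slot would have been, which is why the function now returns &regs->sp.

#include <stdio.h>

struct fake_regs {
	unsigned long bx, cx, dx, si, di, bp, ax;
	unsigned long ip, cs, flags;
	unsigned long sp, ss;	/* only saved on a privilege change */
};

int main(void)
{
	struct fake_regs regs;

	printf("previous stack top would be %p (&regs.sp)\n", (void *)&regs.sp);
	return 0;
}
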
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index e5e6caf..b7e5db8 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -172,7 +172,7 @@
 	return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
 }
 
-#ifndef CONFIG_PARAVIRT
+#ifndef CONFIG_PARAVIRT_SPINLOCKS
 
 static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
 {
@@ -206,7 +206,7 @@
 	__raw_spin_lock(lock);
 }
 
-#endif
+#endif	/* CONFIG_PARAVIRT_SPINLOCKS */
 
 static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
 {
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 145cce7..88d1bfc 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -89,7 +89,8 @@
 obj-$(CONFIG_VMI)		+= vmi_32.o vmiclock_32.o
 obj-$(CONFIG_KVM_GUEST)		+= kvm.o
 obj-$(CONFIG_KVM_CLOCK)		+= kvmclock.o
-obj-$(CONFIG_PARAVIRT)		+= paravirt.o paravirt_patch_$(BITS).o paravirt-spinlocks.o
+obj-$(CONFIG_PARAVIRT)		+= paravirt.o paravirt_patch_$(BITS).o
+obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= paravirt-spinlocks.o
 obj-$(CONFIG_PARAVIRT_CLOCK)	+= pvclock.o
 
 obj-$(CONFIG_PCSPKR_PLATFORM)	+= pcspeaker.o
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
index 1c11b81..3029477 100644
--- a/arch/x86/kernel/apic/es7000_32.c
+++ b/arch/x86/kernel/apic/es7000_32.c
@@ -254,7 +254,7 @@
 }
 
 #ifdef CONFIG_ACPI
-static int find_unisys_acpi_oem_table(unsigned long *oem_addr)
+static int __init find_unisys_acpi_oem_table(unsigned long *oem_addr)
 {
 	struct acpi_table_header *header = NULL;
 	struct es7000_oem_table *table;
@@ -285,7 +285,7 @@
 	return 0;
 }
 
-static void unmap_unisys_acpi_oem_table(unsigned long oem_addr)
+static void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr)
 {
 	if (!oem_addr)
 		return;
@@ -306,7 +306,7 @@
 static int es7000_acpi_ret;
 
 /* Hook from generic ACPI tables.c */
-static int es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 {
 	unsigned long oem_addr = 0;
 	int check_dsdt;
@@ -717,7 +717,7 @@
 	.safe_wait_icr_idle		= native_safe_apic_wait_icr_idle,
 };
 
-struct apic apic_es7000 = {
+struct apic __refdata apic_es7000 = {
 
 	.name				= "es7000",
 	.probe				= probe_es7000,
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 0b776c0..d21d4fb 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -275,7 +275,11 @@
 	}
 	printk(KERN_DEBUG "MTRR variable ranges %sabled:\n",
 	       mtrr_state.enabled & 2 ? "en" : "dis");
-	high_width = ((size_or_mask ? ffs(size_or_mask) - 1 : 32) - (32 - PAGE_SHIFT) + 3) / 4;
+	if (size_or_mask & 0xffffffffUL)
+		high_width = ffs(size_or_mask & 0xffffffffUL) - 1;
+	else
+		high_width = ffs(size_or_mask>>32) + 32 - 1;
+	high_width = (high_width - (32 - PAGE_SHIFT) + 3) / 4;
 	for (i = 0; i < num_var_ranges; ++i) {
 		if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
 			printk(KERN_DEBUG "  %u base %0*X%05X000 mask %0*X%05X000 %s\n",
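
The old expression applied ffs() to size_or_mask after it had been truncated to 32 bits, so on machines with 44-bit or wider physical addresses (where the low word of the mask is zero) the computed field width was wrong. A standalone restatement of the corrected bit search, under the same assumptions as the patch:

	/* Index of the lowest set bit of a 64-bit mask, done in 32-bit
	 * halves so a 32-bit ffs() never sees a truncated zero. */
	static int demo_lowest_set_bit(unsigned long long mask)
	{
		if (mask & 0xffffffffULL)
			return __builtin_ffs((unsigned int)mask) - 1;
		return __builtin_ffs((unsigned int)(mask >> 32)) + 32 - 1;
	}

high_width then converts that bit index into the number of hex digits needed for the physical-address bits above the low 32, which is what the %0*X format in the printk consumes.
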
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 18dfa30..b79c553 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -442,7 +442,7 @@
 		_ASM_EXTABLE(1b, 4b)
 		_ASM_EXTABLE(2b, 4b)
 
-		: [old] "=r" (old), [faulted] "=r" (faulted)
+		: [old] "=&r" (old), [faulted] "=r" (faulted)
 		: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
 		: "memory"
 	);
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 8e45f44..9faf43b 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -134,7 +134,9 @@
 		.pv_irq_ops = pv_irq_ops,
 		.pv_apic_ops = pv_apic_ops,
 		.pv_mmu_ops = pv_mmu_ops,
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
 		.pv_lock_ops = pv_lock_ops,
+#endif
 	};
 	return *((void **)&tmpl + type);
 }
diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
index 04df67f..044897b 100644
--- a/arch/x86/oprofile/backtrace.c
+++ b/arch/x86/oprofile/backtrace.c
@@ -76,9 +76,9 @@
 x86_backtrace(struct pt_regs * const regs, unsigned int depth)
 {
 	struct frame_head *head = (struct frame_head *)frame_pointer(regs);
-	unsigned long stack = kernel_trap_sp(regs);
 
 	if (!user_mode_vm(regs)) {
+		unsigned long stack = kernel_stack_pointer(regs);
 		if (depth)
 			dump_trace(NULL, regs, (unsigned long *)stack, 0,
 				   &backtrace_ops, &depth);
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index 3b767d0..172438f 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -9,5 +9,6 @@
 			time.o xen-asm.o xen-asm_$(BITS).o \
 			grant-table.o suspend.o
 
-obj-$(CONFIG_SMP)		+= smp.o spinlock.o
-obj-$(CONFIG_XEN_DEBUG_FS)	+= debugfs.o
\ No newline at end of file
+obj-$(CONFIG_SMP)		+= smp.o
+obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
+obj-$(CONFIG_XEN_DEBUG_FS)	+= debugfs.o
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index e25a78e..fba55b1 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -42,6 +42,7 @@
 #include <linux/highmem.h>
 #include <linux/debugfs.h>
 #include <linux/bug.h>
+#include <linux/module.h>
 
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 2013946..ca6596b 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -62,15 +62,26 @@
 #ifdef CONFIG_SMP
 void xen_smp_init(void);
 
-void __init xen_init_spinlocks(void);
-__cpuinit void xen_init_lock_cpu(int cpu);
-void xen_uninit_lock_cpu(int cpu);
-
 extern cpumask_var_t xen_cpu_initialized_map;
 #else
 static inline void xen_smp_init(void) {}
 #endif
 
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+void __init xen_init_spinlocks(void);
+__cpuinit void xen_init_lock_cpu(int cpu);
+void xen_uninit_lock_cpu(int cpu);
+#else
+static inline void xen_init_spinlocks(void)
+{
+}
+static inline void xen_init_lock_cpu(int cpu)
+{
+}
+static inline void xen_uninit_lock_cpu(int cpu)
+{
+}
+#endif
 
 /* Declare an asm function, along with symbols needed to make it
    inlineable */
diff --git a/crypto/api.c b/crypto/api.c
index 314dab9..fd2545d 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -221,7 +221,8 @@
 
 		request_module(name);
 
-		if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask) &&
+		if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask &
+		      CRYPTO_ALG_NEED_FALLBACK) &&
 		    snprintf(tmp, sizeof(tmp), "%s-all", name) < sizeof(tmp))
 			request_module(tmp);
 
diff --git a/crypto/eseqiv.c b/crypto/eseqiv.c
index 2a342c8..3ca3b66 100644
--- a/crypto/eseqiv.c
+++ b/crypto/eseqiv.c
@@ -153,7 +153,8 @@
 	if (err)
 		goto out;
 
-	eseqiv_complete2(req);
+	if (giv != req->giv)
+		eseqiv_complete2(req);
 
 out:
 	return err;
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index 17e5082..72ac28d 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -5,40 +5,43 @@
 ccflags-y			:= -Os
 ccflags-$(CONFIG_ACPI_DEBUG)	+= -DACPI_DEBUG_OUTPUT
 
-obj-y := dsfield.o   dsmthdat.o  dsopcode.o  dswexec.o  dswscope.o \
+# use acpi.o to put all files here into the acpi.o modparam namespace
+obj-y	+= acpi.o
+
+acpi-y := dsfield.o   dsmthdat.o  dsopcode.o  dswexec.o  dswscope.o \
 	 dsmethod.o  dsobject.o  dsutils.o   dswload.o  dswstate.o \
 	 dsinit.o
 
-obj-y += evevent.o  evregion.o  evsci.o    evxfevnt.o \
+acpi-y += evevent.o  evregion.o  evsci.o    evxfevnt.o \
 	 evmisc.o   evrgnini.o  evxface.o  evxfregn.o \
 	 evgpe.o    evgpeblk.o
 
-obj-y += exconfig.o  exfield.o  exnames.o   exoparg6.o  exresolv.o  exstorob.o\
+acpi-y += exconfig.o  exfield.o  exnames.o   exoparg6.o  exresolv.o  exstorob.o\
 	 exconvrt.o  exfldio.o  exoparg1.o  exprep.o    exresop.o   exsystem.o\
 	 excreate.o  exmisc.o   exoparg2.o  exregion.o  exstore.o   exutils.o \
 	 exdump.o    exmutex.o  exoparg3.o  exresnte.o  exstoren.o
 
-obj-y += hwacpi.o  hwgpe.o  hwregs.o  hwsleep.o hwxface.o hwvalid.o
+acpi-y += hwacpi.o  hwgpe.o  hwregs.o  hwsleep.o hwxface.o hwvalid.o
 
-obj-$(ACPI_FUTURE_USAGE) += hwtimer.o
+acpi-$(ACPI_FUTURE_USAGE) += hwtimer.o
 
-obj-y += nsaccess.o  nsload.o    nssearch.o  nsxfeval.o \
+acpi-y += nsaccess.o  nsload.o    nssearch.o  nsxfeval.o \
 	 nsalloc.o   nseval.o    nsnames.o   nsutils.o   nsxfname.o \
 	 nsdump.o    nsinit.o    nsobject.o  nswalk.o    nsxfobj.o  \
 	 nsparse.o   nspredef.o
 
-obj-$(ACPI_FUTURE_USAGE) += nsdumpdv.o
+acpi-$(ACPI_FUTURE_USAGE) += nsdumpdv.o
 
-obj-y += psargs.o    psparse.o  psloop.o pstree.o   pswalk.o  \
+acpi-y += psargs.o    psparse.o  psloop.o pstree.o   pswalk.o  \
 	 psopcode.o  psscope.o  psutils.o  psxface.o
 
-obj-y += rsaddr.o rscreate.o rsinfo.o rsio.o rslist.o rsmisc.o rsxface.o \
+acpi-y += rsaddr.o rscreate.o rsinfo.o rsio.o rslist.o rsmisc.o rsxface.o \
 	 rscalc.o  rsirq.o  rsmemory.o  rsutils.o
 
-obj-$(ACPI_FUTURE_USAGE) += rsdump.o
+acpi-$(ACPI_FUTURE_USAGE) += rsdump.o
 
-obj-y += tbxface.o tbinstal.o tbutils.o tbfind.o tbfadt.o tbxfroot.o
+acpi-y += tbxface.o tbinstal.o tbutils.o tbfind.o tbfadt.o tbxfroot.o
 
-obj-y += utalloc.o utdebug.o uteval.o utinit.o utmisc.o utxface.o \
+acpi-y += utalloc.o utdebug.o uteval.o utinit.o utmisc.o utxface.o \
 		utcopy.o utdelete.o utglobal.o utmath.o utobject.o \
 		utstate.o utmutex.o utobject.o utresrc.o utlock.o
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 772ee5c..2ec394a 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -787,7 +787,12 @@
 
 /* For control registers, both ignored and reserved bits must be preserved */
 
-#define ACPI_PM1_CONTROL_IGNORED_BITS           0x0201	/* Bits 9, 0(SCI_EN) */
+/*
+ * The ACPI spec says to ignore PM1_CTL.SCI_EN (bit 0)
+ * but we need to be able to write ACPI_BITREG_SCI_ENABLE directly
+ * as a BIOS workaround on some machines.
+ */
+#define ACPI_PM1_CONTROL_IGNORED_BITS           0x0200	/* Bit 9 */
 #define ACPI_PM1_CONTROL_RESERVED_BITS          0xC1F8	/* Bits 14-15, 3-8 */
 #define ACPI_PM1_CONTROL_PRESERVED_BITS \
 	       (ACPI_PM1_CONTROL_IGNORED_BITS | ACPI_PM1_CONTROL_RESERVED_BITS)
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index e8f7b64..ae862f179 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -312,7 +312,7 @@
       end:
 	if (result)
 		printk(KERN_WARNING PREFIX
-			      "Transitioning device [%s] to D%d\n",
+			      "Device [%s] failed to transition to D%d\n",
 			      device->pnp.bus_id, state);
 	else {
 		device->power.state = state;
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index f7ca8c5..72069ba 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -202,21 +202,44 @@
  * Suspend / resume control
  */
 static int acpi_idle_suspend;
+static u32 saved_bm_rld;
+
+static void acpi_idle_bm_rld_save(void)
+{
+	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
+}
+static void acpi_idle_bm_rld_restore(void)
+{
+	u32 resumed_bm_rld;
+
+	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);
+
+	if (resumed_bm_rld != saved_bm_rld)
+		acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
+}
 
 int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
 {
+	if (acpi_idle_suspend == 1)
+		return 0;
+
+	acpi_idle_bm_rld_save();
 	acpi_idle_suspend = 1;
 	return 0;
 }
 
 int acpi_processor_resume(struct acpi_device * device)
 {
+	if (acpi_idle_suspend == 0)
+		return 0;
+
+	acpi_idle_bm_rld_restore();
 	acpi_idle_suspend = 0;
 	return 0;
 }
 
 #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
-static int tsc_halts_in_c(int state)
+static void tsc_check_state(int state)
 {
 	switch (boot_cpu_data.x86_vendor) {
 	case X86_VENDOR_AMD:
@@ -226,13 +249,17 @@
 		 * C/P/S0/S1 states when this bit is set.
 		 */
 		if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
-			return 0;
+			return;
 
 		/*FALL THROUGH*/
 	default:
-		return state > ACPI_STATE_C1;
+		/* TSC could halt in idle, so notify users */
+		if (state > ACPI_STATE_C1)
+			mark_tsc_unstable("TSC halts in idle");
 	}
 }
+#else
+static void tsc_check_state(int state) { return; }
 #endif
 
 static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
@@ -578,14 +605,9 @@
 
 	pr->power.timer_broadcast_on_state = INT_MAX;
 
-	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
+	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
 		struct acpi_processor_cx *cx = &pr->power.states[i];
 
-#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
-		/* TSC could halt in idle, so notify users */
-		if (tsc_halts_in_c(cx->type))
-			mark_tsc_unstable("TSC halts in idle");;
-#endif
 		switch (cx->type) {
 		case ACPI_STATE_C1:
 			cx->valid = 1;
@@ -603,6 +625,8 @@
 				acpi_timer_check_state(i, pr, cx);
 			break;
 		}
+		if (cx->valid)
+			tsc_check_state(cx->type);
 
 		if (cx->valid)
 			working++;
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index d0d1f4d..7f16f5f 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -45,6 +45,14 @@
 #define _COMPONENT              ACPI_PROCESSOR_COMPONENT
 ACPI_MODULE_NAME("processor_throttling");
 
+/* ignore_tpc:
+ *  0 -> acpi processor driver doesn't ignore _TPC values
+ *  1 -> acpi processor driver ignores _TPC values
+ */
+static int ignore_tpc;
+module_param(ignore_tpc, int, 0644);
+MODULE_PARM_DESC(ignore_tpc, "Disable broken BIOS _TPC throttling support");
+
 struct throttling_tstate {
 	unsigned int cpu;		/* cpu nr */
 	int target_state;		/* target T-state */
@@ -283,6 +291,10 @@
 
 	if (!pr)
 		return -EINVAL;
+
+	if (ignore_tpc)
+		goto end;
+
 	status = acpi_evaluate_integer(pr->handle, "_TPC", NULL, &tpc);
 	if (ACPI_FAILURE(status)) {
 		if (status != AE_NOT_FOUND) {
@@ -290,6 +302,8 @@
 		}
 		return -ENODEV;
 	}
+
+end:
 	pr->throttling_platform_limit = (int)tpc;
 	return 0;
 }
@@ -302,6 +316,9 @@
 	struct acpi_processor_limit *limit;
 	int target_state;
 
+	if (ignore_tpc)
+		return 0;
+
 	result = acpi_processor_get_platform_limit(pr);
 	if (result) {
 		/* Throttling Limit is unsupported */
@@ -821,6 +838,14 @@
 	ret = acpi_read_throttling_status(pr, &value);
 	if (ret >= 0) {
 		state = acpi_get_throttling_state(pr, value);
+		if (state == -1) {
+			ACPI_WARNING((AE_INFO,
+				"Invalid throttling state, reset\n"));
+			state = 0;
+			ret = acpi_processor_set_throttling(pr, state);
+			if (ret)
+				return ret;
+		}
 		pr->throttling.state = state;
 	}
 
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index d7ff61c..810cca9 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -538,6 +538,41 @@
 	return -EINVAL;
 }
 
+/*
+ * For some buggy _BQC methods, we need to add a constant value to
+ * the _BQC return value to get the actual current brightness level
+ */
+
+static int bqc_offset_aml_bug_workaround;
+static int __init video_set_bqc_offset(const struct dmi_system_id *d)
+{
+	bqc_offset_aml_bug_workaround = 9;
+	return 0;
+}
+
+static struct dmi_system_id video_dmi_table[] __initdata = {
+	/*
+	 * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121
+	 */
+	{
+	 .callback = video_set_bqc_offset,
+	 .ident = "Acer Aspire 5720",
+	 .matches = {
+		DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
+		DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5720"),
+		},
+	},
+	{
+	 .callback = video_set_bqc_offset,
+	 .ident = "Acer Aspire 5710Z",
+	 .matches = {
+		DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
+		DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710Z"),
+		},
+	},
+	{}
+};
+
 static int
 acpi_video_device_lcd_get_level_current(struct acpi_video_device *device,
 					unsigned long long *level)
@@ -557,6 +592,7 @@
 				*level = device->brightness->levels[*level + 2];
 
 			}
+			*level += bqc_offset_aml_bug_workaround;
 			device->brightness->curr = *level;
 			return 0;
 		} else {
@@ -2290,6 +2326,8 @@
 
 static int __init acpi_video_init(void)
 {
+	dmi_check_system(video_dmi_table);
+
 	if (intel_opregion_present())
 		return 0;
 
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 8f90508..a6cbf7b 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -934,8 +934,6 @@
 
 	spin_lock_irqsave(&blkif_io_lock, flags);
 
-	del_gendisk(info->gd);
-
 	/* No more blkif_request(). */
 	blk_stop_queue(info->rq);
 
@@ -949,6 +947,8 @@
 	blk_cleanup_queue(info->rq);
 	info->rq = NULL;
 
+	del_gendisk(info->gd);
+
  out:
 	xenbus_frontend_closed(dev);
 }
@@ -977,8 +977,10 @@
 		break;
 
 	case XenbusStateClosing:
-		if (info->gd == NULL)
-			xenbus_dev_fatal(dev, -ENODEV, "gd is NULL");
+		if (info->gd == NULL) {
+			xenbus_frontend_closed(dev);
+			break;
+		}
 		bd = bdget_disk(info->gd, 0);
 		if (bd == NULL)
 			xenbus_dev_fatal(dev, -ENODEV, "bdget failed");
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index 1392935..9b1624e 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -587,7 +587,7 @@
 	struct device_node *node = vdev->dev.archdata.of_node;
 
 	deviceno = vdev->unit_address;
-	if (deviceno > VIOCD_MAX_CD)
+	if (deviceno >= VIOCD_MAX_CD)
 		return -ENODEV;
 	if (!node)
 		return -ENODEV;
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index aa83a08..0905079 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -2856,6 +2856,7 @@
 		/* Assume a single IPMB channel at zero. */
 		intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
 		intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
+		intf->curr_channel = IPMI_MAX_CHANNELS;
 	}
 
 	if (rv == 0)
@@ -3648,13 +3649,13 @@
 		}
 
 		/*
-		** We need to make sure the channels have been initialized.
-		** The channel_handler routine will set the "curr_channel"
-		** equal to or greater than IPMI_MAX_CHANNELS when all the
-		** channels for this interface have been initialized.
-		*/
+		 * We need to make sure the channels have been initialized.
+		 * The channel_handler routine will set the "curr_channel"
+		 * equal to or greater than IPMI_MAX_CHANNELS when all the
+		 * channels for this interface have been initialized.
+		 */
 		if (intf->curr_channel < IPMI_MAX_CHANNELS) {
-			requeue = 1;     /* Just put the message back for now */
+			requeue = 0; /* Throw the message away */
 			goto out;
 		}
 
diff --git a/drivers/char/random.c b/drivers/char/random.c
index b2ced39..8c74448 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1673,7 +1673,7 @@
 	int ret;
 
 	keyptr = get_keyptr();
-	hash[0] += current->pid + jiffies + get_cycles() + (int)(long)&ret;
+	hash[0] += current->pid + jiffies + get_cycles();
 
 	ret = half_md4_transform(hash, keyptr->secret);
 	put_cpu_var(get_random_int_hash);
diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
index ed306eb..0c2f55a 100644
--- a/drivers/char/tpm/tpm_bios.c
+++ b/drivers/char/tpm/tpm_bios.c
@@ -212,7 +212,8 @@
 			unsigned char * event_entry)
 {
 	const char *name = "";
-	char data[40] = "";
+	/* 41 so there is room for 40 bytes of data and 1 NUL terminator */
+	char data[41] = "";
 	int i, n_len = 0, d_len = 0;
 	struct tcpa_pc_event *pc_event;
 
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index f9f05d7..6c6656d 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -415,6 +415,7 @@
 static int init_ixp_crypto(void)
 {
 	int ret = -ENODEV;
+	u32 msg[2] = { 0, 0 };
 
 	if (! ( ~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH |
 				IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) {
@@ -426,9 +427,35 @@
 		return ret;
 
 	if (!npe_running(npe_c)) {
-		npe_load_firmware(npe_c, npe_name(npe_c), dev);
+		ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
+		if (ret) {
+			return ret;
+		}
+		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
+			goto npe_error;
+	} else {
+		if (npe_send_message(npe_c, msg, "STATUS_MSG"))
+			goto npe_error;
+
+		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
+			goto npe_error;
 	}
 
+	switch ((msg[1]>>16) & 0xff) {
+	case 3:
+		printk(KERN_WARNING "Firmware of %s lacks AES support\n",
+				npe_name(npe_c));
+		support_aes = 0;
+		break;
+	case 4:
+	case 5:
+		support_aes = 1;
+		break;
+	default:
+		printk(KERN_ERR "Firmware of %s lacks crypto support\n",
+			npe_name(npe_c));
+		return -ENODEV;
+	}
 	/* buffer_pool will also be used to sometimes store the hmac,
 	 * so assure it is large enough
 	 */
@@ -459,6 +486,10 @@
 
 	qmgr_enable_irq(RECV_QID);
 	return 0;
+
+npe_error:
+	printk(KERN_ERR "%s not responding\n", npe_name(npe_c));
+	ret = -EIO;
 err:
 	if (ctx_pool)
 		dma_pool_destroy(ctx_pool);
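
The added status exchange reads the NPE firmware's capability byte (bits 23:16 of the second status word) before deciding whether AES algorithms may be registered. A standalone restatement of that decode, using the same numeric cases as the patch (kernel error codes from <linux/errno.h> assumed available):

	/* Capability byte from the NPE status message: 3 = crypto without
	 * AES, 4 or 5 = crypto with AES, anything else = no crypto. */
	static int demo_npe_crypto_caps(unsigned int status_word, int *support_aes)
	{
		switch ((status_word >> 16) & 0xff) {
		case 3:
			*support_aes = 0;
			return 0;
		case 4:
		case 5:
			*support_aes = 1;
			return 0;
		default:
			return -ENODEV;
		}
	}
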
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 3f0fdd1..856b3cc 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -489,4 +489,4 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Michal Ludvig");
 
-MODULE_ALIAS("aes-all");
+MODULE_ALIAS("aes");
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 17b24c5..4cd35d8 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -84,6 +84,12 @@
 config DRM_I915_KMS
 	bool "Enable modesetting on intel by default"
 	depends on DRM_I915
+	# i915 KMS depends on ACPI_VIDEO when ACPI is enabled
+	# but for select to work, need to select ACPI_VIDEO's dependencies, ick
+	select VIDEO_OUTPUT_CONTROL if ACPI
+	select BACKLIGHT_CLASS_DEVICE if ACPI
+	select INPUT if ACPI
+	select ACPI_VIDEO if ACPI
 	help
 	  Choose this option if you want kernel modesetting enabled by default,
 	  and you have a new enough userspace to support this. Running old
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 6d80d17..0411d91 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -170,6 +170,14 @@
 	}
 	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
 		  (unsigned long long)map->offset, map->size, map->type);
+
+	/* Page-align _DRM_SHM maps.  Doing it here, at allocation time, avoids
+	 * opening a security hole and works around various broken drivers that
+	 * use a non-page-aligned size to map the SAREA. --BenH
+	 */
+	if (map->type == _DRM_SHM)
+		map->size = PAGE_ALIGN(map->size);
+
 	if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
 		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
 		return -EINVAL;
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index f01def1..019b7c5 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -481,7 +481,7 @@
 		}
 		retcode = func(dev, kdata, file_priv);
 
-		if ((retcode == 0) && (cmd & IOC_OUT)) {
+		if (cmd & IOC_OUT) {
 			if (copy_to_user((void __user *)arg, kdata,
 					 _IOC_SIZE(cmd)) != 0)
 				retcode = -EFAULT;
diff --git a/drivers/ide/icside.c b/drivers/ide/icside.c
index 4e16ce6..36da913 100644
--- a/drivers/ide/icside.c
+++ b/drivers/ide/icside.c
@@ -466,7 +466,7 @@
 	struct ide_host *host;
 	unsigned int sel = 0;
 	int ret;
-	hw_regs_t hw[2], *hws[] = { &hw[0], NULL, NULL, NULL };
+	hw_regs_t hw[2], *hws[] = { &hw[0], &hw[1], NULL, NULL };
 	struct ide_port_info d = icside_v6_port_info;
 
 	ioc_base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 35dc38d..6415a2e 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -696,7 +696,7 @@
 		}
 		spin_lock_irq(&hwif->lock);
 		enable_irq(hwif->irq);
-		if (startstop == ide_stopped) {
+		if (startstop == ide_stopped && hwif->polling == 0) {
 			ide_unlock_port(hwif);
 			plug_device = 1;
 		}
@@ -868,7 +868,7 @@
 	 * same irq as is currently being serviced here, and Linux
 	 * won't allow another of the same (on any CPU) until we return.
 	 */
-	if (startstop == ide_stopped) {
+	if (startstop == ide_stopped && hwif->polling == 0) {
 		BUG_ON(hwif->handler);
 		ide_unlock_port(hwif);
 		plug_device = 1;
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index c19a221..06fe002 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -206,8 +206,6 @@
 
 /*
  * Early UDMA66 devices don't set bit14 to 1, only bit13 is valid.
- * We list them here and depend on the device side cable detection for them.
- *
  * Some optical devices with the buggy firmwares have the same problem.
  */
 static const struct drive_list_entry ivb_list[] = {
@@ -251,10 +249,25 @@
 	 * - force bit13 (80c cable present) check also for !ivb devices
 	 *   (unless the slave device is pre-ATA3)
 	 */
-	if ((id[ATA_ID_HW_CONFIG] & 0x4000) ||
-	    (ivb && (id[ATA_ID_HW_CONFIG] & 0x2000)))
+	if (id[ATA_ID_HW_CONFIG] & 0x4000)
 		return 1;
 
+	if (ivb) {
+		const char *model = (char *)&id[ATA_ID_PROD];
+
+		if (strstr(model, "TSSTcorp CDDVDW SH-S202")) {
+			/*
+			 * These ATAPI devices always report 80c cable
+			 * so we have to depend on the host in this case.
+			 */
+			if (hwif->cbl == ATA_CBL_PATA80)
+				return 1;
+		} else {
+			/* Depend on the device side cable detection. */
+			if (id[ATA_ID_HW_CONFIG] & 0x2000)
+				return 1;
+		}
+	}
 no_80w:
 	if (drive->dev_flags & IDE_DFLAG_UDMA33_WARNED)
 		return 0;
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
index 56ff8c4..2148df8 100644
--- a/drivers/ide/ide-lib.c
+++ b/drivers/ide/ide-lib.c
@@ -31,24 +31,6 @@
 		blk_queue_bounce_limit(drive->queue, addr);
 }
 
-static void ide_dump_opcode(ide_drive_t *drive)
-{
-	struct request *rq = drive->hwif->rq;
-	struct ide_cmd *cmd = NULL;
-
-	if (!rq)
-		return;
-
-	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
-		cmd = rq->special;
-
-	printk(KERN_ERR "ide: failed opcode was: ");
-	if (cmd == NULL)
-		printk(KERN_CONT "unknown\n");
-	else
-		printk(KERN_CONT "0x%02x\n", cmd->tf.command);
-}
-
 u64 ide_get_lba_addr(struct ide_cmd *cmd, int lba48)
 {
 	struct ide_taskfile *tf = &cmd->tf;
@@ -91,7 +73,7 @@
 
 static void ide_dump_ata_error(ide_drive_t *drive, u8 err)
 {
-	printk(KERN_ERR "{ ");
+	printk(KERN_CONT "{ ");
 	if (err & ATA_ABORTED)
 		printk(KERN_CONT "DriveStatusError ");
 	if (err & ATA_ICRC)
@@ -121,7 +103,7 @@
 
 static void ide_dump_atapi_error(ide_drive_t *drive, u8 err)
 {
-	printk(KERN_ERR "{ ");
+	printk(KERN_CONT "{ ");
 	if (err & ATAPI_ILI)
 		printk(KERN_CONT "IllegalLengthIndication ");
 	if (err & ATAPI_EOM)
@@ -179,7 +161,10 @@
 		else
 			ide_dump_atapi_error(drive, err);
 	}
-	ide_dump_opcode(drive);
+
+	printk(KERN_ERR "%s: possibly failed opcode: 0x%02x\n",
+		drive->name, drive->hwif->cmd.tf.command);
+
 	return err;
 }
 EXPORT_SYMBOL(ide_dump_status);
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 7f264ed..c895ed5 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -295,7 +295,7 @@
 
 	timeout = ((cmd == ATA_CMD_ID_ATA) ? WAIT_WORSTCASE : WAIT_PIDENTIFY) / 2;
 
-	if (ide_busy_sleep(hwif, timeout, use_altstatus))
+	if (ide_busy_sleep(drive, timeout, use_altstatus))
 		return 1;
 
 	/* wait for IRQ and ATA_DRQ */
@@ -316,8 +316,9 @@
 	return rc;
 }
 
-int ide_busy_sleep(ide_hwif_t *hwif, unsigned long timeout, int altstatus)
+int ide_busy_sleep(ide_drive_t *drive, unsigned long timeout, int altstatus)
 {
+	ide_hwif_t *hwif = drive->hwif;
 	u8 stat;
 
 	timeout += jiffies;
@@ -330,6 +331,8 @@
 			return 0;
 	} while (time_before(jiffies, timeout));
 
+	printk(KERN_ERR "%s: timeout in %s\n", drive->name, __func__);
+
 	return 1;	/* drive timed-out */
 }
 
@@ -420,7 +423,7 @@
 			tp_ops->dev_select(drive);
 			msleep(50);
 			tp_ops->exec_command(hwif, ATA_CMD_DEV_RESET);
-			(void)ide_busy_sleep(hwif, WAIT_WORSTCASE, 0);
+			(void)ide_busy_sleep(drive, WAIT_WORSTCASE, 0);
 			rc = ide_dev_read_id(drive, cmd, id);
 		}
 
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index cb942a9..3a53e08 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -614,12 +614,6 @@
 {
 	idetape_tape_t *tape = drive->driver_data;
 
-	if (drive->pc->c[0] == REQUEST_SENSE &&
-	    pc->c[0] == REQUEST_SENSE) {
-		printk(KERN_ERR "ide-tape: possible ide-tape.c bug - "
-			"Two request sense in serial were issued\n");
-	}
-
 	if (drive->failed_pc == NULL && pc->c[0] != REQUEST_SENSE)
 		drive->failed_pc = pc;
 
diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
index 2aa6999..69860de 100644
--- a/drivers/ide/piix.c
+++ b/drivers/ide/piix.c
@@ -263,6 +263,7 @@
 	{ 0x24CA, 0x1025, 0x003d },	/* ICH4 on ACER TM290 */
 	{ 0x266F, 0x1025, 0x0066 },	/* ICH6 on ACER Aspire 1694WLMi */
 	{ 0x2653, 0x1043, 0x82D8 },	/* ICH6M on Asus Eee 701 */
+	{ 0x27df, 0x104d, 0x900e },	/* ICH7 on Sony TZ-90 */
 	/* end marker */
 	{ 0, }
 };
diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
index 3ff7231..028de26 100644
--- a/drivers/ide/via82cxxx.c
+++ b/drivers/ide/via82cxxx.c
@@ -67,6 +67,7 @@
 	u8 udma_mask;
 	u8 flags;
 } via_isa_bridges[] = {
+	{ "vx855",	PCI_DEVICE_ID_VIA_VX855,    0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
 	{ "vx800",	PCI_DEVICE_ID_VIA_VX800,    0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
 	{ "cx700",	PCI_DEVICE_ID_VIA_CX700,    0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
 	{ "vt8237s",	PCI_DEVICE_ID_VIA_8237S,    0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
@@ -474,6 +475,7 @@
 	{ PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C576_1),  0 },
 	{ PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C586_1),  0 },
 	{ PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_CX700_IDE), 0 },
+	{ PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_VX855_IDE), 0 },
 	{ PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_6410),      1 },
 	{ PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_SATA_EIDE), 1 },
 	{ 0, },
diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c
index e29cdc1..a28c06d 100644
--- a/drivers/input/serio/ambakmi.c
+++ b/drivers/input/serio/ambakmi.c
@@ -107,7 +107,7 @@
 	clk_disable(kmi->clk);
 }
 
-static int amba_kmi_probe(struct amba_device *dev, void *id)
+static int amba_kmi_probe(struct amba_device *dev, struct amba_id *id)
 {
 	struct amba_kmi_port *kmi;
 	struct serio *io;
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index 7793932..11a6248 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -443,7 +443,7 @@
 	dev_dbg(pcf->dev, "pcf50633_irq\n");
 
 	get_device(pcf->dev);
-	disable_irq(pcf->irq);
+	disable_irq_nosync(pcf->irq);
 	schedule_work(&pcf->irq_work);
 
 	return IRQ_HANDLED;
diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c
index c2be308..fe24079 100644
--- a/drivers/mfd/wm8350-core.c
+++ b/drivers/mfd/wm8350-core.c
@@ -79,10 +79,6 @@
 		/* Cache is CPU endian */
 		dest[i - reg] = be16_to_cpu(dest[i - reg]);
 
-		/* Satisfy non-volatile bits from cache */
-		dest[i - reg] &= wm8350_reg_io_map[i].vol;
-		dest[i - reg] |= wm8350->reg_cache[i];
-
 		/* Mask out non-readable bits */
 		dest[i - reg] &= wm8350_reg_io_map[i].readable;
 	}
@@ -182,9 +178,6 @@
 			(wm8350->reg_cache[i] & ~wm8350_reg_io_map[i].writable)
 			| src[i - reg];
 
-		/* Don't store volatile bits */
-		wm8350->reg_cache[i] &= ~wm8350_reg_io_map[i].vol;
-
 		src[i - reg] = cpu_to_be16(src[i - reg]);
 	}
 
@@ -1261,7 +1254,6 @@
 		    (i < WM8350_CLOCK_CONTROL_1 || i > WM8350_AIF_TEST)) {
 			value = be16_to_cpu(wm8350->reg_cache[i]);
 			value &= wm8350_reg_io_map[i].readable;
-			value &= ~wm8350_reg_io_map[i].vol;
 			wm8350->reg_cache[i] = value;
 		} else
 			wm8350->reg_cache[i] = reg_map[i];
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index 3cf61ec..348443b 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -119,7 +119,7 @@
 	edev->edev.class = &enclosure_class;
 	edev->edev.parent = get_device(dev);
 	edev->cb = cb;
-	dev_set_name(&edev->edev, name);
+	dev_set_name(&edev->edev, "%s", name);
 	err = device_register(&edev->edev);
 	if (err)
 		goto err;
@@ -255,8 +255,8 @@
 	ecomp->number = number;
 	cdev = &ecomp->cdev;
 	cdev->parent = get_device(&edev->edev);
-	if (name)
-		dev_set_name(cdev, name);
+	if (name && name[0])
+		dev_set_name(cdev, "%s", name);
 	else
 		dev_set_name(cdev, "%u", number);
 
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 36875dc..7d4febd 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -490,7 +490,7 @@
 	mod_timer(&host->timer, jiffies + HZ);
 }
 
-static int __devinit mmci_probe(struct amba_device *dev, void *id)
+static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
 {
 	struct mmc_platform_data *plat = dev->dev.platform_data;
 	struct mmci_host *host;
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 62dee54..43976aa 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -178,7 +178,7 @@
 		/* Calculate flash page address; use block erase (for speed) if
 		 * we're at a block boundary and need to erase the whole block.
 		 */
-		pageaddr = div_u64(instr->len, priv->page_size);
+		pageaddr = div_u64(instr->addr, priv->page_size);
 		do_block = (pageaddr & 0x7) == 0 && instr->len >= blocksize;
 		pageaddr = pageaddr << priv->page_offset;
 
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index c49ddd0..b4bb06f 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -35,8 +35,22 @@
 #define DRV_VER			"2.0.348"
 #define DRV_NAME		"be2net"
 #define BE_NAME			"ServerEngines BladeEngine2 10Gbps NIC"
+#define OC_NAME			"Emulex OneConnect 10Gbps NIC"
 #define DRV_DESC		BE_NAME "Driver"
 
+#define BE_VENDOR_ID 		0x19a2
+#define BE_DEVICE_ID1		0x211
+#define OC_DEVICE_ID1		0x700
+#define OC_DEVICE_ID2		0x701
+
+static inline char *nic_name(struct pci_dev *pdev)
+{
+	if (pdev->device == OC_DEVICE_ID1 || pdev->device == OC_DEVICE_ID2)
+		return OC_NAME;
+	else
+		return BE_NAME;
+}
+
 /* Number of bytes of an RX frame that are copied to skb->data */
 #define BE_HDR_LEN 		64
 #define BE_MAX_JUMBO_FRAME_SIZE	9018
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 30d0c81..5c378b5 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -28,10 +28,10 @@
 module_param(rx_frag_size, uint, S_IRUGO);
 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
 
-#define BE_VENDOR_ID 		0x19a2
-#define BE2_DEVICE_ID_1 	0x0211
 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
-	{ PCI_DEVICE(BE_VENDOR_ID, BE2_DEVICE_ID_1) },
+	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
+	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
+	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
 	{ 0 }
 };
 MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -1859,7 +1859,7 @@
 	if (status != 0)
 		goto stats_clean;
 
-	dev_info(&pdev->dev, BE_NAME " port %d\n", adapter->port_num);
+	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
 	return 0;
 
 stats_clean:
@@ -1873,7 +1873,7 @@
 disable_dev:
 	pci_disable_device(pdev);
 do_none:
-	dev_warn(&pdev->dev, BE_NAME " initialization failed\n");
+	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
 	return status;
 }
 
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 8c2e5ab..faf094a 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -1465,6 +1465,12 @@
 	return best;
 }
 
+static int agg_device_up(const struct aggregator *agg)
+{
+	return (netif_running(agg->slave->dev) &&
+		netif_carrier_ok(agg->slave->dev));
+}
+
 /**
  * ad_agg_selection_logic - select an aggregation group for a team
  * @aggregator: the aggregator we're looking at
@@ -1496,14 +1502,13 @@
 	struct port *port;
 
 	origin = agg;
-
 	active = __get_active_agg(agg);
-	best = active;
+	best = (active && agg_device_up(active)) ? active : NULL;
 
 	do {
 		agg->is_active = 0;
 
-		if (agg->num_of_ports)
+		if (agg->num_of_ports && agg_device_up(agg))
 			best = ad_agg_selection_test(best, agg);
 
 	} while ((agg = __get_next_agg(agg)));
diff --git a/drivers/net/meth.c b/drivers/net/meth.c
index aa08987..dbd3436 100644
--- a/drivers/net/meth.c
+++ b/drivers/net/meth.c
@@ -127,11 +127,11 @@
 static int mdio_probe(struct meth_private *priv)
 {
 	int i;
-	unsigned long p2, p3;
+	unsigned long p2, p3, flags;
 	/* check if phy is detected already */
 	if(priv->phy_addr>=0&&priv->phy_addr<32)
 		return 0;
-	spin_lock(&priv->meth_lock);
+	spin_lock_irqsave(&priv->meth_lock, flags);
 	for (i=0;i<32;++i){
 		priv->phy_addr=i;
 		p2=mdio_read(priv,2);
@@ -157,7 +157,7 @@
 			break;
 		}
 	}
-	spin_unlock(&priv->meth_lock);
+	spin_unlock_irqrestore(&priv->meth_lock, flags);
 	if(priv->phy_addr<32) {
 		return 0;
 	}
@@ -373,14 +373,14 @@
 static void meth_rx(struct net_device* dev, unsigned long int_status)
 {
 	struct sk_buff *skb;
-	unsigned long status;
+	unsigned long status, flags;
 	struct meth_private *priv = netdev_priv(dev);
 	unsigned long fifo_rptr = (int_status & METH_INT_RX_RPTR_MASK) >> 8;
 
-	spin_lock(&priv->meth_lock);
+	spin_lock_irqsave(&priv->meth_lock, flags);
 	priv->dma_ctrl &= ~METH_DMA_RX_INT_EN;
 	mace->eth.dma_ctrl = priv->dma_ctrl;
-	spin_unlock(&priv->meth_lock);
+	spin_unlock_irqrestore(&priv->meth_lock, flags);
 
 	if (int_status & METH_INT_RX_UNDERFLOW) {
 		fifo_rptr = (fifo_rptr - 1) & 0x0f;
@@ -452,12 +452,12 @@
 		mace->eth.rx_fifo = priv->rx_ring_dmas[priv->rx_write];
 		ADVANCE_RX_PTR(priv->rx_write);
 	}
-	spin_lock(&priv->meth_lock);
+	spin_lock_irqsave(&priv->meth_lock, flags);
 	/* In case there was underflow, and Rx DMA was disabled */
 	priv->dma_ctrl |= METH_DMA_RX_INT_EN | METH_DMA_RX_EN;
 	mace->eth.dma_ctrl = priv->dma_ctrl;
 	mace->eth.int_stat = METH_INT_RX_THRESHOLD;
-	spin_unlock(&priv->meth_lock);
+	spin_unlock_irqrestore(&priv->meth_lock, flags);
 }
 
 static int meth_tx_full(struct net_device *dev)
@@ -470,11 +470,11 @@
 static void meth_tx_cleanup(struct net_device* dev, unsigned long int_status)
 {
 	struct meth_private *priv = netdev_priv(dev);
-	unsigned long status;
+	unsigned long status, flags;
 	struct sk_buff *skb;
 	unsigned long rptr = (int_status&TX_INFO_RPTR) >> 16;
 
-	spin_lock(&priv->meth_lock);
+	spin_lock_irqsave(&priv->meth_lock, flags);
 
 	/* Stop DMA notification */
 	priv->dma_ctrl &= ~(METH_DMA_TX_INT_EN);
@@ -527,12 +527,13 @@
 	}
 
 	mace->eth.int_stat = METH_INT_TX_EMPTY | METH_INT_TX_PKT;
-	spin_unlock(&priv->meth_lock);
+	spin_unlock_irqrestore(&priv->meth_lock, flags);
 }
 
 static void meth_error(struct net_device* dev, unsigned status)
 {
 	struct meth_private *priv = netdev_priv(dev);
+	unsigned long flags;
 
 	printk(KERN_WARNING "meth: error status: 0x%08x\n",status);
 	/* check for errors too... */
@@ -547,7 +548,7 @@
 		printk(KERN_WARNING "meth: Rx overflow\n");
 	if (status & (METH_INT_RX_UNDERFLOW)) {
 		printk(KERN_WARNING "meth: Rx underflow\n");
-		spin_lock(&priv->meth_lock);
+		spin_lock_irqsave(&priv->meth_lock, flags);
 		mace->eth.int_stat = METH_INT_RX_UNDERFLOW;
 		/* more underflow interrupts will be delivered,
 		 * effectively throwing us into an infinite loop.
@@ -555,7 +556,7 @@
 		priv->dma_ctrl &= ~METH_DMA_RX_EN;
 		mace->eth.dma_ctrl = priv->dma_ctrl;
 		DPRINTK("Disabled meth Rx DMA temporarily\n");
-		spin_unlock(&priv->meth_lock);
+		spin_unlock_irqrestore(&priv->meth_lock, flags);
 	}
 	mace->eth.int_stat = METH_INT_ERROR;
 }
diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
index 91f50de..a276125 100644
--- a/drivers/net/mlx4/en_cq.c
+++ b/drivers/net/mlx4/en_cq.c
@@ -125,8 +125,10 @@
 
 	if (cq->is_tx)
 		del_timer(&cq->timer);
-	else
+	else {
 		napi_disable(&cq->napi);
+		netif_napi_del(&cq->napi);
+	}
 
 	mlx4_cq_free(mdev->dev, &cq->mcq);
 }
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index a400d71..6bb5af3 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -569,7 +569,7 @@
 		if (rxq->rx_curr_desc == rxq->rx_ring_size)
 			rxq->rx_curr_desc = 0;
 
-		dma_unmap_single(NULL, rx_desc->buf_ptr,
+		dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr,
 				 rx_desc->buf_size, DMA_FROM_DEVICE);
 		rxq->rx_desc_count--;
 		rx++;
@@ -678,8 +678,9 @@
 
 		rx_desc = rxq->rx_desc_area + rx;
 
-		rx_desc->buf_ptr = dma_map_single(NULL, skb->data,
-					mp->skb_size, DMA_FROM_DEVICE);
+		rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent,
+						  skb->data, mp->skb_size,
+						  DMA_FROM_DEVICE);
 		rx_desc->buf_size = mp->skb_size;
 		rxq->rx_skb[rx] = skb;
 		wmb();
@@ -718,6 +719,7 @@
 
 static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
 {
+	struct mv643xx_eth_private *mp = txq_to_mp(txq);
 	int nr_frags = skb_shinfo(skb)->nr_frags;
 	int frag;
 
@@ -746,10 +748,10 @@
 
 		desc->l4i_chk = 0;
 		desc->byte_cnt = this_frag->size;
-		desc->buf_ptr = dma_map_page(NULL, this_frag->page,
-						this_frag->page_offset,
-						this_frag->size,
-						DMA_TO_DEVICE);
+		desc->buf_ptr = dma_map_page(mp->dev->dev.parent,
+					     this_frag->page,
+					     this_frag->page_offset,
+					     this_frag->size, DMA_TO_DEVICE);
 	}
 }
 
@@ -826,7 +828,8 @@
 
 	desc->l4i_chk = l4i_chk;
 	desc->byte_cnt = length;
-	desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
+	desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data,
+				       length, DMA_TO_DEVICE);
 
 	__skb_queue_tail(&txq->tx_skb, skb);
 
@@ -956,10 +959,10 @@
 		}
 
 		if (cmd_sts & TX_FIRST_DESC) {
-			dma_unmap_single(NULL, desc->buf_ptr,
+			dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
 					 desc->byte_cnt, DMA_TO_DEVICE);
 		} else {
-			dma_unmap_page(NULL, desc->buf_ptr,
+			dma_unmap_page(mp->dev->dev.parent, desc->buf_ptr,
 				       desc->byte_cnt, DMA_TO_DEVICE);
 		}
 
@@ -1894,9 +1897,9 @@
 						mp->rx_desc_sram_size);
 		rxq->rx_desc_dma = mp->rx_desc_sram_addr;
 	} else {
-		rxq->rx_desc_area = dma_alloc_coherent(NULL, size,
-							&rxq->rx_desc_dma,
-							GFP_KERNEL);
+		rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
+						       size, &rxq->rx_desc_dma,
+						       GFP_KERNEL);
 	}
 
 	if (rxq->rx_desc_area == NULL) {
@@ -1947,7 +1950,7 @@
 	if (index == 0 && size <= mp->rx_desc_sram_size)
 		iounmap(rxq->rx_desc_area);
 	else
-		dma_free_coherent(NULL, size,
+		dma_free_coherent(mp->dev->dev.parent, size,
 				  rxq->rx_desc_area,
 				  rxq->rx_desc_dma);
 
@@ -1979,7 +1982,7 @@
 	    rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
 		iounmap(rxq->rx_desc_area);
 	else
-		dma_free_coherent(NULL, rxq->rx_desc_area_size,
+		dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size,
 				  rxq->rx_desc_area, rxq->rx_desc_dma);
 
 	kfree(rxq->rx_skb);
@@ -2007,9 +2010,9 @@
 						mp->tx_desc_sram_size);
 		txq->tx_desc_dma = mp->tx_desc_sram_addr;
 	} else {
-		txq->tx_desc_area = dma_alloc_coherent(NULL, size,
-							&txq->tx_desc_dma,
-							GFP_KERNEL);
+		txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
+						       size, &txq->tx_desc_dma,
+						       GFP_KERNEL);
 	}
 
 	if (txq->tx_desc_area == NULL) {
@@ -2053,7 +2056,7 @@
 	    txq->tx_desc_area_size <= mp->tx_desc_sram_size)
 		iounmap(txq->tx_desc_area);
 	else
-		dma_free_coherent(NULL, txq->tx_desc_area_size,
+		dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
 				  txq->tx_desc_area, txq->tx_desc_dma);
 }
 
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
index 7be0ae10..c2eeac4 100644
--- a/drivers/net/vxge/vxge-traffic.c
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -115,7 +115,7 @@
 		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
 		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
 		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
-		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR), 0, 32),
+		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
 		&vp_reg->kdfcctl_errors_mask);
 
 	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);
diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
index 02419bf..f9fc389 100644
--- a/drivers/net/wimax/i2400m/rx.c
+++ b/drivers/net/wimax/i2400m/rx.c
@@ -819,10 +819,9 @@
 			roq_data = (struct i2400m_roq_data *) &skb->cb;
 			i2400m_net_erx(i2400m, skb, roq_data->cs);
 		}
-		else {
+		else
 			__i2400m_roq_queue(i2400m, roq, skb, sn, nsn);
-			__i2400m_roq_update_ws(i2400m, roq, sn + 1);
-		}
+		__i2400m_roq_update_ws(i2400m, roq, sn + 1);
 		i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET_WS,
 				   old_ws, len, sn, nsn, roq->ws);
 	}
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 34bf0fd..1a91bf9 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -557,7 +557,8 @@
 	} else {
 		error = -ENODEV;
 		/* Fall back to PCI_D0 if native PM is not supported */
-		pci_update_current_state(dev, PCI_D0);
+		if (!dev->pm_cap)
+			dev->current_state = PCI_D0;
 	}
 
 	return error;
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index eeafc6c..bfc1a88 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -269,16 +269,16 @@
 	{KE_KEY, 0x34, KEY_SWITCHVIDEOMODE},
 	{KE_KEY, 0x40, KEY_PREVIOUSSONG},
 	{KE_KEY, 0x41, KEY_NEXTSONG},
-	{KE_KEY, 0x43, KEY_STOP},
+	{KE_KEY, 0x43, KEY_STOPCD},
 	{KE_KEY, 0x45, KEY_PLAYPAUSE},
 	{KE_KEY, 0x50, KEY_EMAIL},
 	{KE_KEY, 0x51, KEY_WWW},
-	{KE_KEY, 0x5C, BTN_EXTRA},  /* Performance */
+	{KE_KEY, 0x5C, KEY_SCREENLOCK},  /* Screenlock */
 	{KE_KEY, 0x5D, KEY_WLAN},
 	{KE_KEY, 0x61, KEY_SWITCHVIDEOMODE},
 	{KE_KEY, 0x6B, BTN_TOUCH}, /* Lock Mouse */
 	{KE_KEY, 0x82, KEY_CAMERA},
-	{KE_KEY, 0x8A, KEY_TV},
+	{KE_KEY, 0x8A, KEY_PROG1},
 	{KE_KEY, 0x95, KEY_MEDIA},
 	{KE_KEY, 0x99, KEY_PHONE},
 	{KE_END, 0},
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index 6f54fd1..353a898 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -158,6 +158,7 @@
 static struct key_entry eeepc_keymap[] = {
 	/* Sleep already handled via generic ACPI code */
 	{KE_KEY, 0x10, KEY_WLAN },
+	{KE_KEY, 0x11, KEY_WLAN },
 	{KE_KEY, 0x12, KEY_PROG1 },
 	{KE_KEY, 0x13, KEY_MUTE },
 	{KE_KEY, 0x14, KEY_VOLUMEDOWN },
@@ -166,6 +167,8 @@
 	{KE_KEY, 0x1b, KEY_ZOOM },
 	{KE_KEY, 0x1c, KEY_PROG2 },
 	{KE_KEY, 0x1d, KEY_PROG3 },
+	{KE_KEY, NOTIFY_BRN_MIN,     KEY_BRIGHTNESSDOWN },
+	{KE_KEY, NOTIFY_BRN_MIN + 2, KEY_BRIGHTNESSUP },
 	{KE_KEY, 0x30, KEY_SWITCHVIDEOMODE },
 	{KE_KEY, 0x31, KEY_SWITCHVIDEOMODE },
 	{KE_KEY, 0x32, KEY_SWITCHVIDEOMODE },
@@ -381,11 +384,13 @@
 EEEPC_CREATE_DEVICE_ATTR(camera, CM_ASL_CAMERA);
 EEEPC_CREATE_DEVICE_ATTR(cardr, CM_ASL_CARDREADER);
 EEEPC_CREATE_DEVICE_ATTR(disp, CM_ASL_DISPLAYSWITCH);
+EEEPC_CREATE_DEVICE_ATTR(cpufv, CM_ASL_CPUFV);
 
 static struct attribute *platform_attributes[] = {
 	&dev_attr_camera.attr,
 	&dev_attr_cardr.attr,
 	&dev_attr_disp.attr,
+	&dev_attr_cpufv.attr,
 	NULL
 };
 
@@ -512,15 +517,21 @@
 	return 0;
 }
 
-static void notify_brn(void)
+static int notify_brn(void)
 {
+	/* returns the *previous* brightness, or -1 */
 	struct backlight_device *bd = eeepc_backlight_device;
-	if (bd)
+	if (bd) {
+		int old = bd->props.brightness;
 		bd->props.brightness = read_brightness(bd);
+		return old;
+	}
+	return -1;
 }
 
 static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data)
 {
+	enum rfkill_state state;
 	struct pci_dev *dev;
 	struct pci_bus *bus = pci_find_bus(0, 1);
 
@@ -532,7 +543,9 @@
 		return;
 	}
 
-	if (get_acpi(CM_ASL_WLAN) == 1) {
+	eeepc_wlan_rfkill_state(ehotk->eeepc_wlan_rfkill, &state);
+
+	if (state == RFKILL_STATE_UNBLOCKED) {
 		dev = pci_get_slot(bus, 0);
 		if (dev) {
 			/* Device already present */
@@ -552,23 +565,41 @@
 			pci_dev_put(dev);
 		}
 	}
+
+	rfkill_force_state(ehotk->eeepc_wlan_rfkill, state);
 }
 
 static void eeepc_hotk_notify(acpi_handle handle, u32 event, void *data)
 {
 	static struct key_entry *key;
 	u16 count;
+	int brn = -ENODEV;
 
 	if (!ehotk)
 		return;
 	if (event >= NOTIFY_BRN_MIN && event <= NOTIFY_BRN_MAX)
-		notify_brn();
+		brn = notify_brn();
 	count = ehotk->event_count[event % 128]++;
 	acpi_bus_generate_proc_event(ehotk->device, event, count);
 	acpi_bus_generate_netlink_event(ehotk->device->pnp.device_class,
 					dev_name(&ehotk->device->dev), event,
 					count);
 	if (ehotk->inputdev) {
+		if (brn != -ENODEV) {
+			/* brightness-change events need special
+			 * handling for conversion to key events
+			 */
+			if (brn < 0)
+				brn = event;
+			else
+				brn += NOTIFY_BRN_MIN;
+			if (event < brn)
+				event = NOTIFY_BRN_MIN; /* brightness down */
+			else if (event > brn)
+				event = NOTIFY_BRN_MIN + 2; /* ... up */
+			else
+				event = NOTIFY_BRN_MIN + 1; /* ... unchanged */
+		}
 		key = eepc_get_entry_by_scancode(event);
 		if (key) {
 			switch (key->type) {
@@ -649,6 +680,9 @@
 	if (ACPI_FAILURE(status))
 		printk(EEEPC_ERR "Error installing notify handler\n");
 
+	eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P6");
+	eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P7");
+
 	if (get_acpi(CM_ASL_WLAN) != -1) {
 		ehotk->eeepc_wlan_rfkill = rfkill_allocate(&device->dev,
 							   RFKILL_TYPE_WLAN);
@@ -704,9 +738,6 @@
 			goto bluetooth_fail;
 	}
 
-	eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P6");
-	eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P7");
-
 	return 0;
 
  bluetooth_fail:
@@ -717,6 +748,8 @@
  wlan_fail:
 	if (ehotk->eeepc_wlan_rfkill)
 		rfkill_free(ehotk->eeepc_wlan_rfkill);
+	eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P6");
+	eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P7");
  ehotk_fail:
 	kfree(ehotk);
 	ehotk = NULL;
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index 9a3a682..9496494f 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -110,11 +110,9 @@
 
 	/* acpi_unregister_gsi(pnp_irq(dev, 0)); */
 	ret = 0;
-	if (acpi_bus_power_manageable(handle)) {
-		ret = acpi_bus_set_power(handle, ACPI_STATE_D3);
-		if (ret)
-			return ret;
-	}
+	if (acpi_bus_power_manageable(handle))
+		acpi_bus_set_power(handle, ACPI_STATE_D3);
+		/* continue even if acpi_bus_set_power() fails */
 	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_DIS", NULL, NULL)))
 		ret = -ENODEV;
 	return ret;
diff --git a/drivers/regulator/da903x.c b/drivers/regulator/da903x.c
index 72b1549..c6628f5 100644
--- a/drivers/regulator/da903x.c
+++ b/drivers/regulator/da903x.c
@@ -497,7 +497,7 @@
 		.owner	= THIS_MODULE,
 	},
 	.probe		= da903x_regulator_probe,
-	.remove		= da903x_regulator_remove,
+	.remove		= __devexit_p(da903x_regulator_remove),
 };
 
 static int __init da903x_regulator_init(void)
diff --git a/drivers/rtc/rtc-pl030.c b/drivers/rtc/rtc-pl030.c
index 8261535..aaf1f75 100644
--- a/drivers/rtc/rtc-pl030.c
+++ b/drivers/rtc/rtc-pl030.c
@@ -102,7 +102,7 @@
 	.set_alarm	= pl030_set_alarm,
 };
 
-static int pl030_probe(struct amba_device *dev, void *id)
+static int pl030_probe(struct amba_device *dev, struct amba_id *id)
 {
 	struct pl030_rtc *rtc;
 	int ret;
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index 333eec6..451fc13 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -127,7 +127,7 @@
 	return 0;
 }
 
-static int pl031_probe(struct amba_device *adev, void *id)
+static int pl031_probe(struct amba_device *adev, struct amba_id *id)
 {
 	int ret;
 	struct pl031_local *ldata;
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 8b7983a..36c21b1 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -1978,7 +1978,8 @@
 {
 	struct scsi_cmnd *cmd = tw_dev->srb[request_id];
 
-	scsi_dma_unmap(cmd);
+	if (cmd->SCp.phase == TW_PHASE_SGLIST)
+		scsi_dma_unmap(cmd);
 } /* End twa_unmap_scsi_data() */
 
 /* scsi_host_template initializer */
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index c03f1d2..faa0fcf 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -6,7 +6,7 @@
    		     Arnaldo Carvalho de Melo <acme@conectiva.com.br>
                      Brad Strand <linux@3ware.com>
 
-   Copyright (C) 1999-2007 3ware Inc.
+   Copyright (C) 1999-2009 3ware Inc.
 
    Kernel compatiblity By: 	Andre Hedrick <andre@suse.com>
    Non-Copyright (C) 2000	Andre Hedrick <andre@suse.com>
@@ -1294,7 +1294,8 @@
 {
 	dprintk(KERN_WARNING "3w-xxxx: tw_unmap_scsi_data()\n");
 
-	scsi_dma_unmap(cmd);
+	if (cmd->SCp.phase == TW_PHASE_SGLIST)
+		scsi_dma_unmap(cmd);
 } /* End tw_unmap_scsi_data() */
 
 /* This function will reset a device extension */
diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h
index 8e71e5e..a5a2ba2 100644
--- a/drivers/scsi/3w-xxxx.h
+++ b/drivers/scsi/3w-xxxx.h
@@ -6,7 +6,7 @@
    		     Arnaldo Carvalho de Melo <acme@conectiva.com.br>
                      Brad Strand <linux@3ware.com>
 
-   Copyright (C) 1999-2007 3ware Inc.
+   Copyright (C) 1999-2009 3ware Inc.
 
    Kernel compatiblity By:	Andre Hedrick <andre@suse.com>
    Non-Copyright (C) 2000	Andre Hedrick <andre@suse.com>
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 8ed2990..fb27407 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -628,6 +628,17 @@
 	---help---
 	  Fibre Channel over Ethernet module
 
+config FCOE_FNIC
+	tristate "Cisco FNIC Driver"
+	depends on PCI && X86
+	select LIBFC
+	help
+	  This is support for the Cisco PCI-Express FCoE HBA.
+
+	  To compile this driver as a module, choose M here and read
+	  <file:Documentation/scsi/scsi.txt>.
+	  The module will be called fnic.
+
 config SCSI_DMX3191D
 	tristate "DMX3191D SCSI support"
 	depends on PCI && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index e7c861a..a5049cf 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -39,6 +39,7 @@
 obj-$(CONFIG_LIBFC)		+= libfc/
 obj-$(CONFIG_LIBFCOE)		+= fcoe/
 obj-$(CONFIG_FCOE)		+= fcoe/
+obj-$(CONFIG_FCOE_FNIC)		+= fnic/
 obj-$(CONFIG_ISCSI_TCP) 	+= libiscsi.o	libiscsi_tcp.o iscsi_tcp.o
 obj-$(CONFIG_INFINIBAND_ISER) 	+= libiscsi.o
 obj-$(CONFIG_SCSI_A4000T)	+= 53c700.o	a4000t.o
diff --git a/drivers/scsi/fnic/Makefile b/drivers/scsi/fnic/Makefile
new file mode 100644
index 0000000..37c3440
--- /dev/null
+++ b/drivers/scsi/fnic/Makefile
@@ -0,0 +1,15 @@
+obj-$(CONFIG_FCOE_FNIC) += fnic.o
+
+fnic-y	:= \
+	fnic_attrs.o \
+	fnic_isr.o \
+	fnic_main.o \
+	fnic_res.o \
+	fnic_fcs.o \
+	fnic_scsi.o \
+	vnic_cq.o \
+	vnic_dev.o \
+	vnic_intr.o \
+	vnic_rq.o \
+	vnic_wq_copy.o \
+	vnic_wq.o
diff --git a/drivers/scsi/fnic/cq_desc.h b/drivers/scsi/fnic/cq_desc.h
new file mode 100644
index 0000000..d1225cf
--- /dev/null
+++ b/drivers/scsi/fnic/cq_desc.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _CQ_DESC_H_
+#define _CQ_DESC_H_
+
+/*
+ * Completion queue descriptor types
+ */
+enum cq_desc_types {
+	CQ_DESC_TYPE_WQ_ENET = 0,
+	CQ_DESC_TYPE_DESC_COPY = 1,
+	CQ_DESC_TYPE_WQ_EXCH = 2,
+	CQ_DESC_TYPE_RQ_ENET = 3,
+	CQ_DESC_TYPE_RQ_FCP = 4,
+};
+
+/* Completion queue descriptor: 16B
+ *
+ * All completion queues have this basic layout.  The
+ * type_specfic area is unique for each completion
+ * queue type.
+ */
+struct cq_desc {
+	__le16 completed_index;
+	__le16 q_number;
+	u8 type_specfic[11];
+	u8 type_color;
+};
+
+#define CQ_DESC_TYPE_BITS        4
+#define CQ_DESC_TYPE_MASK        ((1 << CQ_DESC_TYPE_BITS) - 1)
+#define CQ_DESC_COLOR_MASK       1
+#define CQ_DESC_COLOR_SHIFT      7
+#define CQ_DESC_Q_NUM_BITS       10
+#define CQ_DESC_Q_NUM_MASK       ((1 << CQ_DESC_Q_NUM_BITS) - 1)
+#define CQ_DESC_COMP_NDX_BITS    12
+#define CQ_DESC_COMP_NDX_MASK    ((1 << CQ_DESC_COMP_NDX_BITS) - 1)
+
+static inline void cq_desc_dec(const struct cq_desc *desc_arg,
+	u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
+{
+	const struct cq_desc *desc = desc_arg;
+	const u8 type_color = desc->type_color;
+
+	*color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
+
+	/*
+	 * Make sure color bit is read from desc *before* other fields
+	 * are read from desc.  Hardware guarantees color bit is last
+	 * bit (byte) written.  Adding the rmb() prevents the compiler
+	 * and/or CPU from reordering the reads which would potentially
+	 * result in reading stale values.
+	 */
+
+	rmb();
+
+	*type = type_color & CQ_DESC_TYPE_MASK;
+	*q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
+	*completed_index = le16_to_cpu(desc->completed_index) &
+		CQ_DESC_COMP_NDX_MASK;
+}
+
+#endif /* _CQ_DESC_H_ */
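
A hedged usage sketch for the decode helper above: given a raw 16-byte completion descriptor it splits out type, color, queue number and completed index (the pr_debug() consumer is only an illustration, and the usual kernel headers are assumed):

	/* Usage sketch for cq_desc_dec(). */
	static void demo_handle_cq_entry(const struct cq_desc *desc)
	{
		u8 type, color;
		u16 q_number, completed_index;

		cq_desc_dec(desc, &type, &color, &q_number, &completed_index);

		if (type == CQ_DESC_TYPE_RQ_FCP)
			pr_debug("FCP RQ completion: q %u, index %u, color %u\n",
				 q_number, completed_index, color);
	}

The rmb() inside the helper is what makes it safe to trust q_number and completed_index once the color bit says the entry is valid.
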
diff --git a/drivers/scsi/fnic/cq_enet_desc.h b/drivers/scsi/fnic/cq_enet_desc.h
new file mode 100644
index 0000000..a9fa26f
--- /dev/null
+++ b/drivers/scsi/fnic/cq_enet_desc.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _CQ_ENET_DESC_H_
+#define _CQ_ENET_DESC_H_
+
+#include "cq_desc.h"
+
+/* Ethernet completion queue descriptor: 16B */
+struct cq_enet_wq_desc {
+	__le16 completed_index;
+	__le16 q_number;
+	u8 reserved[11];
+	u8 type_color;
+};
+
+static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc,
+	u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
+{
+	cq_desc_dec((struct cq_desc *)desc, type,
+		color, q_number, completed_index);
+}
+
+/* Completion queue descriptor: Ethernet receive queue, 16B */
+struct cq_enet_rq_desc {
+	__le16 completed_index_flags;
+	__le16 q_number_rss_type_flags;
+	__le32 rss_hash;
+	__le16 bytes_written_flags;
+	__le16 vlan;
+	__le16 checksum_fcoe;
+	u8 flags;
+	u8 type_color;
+};
+
+#define CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT          (0x1 << 12)
+#define CQ_ENET_RQ_DESC_FLAGS_FCOE                  (0x1 << 13)
+#define CQ_ENET_RQ_DESC_FLAGS_EOP                   (0x1 << 14)
+#define CQ_ENET_RQ_DESC_FLAGS_SOP                   (0x1 << 15)
+
+#define CQ_ENET_RQ_DESC_RSS_TYPE_BITS               4
+#define CQ_ENET_RQ_DESC_RSS_TYPE_MASK \
+	((1 << CQ_ENET_RQ_DESC_RSS_TYPE_BITS) - 1)
+#define CQ_ENET_RQ_DESC_RSS_TYPE_NONE               0
+#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv4               1
+#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4           2
+#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6               3
+#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6           4
+#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX            5
+#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX        6
+
+#define CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC         (0x1 << 14)
+
+#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS          14
+#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK \
+	((1 << CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS) - 1)
+#define CQ_ENET_RQ_DESC_FLAGS_TRUNCATED             (0x1 << 14)
+#define CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED         (0x1 << 15)
+
+#define CQ_ENET_RQ_DESC_FCOE_SOF_BITS               4
+#define CQ_ENET_RQ_DESC_FCOE_SOF_MASK \
+	((1 << CQ_ENET_RQ_DESC_FCOE_SOF_BITS) - 1)
+#define CQ_ENET_RQ_DESC_FCOE_EOF_BITS               8
+#define CQ_ENET_RQ_DESC_FCOE_EOF_MASK \
+	((1 << CQ_ENET_RQ_DESC_FCOE_EOF_BITS) - 1)
+#define CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT              8
+
+#define CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK       (0x1 << 0)
+#define CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK              (0x1 << 0)
+#define CQ_ENET_RQ_DESC_FLAGS_UDP                   (0x1 << 1)
+#define CQ_ENET_RQ_DESC_FCOE_ENC_ERROR              (0x1 << 1)
+#define CQ_ENET_RQ_DESC_FLAGS_TCP                   (0x1 << 2)
+#define CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK          (0x1 << 3)
+#define CQ_ENET_RQ_DESC_FLAGS_IPV6                  (0x1 << 4)
+#define CQ_ENET_RQ_DESC_FLAGS_IPV4                  (0x1 << 5)
+#define CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT         (0x1 << 6)
+#define CQ_ENET_RQ_DESC_FLAGS_FCS_OK                (0x1 << 7)
+
+static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc,
+	u8 *type, u8 *color, u16 *q_number, u16 *completed_index,
+	u8 *ingress_port, u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type,
+	u8 *csum_not_calc, u32 *rss_hash, u16 *bytes_written, u8 *packet_error,
+	u8 *vlan_stripped, u16 *vlan, u16 *checksum, u8 *fcoe_sof,
+	u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error, u8 *fcoe_eof,
+	u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok,
+	u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok)
+{
+	u16 completed_index_flags = le16_to_cpu(desc->completed_index_flags);
+	u16 q_number_rss_type_flags =
+		le16_to_cpu(desc->q_number_rss_type_flags);
+	u16 bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
+
+	cq_desc_dec((struct cq_desc *)desc, type,
+		color, q_number, completed_index);
+
+	*ingress_port = (completed_index_flags &
+		CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
+	*fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
+		1 : 0;
+	*eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ?
+		1 : 0;
+	*sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ?
+		1 : 0;
+
+	*rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) &
+		CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
+	*csum_not_calc = (q_number_rss_type_flags &
+		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0;
+
+	*rss_hash = le32_to_cpu(desc->rss_hash);
+
+	*bytes_written = bytes_written_flags &
+		CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
+	*packet_error = (bytes_written_flags &
+		CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0;
+	*vlan_stripped = (bytes_written_flags &
+		CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0;
+
+	*vlan = le16_to_cpu(desc->vlan);
+
+	if (*fcoe) {
+		*fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) &
+			CQ_ENET_RQ_DESC_FCOE_SOF_MASK);
+		*fcoe_fc_crc_ok = (desc->flags &
+			CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0;
+		*fcoe_enc_error = (desc->flags &
+			CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0;
+		*fcoe_eof = (u8)((desc->checksum_fcoe >>
+			CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) &
+			CQ_ENET_RQ_DESC_FCOE_EOF_MASK);
+		*checksum = 0;
+	} else {
+		*fcoe_sof = 0;
+		*fcoe_fc_crc_ok = 0;
+		*fcoe_enc_error = 0;
+		*fcoe_eof = 0;
+		*checksum = le16_to_cpu(desc->checksum_fcoe);
+	}
+
+	*tcp_udp_csum_ok =
+		(desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0;
+	*udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0;
+	*tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0;
+	*ipv4_csum_ok =
+		(desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0;
+	*ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0;
+	*ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0;
+	*ipv4_fragment =
+		(desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0;
+	*fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0;
+}
+
+#endif /* _CQ_ENET_DESC_H_ */
diff --git a/drivers/scsi/fnic/cq_exch_desc.h b/drivers/scsi/fnic/cq_exch_desc.h
new file mode 100644
index 0000000..501660c
--- /dev/null
+++ b/drivers/scsi/fnic/cq_exch_desc.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _CQ_EXCH_DESC_H_
+#define _CQ_EXCH_DESC_H_
+
+#include "cq_desc.h"
+
+/* Exchange completion queue descriptor: 16B */
+struct cq_exch_wq_desc {
+	u16 completed_index;
+	u16 q_number;
+	u16 exchange_id;
+	u8  tmpl;
+	u8  reserved0;
+	u32 reserved1;
+	u8  exch_status;
+	u8  reserved2[2];
+	u8  type_color;
+};
+
+#define CQ_EXCH_WQ_STATUS_BITS      2
+#define CQ_EXCH_WQ_STATUS_MASK      ((1 << CQ_EXCH_WQ_STATUS_BITS) - 1)
+
+enum cq_exch_status_types {
+	CQ_EXCH_WQ_STATUS_TYPE_COMPLETE = 0,
+	CQ_EXCH_WQ_STATUS_TYPE_ABORT = 1,
+	CQ_EXCH_WQ_STATUS_TYPE_SGL_EOF = 2,
+	CQ_EXCH_WQ_STATUS_TYPE_TMPL_ERR = 3,
+};
+
+static inline void cq_exch_wq_desc_dec(struct cq_exch_wq_desc *desc_ptr,
+				       u8  *type,
+				       u8  *color,
+				       u16 *q_number,
+				       u16 *completed_index,
+				       u8  *exch_status)
+{
+	cq_desc_dec((struct cq_desc *)desc_ptr, type,
+		    color, q_number, completed_index);
+	*exch_status = desc_ptr->exch_status & CQ_EXCH_WQ_STATUS_MASK;
+}
+
+struct cq_fcp_rq_desc {
+	u16 completed_index_eop_sop_prt;
+	u16 q_number;
+	u16 exchange_id;
+	u16 tmpl;
+	u16 bytes_written;
+	u16 vlan;
+	u8  sof;
+	u8  eof;
+	u8  fcs_fer_fck;
+	u8  type_color;
+};
+
+#define CQ_FCP_RQ_DESC_FLAGS_SOP		(1 << 15)
+#define CQ_FCP_RQ_DESC_FLAGS_EOP		(1 << 14)
+#define CQ_FCP_RQ_DESC_FLAGS_PRT		(1 << 12)
+#define CQ_FCP_RQ_DESC_TMPL_MASK		0x1f
+#define CQ_FCP_RQ_DESC_BYTES_WRITTEN_MASK	0x3fff
+#define CQ_FCP_RQ_DESC_PACKET_ERR_SHIFT		14
+#define CQ_FCP_RQ_DESC_PACKET_ERR_MASK (1 << CQ_FCP_RQ_DESC_PACKET_ERR_SHIFT)
+#define CQ_FCP_RQ_DESC_VS_STRIPPED_SHIFT	15
+#define CQ_FCP_RQ_DESC_VS_STRIPPED_MASK (1 << CQ_FCP_RQ_DESC_VS_STRIPPED_SHIFT)
+#define CQ_FCP_RQ_DESC_FC_CRC_OK_MASK		0x1
+#define CQ_FCP_RQ_DESC_FCOE_ERR_SHIFT		1
+#define CQ_FCP_RQ_DESC_FCOE_ERR_MASK (1 << CQ_FCP_RQ_DESC_FCOE_ERR_SHIFT)
+#define CQ_FCP_RQ_DESC_FCS_OK_SHIFT		7
+#define CQ_FCP_RQ_DESC_FCS_OK_MASK (1 << CQ_FCP_RQ_DESC_FCS_OK_SHIFT)
+
+static inline void cq_fcp_rq_desc_dec(struct cq_fcp_rq_desc *desc_ptr,
+				      u8  *type,
+				      u8  *color,
+				      u16 *q_number,
+				      u16 *completed_index,
+				      u8  *eop,
+				      u8  *sop,
+				      u8  *fck,
+				      u16 *exchange_id,
+				      u16 *tmpl,
+				      u32 *bytes_written,
+				      u8  *sof,
+				      u8  *eof,
+				      u8  *ingress_port,
+				      u8  *packet_err,
+				      u8  *fcoe_err,
+				      u8  *fcs_ok,
+				      u8  *vlan_stripped,
+				      u16 *vlan)
+{
+	cq_desc_dec((struct cq_desc *)desc_ptr, type,
+		    color, q_number, completed_index);
+	*eop = (desc_ptr->completed_index_eop_sop_prt &
+		CQ_FCP_RQ_DESC_FLAGS_EOP) ? 1 : 0;
+	*sop = (desc_ptr->completed_index_eop_sop_prt &
+		CQ_FCP_RQ_DESC_FLAGS_SOP) ? 1 : 0;
+	*ingress_port =
+		(desc_ptr->completed_index_eop_sop_prt &
+		 CQ_FCP_RQ_DESC_FLAGS_PRT) ? 1 : 0;
+	*exchange_id = desc_ptr->exchange_id;
+	*tmpl = desc_ptr->tmpl & CQ_FCP_RQ_DESC_TMPL_MASK;
+	*bytes_written =
+		desc_ptr->bytes_written & CQ_FCP_RQ_DESC_BYTES_WRITTEN_MASK;
+	*packet_err =
+		(desc_ptr->bytes_written & CQ_FCP_RQ_DESC_PACKET_ERR_MASK) >>
+		CQ_FCP_RQ_DESC_PACKET_ERR_SHIFT;
+	*vlan_stripped =
+		(desc_ptr->bytes_written & CQ_FCP_RQ_DESC_VS_STRIPPED_MASK) >>
+		CQ_FCP_RQ_DESC_VS_STRIPPED_SHIFT;
+	*vlan = desc_ptr->vlan;
+	*sof = desc_ptr->sof;
+	*fck = desc_ptr->fcs_fer_fck & CQ_FCP_RQ_DESC_FC_CRC_OK_MASK;
+	*fcoe_err = (desc_ptr->fcs_fer_fck & CQ_FCP_RQ_DESC_FCOE_ERR_MASK) >>
+		CQ_FCP_RQ_DESC_FCOE_ERR_SHIFT;
+	*eof = desc_ptr->eof;
+	*fcs_ok =
+		(desc_ptr->fcs_fer_fck & CQ_FCP_RQ_DESC_FCS_OK_MASK) >>
+		CQ_FCP_RQ_DESC_FCS_OK_SHIFT;
+}
+
+struct cq_sgl_desc {
+	u16 exchange_id;
+	u16 q_number;
+	u32 active_burst_offset;
+	u32 tot_data_bytes;
+	u16 tmpl;
+	u8  sgl_err;
+	u8  type_color;
+};
+
+enum cq_sgl_err_types {
+	CQ_SGL_ERR_NO_ERROR = 0,
+	CQ_SGL_ERR_OVERFLOW,         /* data ran beyond end of SGL */
+	CQ_SGL_ERR_SGL_LCL_ADDR_ERR, /* sgl access to local vnic addr illegal*/
+	CQ_SGL_ERR_ADDR_RSP_ERR,     /* sgl address error */
+	CQ_SGL_ERR_DATA_RSP_ERR,     /* sgl data rsp error */
+	CQ_SGL_ERR_CNT_ZERO_ERR,     /* SGL count is 0 */
+	CQ_SGL_ERR_CNT_MAX_ERR,      /* SGL count is larger than supported */
+	CQ_SGL_ERR_ORDER_ERR,        /* frames recv on both ports, order err */
+	CQ_SGL_ERR_DATA_LCL_ADDR_ERR,/* sgl data buf to local vnic addr ill */
+	CQ_SGL_ERR_HOST_CQ_ERR,      /* host cq entry to local vnic addr ill */
+};
+
+#define CQ_SGL_SGL_ERR_MASK             0x1f
+#define CQ_SGL_TMPL_MASK                0x1f
+
+static inline void cq_sgl_desc_dec(struct cq_sgl_desc *desc_ptr,
+				   u8  *type,
+				   u8  *color,
+				   u16 *q_number,
+				   u16 *exchange_id,
+				   u32 *active_burst_offset,
+				   u32 *tot_data_bytes,
+				   u16 *tmpl,
+				   u8  *sgl_err)
+{
+	/*
+	 * Cheat a little by assuming exchange_id occupies the same
+	 * descriptor slot as completed_index
+	 */
+	cq_desc_dec((struct cq_desc *)desc_ptr, type, color, q_number,
+		    exchange_id);
+	*active_burst_offset = desc_ptr->active_burst_offset;
+	*tot_data_bytes = desc_ptr->tot_data_bytes;
+	*tmpl = desc_ptr->tmpl & CQ_SGL_TMPL_MASK;
+	*sgl_err = desc_ptr->sgl_err & CQ_SGL_SGL_ERR_MASK;
+}
+
+#endif /* _CQ_EXCH_DESC_H_ */
diff --git a/drivers/scsi/fnic/fcpio.h b/drivers/scsi/fnic/fcpio.h
new file mode 100644
index 0000000..12d770d
--- /dev/null
+++ b/drivers/scsi/fnic/fcpio.h
@@ -0,0 +1,780 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _FCPIO_H_
+#define _FCPIO_H_
+
+#include <linux/if_ether.h>
+
+/*
+ * This header file defines all of the data structures used for
+ * communication between the host driver and the fcp firmware.
+ */
+
+/*
+ * Exchange and sequence id space allocated to the host driver
+ */
+#define FCPIO_HOST_EXCH_RANGE_START         0x1000
+#define FCPIO_HOST_EXCH_RANGE_END           0x1fff
+#define FCPIO_HOST_SEQ_ID_RANGE_START       0x80
+#define FCPIO_HOST_SEQ_ID_RANGE_END         0xff
+
+/*
+ * Command entry type
+ */
+enum fcpio_type {
+	/*
+	 * Initiator request types
+	 */
+	FCPIO_ICMND_16 = 0x1,
+	FCPIO_ICMND_32,
+	FCPIO_ICMND_CMPL,
+	FCPIO_ITMF,
+	FCPIO_ITMF_CMPL,
+
+	/*
+	 * Target request types
+	 */
+	FCPIO_TCMND_16 = 0x11,
+	FCPIO_TCMND_32,
+	FCPIO_TDATA,
+	FCPIO_TXRDY,
+	FCPIO_TRSP,
+	FCPIO_TDRSP_CMPL,
+	FCPIO_TTMF,
+	FCPIO_TTMF_ACK,
+	FCPIO_TABORT,
+	FCPIO_TABORT_CMPL,
+
+	/*
+	 * Misc request types
+	 */
+	FCPIO_ACK = 0x20,
+	FCPIO_RESET,
+	FCPIO_RESET_CMPL,
+	FCPIO_FLOGI_REG,
+	FCPIO_FLOGI_REG_CMPL,
+	FCPIO_ECHO,
+	FCPIO_ECHO_CMPL,
+	FCPIO_LUNMAP_CHNG,
+	FCPIO_LUNMAP_REQ,
+	FCPIO_LUNMAP_REQ_CMPL,
+	FCPIO_FLOGI_FIP_REG,
+	FCPIO_FLOGI_FIP_REG_CMPL,
+};
+
+/*
+ * Header status codes from the firmware
+ */
+enum fcpio_status {
+	FCPIO_SUCCESS = 0,              /* request was successful */
+
+	/*
+	 * If a request to the firmware is rejected, the original request
+	 * header will be returned with the status set to one of the following:
+	 */
+	FCPIO_INVALID_HEADER,    /* header contains invalid data */
+	FCPIO_OUT_OF_RESOURCE,   /* out of resources to complete request */
+	FCPIO_INVALID_PARAM,     /* some parameter in request is invalid */
+	FCPIO_REQ_NOT_SUPPORTED, /* request type is not supported */
+	FCPIO_IO_NOT_FOUND,      /* requested I/O was not found */
+
+	/*
+	 * Once a request is processed, the firmware will usually return
+	 * a cmpl message type.  If an error occurred, the header status
+	 * field will be filled in with one of the following:
+	 */
+	FCPIO_ABORTED = 0x41,     /* request was aborted */
+	FCPIO_TIMEOUT,            /* request was timed out */
+	FCPIO_SGL_INVALID,        /* request was aborted due to sgl error */
+	FCPIO_MSS_INVALID,        /* request was aborted due to mss error */
+	FCPIO_DATA_CNT_MISMATCH,  /* recv/sent more/less data than exp. */
+	FCPIO_FW_ERR,             /* request was terminated due to fw error */
+	FCPIO_ITMF_REJECTED,      /* itmf req was rejected by remote node */
+	FCPIO_ITMF_FAILED,        /* itmf req was failed by remote node */
+	FCPIO_ITMF_INCORRECT_LUN, /* itmf req targeted incorrect LUN */
+	FCPIO_CMND_REJECTED,      /* request was invalid and rejected */
+	FCPIO_NO_PATH_AVAIL,      /* no path to the lun was available */
+	FCPIO_PATH_FAILED,        /* i/o sent to current path failed */
+	FCPIO_LUNMAP_CHNG_PEND,   /* i/o rejected due to lunmap change */
+};
+
+/*
+ * The header command tag.  All host requests will use the "tag" field
+ * to mark commands with a unique tag.  When the firmware responds to
+ * a host request, it will copy the tag field into the response.
+ *
+ * The only firmware requests that will use the rx_id/ox_id fields instead
+ * of the tag field will be the target command and target task management
+ * requests.  These two requests do not have corresponding host requests
+ * since they come directly from the FC initiator on the network.
+ */
+struct fcpio_tag {
+	union {
+		u32 req_id;
+		struct {
+			u16 rx_id;
+			u16 ox_id;
+		} ex_id;
+	} u;
+};
+
+static inline void
+fcpio_tag_id_enc(struct fcpio_tag *tag, u32 id)
+{
+	tag->u.req_id = id;
+}
+
+static inline void
+fcpio_tag_id_dec(struct fcpio_tag *tag, u32 *id)
+{
+	*id = tag->u.req_id;
+}
+
+static inline void
+fcpio_tag_exid_enc(struct fcpio_tag *tag, u16 ox_id, u16 rx_id)
+{
+	tag->u.ex_id.rx_id = rx_id;
+	tag->u.ex_id.ox_id = ox_id;
+}
+
+static inline void
+fcpio_tag_exid_dec(struct fcpio_tag *tag, u16 *ox_id, u16 *rx_id)
+{
+	*rx_id = tag->u.ex_id.rx_id;
+	*ox_id = tag->u.ex_id.ox_id;
+}
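+
+/*
+ * Illustrative sketch (not part of the driver): the tag is an opaque
+ * value that the firmware copies from a host request into the matching
+ * response, so completions can be matched back to the command that
+ * issued them.  Assuming a hypothetical request/response pair:
+ *
+ *	fcpio_tag_id_enc(&req->hdr.tag, id);		(request side)
+ *	fcpio_tag_id_dec(&cmpl->hdr.tag, &id);		(completion side)
+ */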
+
+/*
+ * The header for an fcpio request, whether from the firmware or from the
+ * host driver
+ */
+struct fcpio_header {
+	u8            type;           /* enum fcpio_type */
+	u8            status;         /* header status entry */
+	u16           _resvd;         /* reserved */
+	struct fcpio_tag    tag;      /* header tag */
+};
+
+static inline void
+fcpio_header_enc(struct fcpio_header *hdr,
+		 u8 type, u8 status,
+		 struct fcpio_tag tag)
+{
+	hdr->type = type;
+	hdr->status = status;
+	hdr->_resvd = 0;
+	hdr->tag = tag;
+}
+
+static inline void
+fcpio_header_dec(struct fcpio_header *hdr,
+		 u8 *type, u8 *status,
+		 struct fcpio_tag *tag)
+{
+	*type = hdr->type;
+	*status = hdr->status;
+	*tag = hdr->tag;
+}
+
+#define CDB_16      16
+#define CDB_32      32
+#define LUN_ADDRESS 8
+
+/*
+ * fcpio_icmnd_16: host -> firmware request
+ *
+ * used for sending out an initiator SCSI 16-byte command
+ */
+struct fcpio_icmnd_16 {
+	u32	  lunmap_id;		/* index into lunmap table */
+	u8	  special_req_flags;	/* special exchange request flags */
+	u8	  _resvd0[3];	        /* reserved */
+	u32	  sgl_cnt;		/* scatter-gather list count */
+	u32	  sense_len;		/* sense buffer length */
+	u64	  sgl_addr;		/* scatter-gather list addr */
+	u64	  sense_addr;		/* sense buffer address */
+	u8	  crn;			/* SCSI Command Reference No. */
+	u8	  pri_ta;		/* SCSI Priority and Task attribute */
+	u8	  _resvd1;		/* reserved: should be 0 */
+	u8	  flags;		/* command flags */
+	u8	  scsi_cdb[CDB_16];	/* SCSI Cmnd Descriptor Block */
+	u32	  data_len;		/* length of data expected */
+	u8	  lun[LUN_ADDRESS];	/* FC vNIC only: LUN address */
+	u8	  _resvd2;		/* reserved */
+	u8	  d_id[3];		/* FC vNIC only: Target D_ID */
+	u16	  mss;			/* FC vNIC only: max burst */
+	u16	  _resvd3;		/* reserved */
+	u32	  r_a_tov;		/* FC vNIC only: Res. Alloc Timeout */
+	u32	  e_d_tov;	        /* FC vNIC only: Err Detect Timeout */
+};
+
+/*
+ * Special request flags
+ */
+#define FCPIO_ICMND_SRFLAG_RETRY 0x01   /* Enable Retry handling on exchange */
+
+/*
+ * Priority/Task Attribute settings
+ */
+#define FCPIO_ICMND_PTA_SIMPLE      0   /* simple task attribute */
+#define FCPIO_ICMND_PTA_HEADQ       1   /* head of queue task attribute */
+#define FCPIO_ICMND_PTA_ORDERED     2   /* ordered task attribute */
+#define FCPIO_ICMND_PTA_ACA         4   /* auto contingent allegiance */
+#define FCPIO_ICMND_PRI_SHIFT       3   /* priority field starts in bit 3 */
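+
+/*
+ * Illustrative sketch (not part of the driver): pri_ta carries both the
+ * task attribute in its low bits and the SCSI priority shifted up by
+ * FCPIO_ICMND_PRI_SHIFT, so a simple-queue command at priority 2 could
+ * be encoded as:
+ *
+ *	icmnd->pri_ta = FCPIO_ICMND_PTA_SIMPLE |
+ *			(2 << FCPIO_ICMND_PRI_SHIFT);
+ */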
+
+/*
+ * Command flags
+ */
+#define FCPIO_ICMND_RDDATA      0x02    /* read data */
+#define FCPIO_ICMND_WRDATA      0x01    /* write data */
+
+/*
+ * fcpio_icmnd_32: host -> firmware request
+ *
+ * used for sending out an initiator SCSI 32-byte command
+ */
+struct fcpio_icmnd_32 {
+	u32   lunmap_id;              /* index into lunmap table */
+	u8    special_req_flags;      /* special exchange request flags */
+	u8    _resvd0[3];             /* reserved */
+	u32   sgl_cnt;                /* scatter-gather list count */
+	u32   sense_len;              /* sense buffer length */
+	u64   sgl_addr;               /* scatter-gather list addr */
+	u64   sense_addr;             /* sense buffer address */
+	u8    crn;                    /* SCSI Command Reference No. */
+	u8    pri_ta;                 /* SCSI Priority and Task attribute */
+	u8    _resvd1;                /* reserved: should be 0 */
+	u8    flags;                  /* command flags */
+	u8    scsi_cdb[CDB_32];       /* SCSI Cmnd Descriptor Block */
+	u32   data_len;               /* length of data expected */
+	u8    lun[LUN_ADDRESS];       /* FC vNIC only: LUN address */
+	u8    _resvd2;                /* reserved */
+	u8    d_id[3];		      /* FC vNIC only: Target D_ID */
+	u16   mss;                    /* FC vNIC only: max burst */
+	u16   _resvd3;                /* reserved */
+	u32   r_a_tov;                /* FC vNIC only: Res. Alloc Timeout */
+	u32   e_d_tov;                /* FC vNIC only: Error Detect Timeout */
+};
+
+/*
+ * fcpio_itmf: host -> firmware request
+ *
+ * used for requesting the firmware to abort a request and/or send out
+ * a task management function
+ *
+ * The t_tag field is only needed when the request type is ABT_TASK.
+ */
+struct fcpio_itmf {
+	u32   lunmap_id;              /* index into lunmap table */
+	u32   tm_req;                 /* SCSI Task Management request */
+	u32   t_tag;                  /* header tag of fcpio to be aborted */
+	u32   _resvd;                 /* reserved */
+	u8    lun[LUN_ADDRESS];       /* FC vNIC only: LUN address */
+	u8    _resvd1;                /* reserved */
+	u8    d_id[3];		      /* FC vNIC only: Target D_ID */
+	u32   r_a_tov;                /* FC vNIC only: R_A_TOV in msec */
+	u32   e_d_tov;                /* FC vNIC only: E_D_TOV in msec */
+};
+
+/*
+ * Task Management request
+ */
+enum fcpio_itmf_tm_req_type {
+	FCPIO_ITMF_ABT_TASK_TERM = 0x01,    /* abort task and terminate */
+	FCPIO_ITMF_ABT_TASK,                /* abort task and issue abts */
+	FCPIO_ITMF_ABT_TASK_SET,            /* abort task set */
+	FCPIO_ITMF_CLR_TASK_SET,            /* clear task set */
+	FCPIO_ITMF_LUN_RESET,               /* logical unit reset task mgmt */
+	FCPIO_ITMF_CLR_ACA,                 /* Clear ACA condition */
+};
+
+/*
+ * fcpio_tdata: host -> firmware request
+ *
+ * used for requesting the firmware to send out a read data transfer for a
+ * target command
+ */
+struct fcpio_tdata {
+	u16   rx_id;                  /* FC rx_id of target command */
+	u16   flags;                  /* command flags */
+	u32   rel_offset;             /* data sequence relative offset */
+	u32   sgl_cnt;                /* scatter-gather list count */
+	u32   data_len;               /* length of data expected to send */
+	u64   sgl_addr;               /* scatter-gather list address */
+};
+
+/*
+ * Command flags
+ */
+#define FCPIO_TDATA_SCSI_RSP    0x01    /* send a scsi resp. after last frame */
+
+/*
+ * fcpio_txrdy: host -> firmware request
+ *
+ * used for requesting the firmware to send out a write data transfer for a
+ * target command
+ */
+struct fcpio_txrdy {
+	u16   rx_id;                  /* FC rx_id of target command */
+	u16   _resvd0;                /* reserved */
+	u32   rel_offset;             /* data sequence relative offset */
+	u32   sgl_cnt;                /* scatter-gather list count */
+	u32   data_len;               /* length of data expected to send */
+	u64   sgl_addr;               /* scatter-gather list address */
+};
+
+/*
+ * fcpio_trsp: host -> firmware request
+ *
+ * used for requesting the firmware to send out a response for a target
+ * command
+ */
+struct fcpio_trsp {
+	u16   rx_id;                  /* FC rx_id of target command */
+	u16   _resvd0;                /* reserved */
+	u32   sense_len;              /* sense data buffer length */
+	u64   sense_addr;             /* sense data buffer address */
+	u16   _resvd1;                /* reserved */
+	u8    flags;                  /* response request flags */
+	u8    scsi_status;            /* SCSI status */
+	u32   residual;               /* SCSI data residual value of I/O */
+};
+
+/*
+ * response request flags
+ */
+#define FCPIO_TRSP_RESID_UNDER  0x08   /* residual is valid and is underflow */
+#define FCPIO_TRSP_RESID_OVER   0x04   /* residual is valid and is overflow */
+
+/*
+ * fcpio_ttmf_ack: host -> firmware response
+ *
+ * used by the host to indicate to the firmware it has received and processed
+ * the target tmf request
+ */
+struct fcpio_ttmf_ack {
+	u16   rx_id;                  /* FC rx_id of target command */
+	u16   _resvd0;                /* reserved */
+	u32   tmf_status;             /* SCSI task management status */
+};
+
+/*
+ * fcpio_tabort: host -> firmware request
+ *
+ * used by the host to request the firmware to abort a target request that was
+ * received by the firmware
+ */
+struct fcpio_tabort {
+	u16   rx_id;                  /* rx_id of the target request */
+};
+
+/*
+ * fcpio_reset: host -> firmware request
+ *
+ * used by the host to signal a reset of the driver to the firmware
+ * and to request firmware to clean up all outstanding I/O
+ */
+struct fcpio_reset {
+	u32   _resvd;
+};
+
+enum fcpio_flogi_reg_format_type {
+	FCPIO_FLOGI_REG_DEF_DEST = 0,    /* Use the oui | s_id mac format */
+	FCPIO_FLOGI_REG_GW_DEST,         /* Use the fixed gateway mac */
+};
+
+/*
+ * fcpio_flogi_reg: host -> firmware request
+ *
+ * fc vnic only
+ * used by the host to notify the firmware of the lif's s_id
+ * and destination mac address format
+ */
+struct fcpio_flogi_reg {
+	u8 format;
+	u8 s_id[3];			/* FC vNIC only: Source S_ID */
+	u8 gateway_mac[ETH_ALEN];	/* Destination gateway mac */
+	u16 _resvd;
+	u32 r_a_tov;			/* R_A_TOV in msec */
+	u32 e_d_tov;			/* E_D_TOV in msec */
+};
+
+/*
+ * fcpio_echo: host -> firmware request
+ *
+ * sends a heartbeat echo request to the firmware
+ */
+struct fcpio_echo {
+	u32 _resvd;
+};
+
+/*
+ * fcpio_lunmap_req: host -> firmware request
+ *
+ * scsi vnic only
+ * sends a request to retrieve the lunmap table for scsi vnics
+ */
+struct fcpio_lunmap_req {
+	u64 addr;                     /* address of the buffer */
+	u32 len;                      /* len of the buffer */
+};
+
+/*
+ * fcpio_flogi_fip_reg: host -> firmware request
+ *
+ * fc vnic only
+ * used by the host to notify the firmware of the lif's s_id
+ * and destination mac address format
+ */
+struct fcpio_flogi_fip_reg {
+	u8    _resvd0;
+	u8     s_id[3];               /* FC vNIC only: Source S_ID */
+	u8     fcf_mac[ETH_ALEN];     /* FCF Target destination mac */
+	u16   _resvd1;
+	u32   r_a_tov;                /* R_A_TOV in msec */
+	u32   e_d_tov;                /* E_D_TOV in msec */
+	u8    ha_mac[ETH_ALEN];       /* Host adapter source mac */
+	u16   _resvd2;
+};
+
+/*
+ * Basic structure for all fcpio structures that are sent from the host to the
+ * firmware.  Each structure is 128 bytes.
+ */
+#define FCPIO_HOST_REQ_LEN      128     /* expected length of host requests */
+
+struct fcpio_host_req {
+	struct fcpio_header hdr;
+
+	union {
+		/*
+		 * Defines space needed for request
+		 */
+		u8 buf[FCPIO_HOST_REQ_LEN - sizeof(struct fcpio_header)];
+
+		/*
+		 * Initiator host requests
+		 */
+		struct fcpio_icmnd_16               icmnd_16;
+		struct fcpio_icmnd_32               icmnd_32;
+		struct fcpio_itmf                   itmf;
+
+		/*
+		 * Target host requests
+		 */
+		struct fcpio_tdata                  tdata;
+		struct fcpio_txrdy                  txrdy;
+		struct fcpio_trsp                   trsp;
+		struct fcpio_ttmf_ack               ttmf_ack;
+		struct fcpio_tabort                 tabort;
+
+		/*
+		 * Misc requests
+		 */
+		struct fcpio_reset                  reset;
+		struct fcpio_flogi_reg              flogi_reg;
+		struct fcpio_echo                   echo;
+		struct fcpio_lunmap_req             lunmap_req;
+		struct fcpio_flogi_fip_reg          flogi_fip_reg;
+	} u;
+};
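+
+/*
+ * Illustrative note (not enforced here): the buf[] member pads the union
+ * so that every host request occupies exactly FCPIO_HOST_REQ_LEN bytes;
+ * a build-time check could look like:
+ *
+ *	BUILD_BUG_ON(sizeof(struct fcpio_host_req) != FCPIO_HOST_REQ_LEN);
+ */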
+
+/*
+ * fcpio_icmnd_cmpl: firmware -> host response
+ *
+ * used for sending the host a response to an initiator command
+ */
+struct fcpio_icmnd_cmpl {
+	u8    _resvd0[6];             /* reserved */
+	u8    flags;                  /* response flags */
+	u8    scsi_status;            /* SCSI status */
+	u32   residual;               /* SCSI data residual length */
+	u32   sense_len;              /* SCSI sense length */
+};
+
+/*
+ * response flags
+ */
+#define FCPIO_ICMND_CMPL_RESID_UNDER    0x08    /* resid under and valid */
+#define FCPIO_ICMND_CMPL_RESID_OVER     0x04    /* resid over and valid */
+
+/*
+ * fcpio_itmf_cmpl: firmware -> host response
+ *
+ * used for sending the host a response for a itmf request
+ */
+struct fcpio_itmf_cmpl {
+	u32    _resvd;                /* reserved */
+};
+
+/*
+ * fcpio_tcmnd_16: firmware -> host request
+ *
+ * used by the firmware to notify the host of an incoming target SCSI 16-Byte
+ * request
+ */
+struct fcpio_tcmnd_16 {
+	u8    lun[LUN_ADDRESS];       /* FC vNIC only: LUN address */
+	u8    crn;                    /* SCSI Command Reference No. */
+	u8    pri_ta;                 /* SCSI Priority and Task attribute */
+	u8    _resvd2;                /* reserved: should be 0 */
+	u8    flags;                  /* command flags */
+	u8    scsi_cdb[CDB_16];       /* SCSI Cmnd Descriptor Block */
+	u32   data_len;               /* length of data expected */
+	u8    _resvd1;                /* reserved */
+	u8    s_id[3];		      /* FC vNIC only: Source S_ID */
+};
+
+/*
+ * Priority/Task Attribute settings
+ */
+#define FCPIO_TCMND_PTA_SIMPLE      0   /* simple task attribute */
+#define FCPIO_TCMND_PTA_HEADQ       1   /* head of queue task attribute */
+#define FCPIO_TCMND_PTA_ORDERED     2   /* ordered task attribute */
+#define FCPIO_TCMND_PTA_ACA         4   /* auto contingent allegiance */
+#define FCPIO_TCMND_PRI_SHIFT       3   /* priority field starts in bit 3 */
+
+/*
+ * Command flags
+ */
+#define FCPIO_TCMND_RDDATA      0x02    /* read data */
+#define FCPIO_TCMND_WRDATA      0x01    /* write data */
+
+/*
+ * fcpio_tcmnd_32: firmware -> host request
+ *
+ * used by the firmware to notify the host of an incoming target SCSI 32-Byte
+ * request
+ */
+struct fcpio_tcmnd_32 {
+	u8    lun[LUN_ADDRESS];       /* FC vNIC only: LUN address */
+	u8    crn;                    /* SCSI Command Reference No. */
+	u8    pri_ta;                 /* SCSI Priority and Task attribute */
+	u8    _resvd2;                /* reserved: should be 0 */
+	u8    flags;                  /* command flags */
+	u8    scsi_cdb[CDB_32];       /* SCSI Cmnd Descriptor Block */
+	u32   data_len;               /* length of data expected */
+	u8    _resvd0;                /* reserved */
+	u8    s_id[3];		      /* FC vNIC only: Source S_ID */
+};
+
+/*
+ * fcpio_tdrsp_cmpl: firmware -> host response
+ *
+ * used by the firmware to notify the host of a response to a host target
+ * command
+ */
+struct fcpio_tdrsp_cmpl {
+	u16   rx_id;                  /* rx_id of the target request */
+	u16   _resvd0;                /* reserved */
+};
+
+/*
+ * fcpio_ttmf: firmware -> host request
+ *
+ * used by the firmware to notify the host of an incoming task management
+ * function request
+ */
+struct fcpio_ttmf {
+	u8    _resvd0;                /* reserved */
+	u8    s_id[3];		      /* FC vNIC only: Source S_ID */
+	u8    lun[LUN_ADDRESS];       /* FC vNIC only: LUN address */
+	u8    crn;                    /* SCSI Command Reference No. */
+	u8    _resvd2[3];             /* reserved */
+	u32   tmf_type;               /* task management request type */
+};
+
+/*
+ * Task Management request
+ */
+#define FCPIO_TTMF_CLR_ACA      0x40    /* Clear ACA condition */
+#define FCPIO_TTMF_LUN_RESET    0x10    /* logical unit reset task mgmt */
+#define FCPIO_TTMF_CLR_TASK_SET 0x04    /* clear task set */
+#define FCPIO_TTMF_ABT_TASK_SET 0x02    /* abort task set */
+#define FCPIO_TTMF_ABT_TASK     0x01    /* abort task */
+
+/*
+ * fcpio_tabort_cmpl: firmware -> host response
+ *
+ * used by the firmware to respond to a host's tabort request
+ */
+struct fcpio_tabort_cmpl {
+	u16   rx_id;                  /* rx_id of the target request */
+	u16   _resvd0;                /* reserved */
+};
+
+/*
+ * fcpio_ack: firmware -> host response
+ *
+ * used by firmware to notify the host of the last work request received
+ */
+struct fcpio_ack {
+	u16  request_out;             /* last host entry received */
+	u16  _resvd;
+};
+
+/*
+ * fcpio_reset_cmpl: firmware -> host response
+ *
+ * used by firmware to respond to the host's reset request
+ */
+struct fcpio_reset_cmpl {
+	u16   vnic_id;
+};
+
+/*
+ * fcpio_flogi_reg_cmpl: firmware -> host response
+ *
+ * fc vnic only
+ * response to the fcpio_flogi_reg request
+ */
+struct fcpio_flogi_reg_cmpl {
+	u32 _resvd;
+};
+
+/*
+ * fcpio_echo_cmpl: firmware -> host response
+ *
+ * response to the fcpio_echo request
+ */
+struct fcpio_echo_cmpl {
+	u32 _resvd;
+};
+
+/*
+ * fcpio_lunmap_chng: firmware -> host notification
+ *
+ * scsi vnic only
+ * notifies the host that the lunmap tables have changed
+ */
+struct fcpio_lunmap_chng {
+	u32 _resvd;
+};
+
+/*
+ * fcpio_lunmap_req_cmpl: firmware -> host response
+ *
+ * scsi vnic only
+ * response for lunmap table request from the host
+ */
+struct fcpio_lunmap_req_cmpl {
+	u32 _resvd;
+};
+
+/*
+ * Basic structure for all fcpio structures that are sent from the firmware to
+ * the host.  Each structure is 64 bytes.
+ */
+#define FCPIO_FW_REQ_LEN        64      /* expected length of fw requests */
+struct fcpio_fw_req {
+	struct fcpio_header hdr;
+
+	union {
+		/*
+		 * Defines space needed for request
+		 */
+		u8 buf[FCPIO_FW_REQ_LEN - sizeof(struct fcpio_header)];
+
+		/*
+		 * Initiator firmware responses
+		 */
+		struct fcpio_icmnd_cmpl         icmnd_cmpl;
+		struct fcpio_itmf_cmpl          itmf_cmpl;
+
+		/*
+		 * Target firmware new requests
+		 */
+		struct fcpio_tcmnd_16           tcmnd_16;
+		struct fcpio_tcmnd_32           tcmnd_32;
+
+		/*
+		 * Target firmware responses
+		 */
+		struct fcpio_tdrsp_cmpl         tdrsp_cmpl;
+		struct fcpio_ttmf               ttmf;
+		struct fcpio_tabort_cmpl        tabort_cmpl;
+
+		/*
+		 * Firmware response to work received
+		 */
+		struct fcpio_ack                ack;
+
+		/*
+		 * Misc requests
+		 */
+		struct fcpio_reset_cmpl         reset_cmpl;
+		struct fcpio_flogi_reg_cmpl     flogi_reg_cmpl;
+		struct fcpio_echo_cmpl          echo_cmpl;
+		struct fcpio_lunmap_chng        lunmap_chng;
+		struct fcpio_lunmap_req_cmpl    lunmap_req_cmpl;
+	} u;
+};
+
+/*
+ * Access routines to encode and decode the color bit, which is the most
+ * significant bit of the last byte of the structure
+ */
+static inline void fcpio_color_enc(struct fcpio_fw_req *fw_req, u8 color)
+{
+	u8 *c = ((u8 *) fw_req) + sizeof(struct fcpio_fw_req) - 1;
+
+	if (color)
+		*c |= 0x80;
+	else
+		*c &= ~0x80;
+}
+
+static inline void fcpio_color_dec(struct fcpio_fw_req *fw_req, u8 *color)
+{
+	u8 *c = ((u8 *) fw_req) + sizeof(struct fcpio_fw_req) - 1;
+
+	*color = *c >> 7;
+
+	/*
+	 * Make sure color bit is read from desc *before* other fields
+	 * are read from desc.  Hardware guarantees color bit is last
+	 * bit (byte) written.  Adding the rmb() prevents the compiler
+	 * and/or CPU from reordering the reads which would potentially
+	 * result in reading stale values.
+	 */
+
+	rmb();
+
+}
+
+/*
+ * Lunmap table entry for scsi vnics
+ */
+#define FCPIO_LUNMAP_TABLE_SIZE     256
+#define FCPIO_FLAGS_LUNMAP_VALID    0x80
+#define FCPIO_FLAGS_BOOT            0x01
+struct fcpio_lunmap_entry {
+	u8    bus;
+	u8    target;
+	u8    lun;
+	u8    path_cnt;
+	u16   flags;
+	u16   update_cnt;
+};
+
+struct fcpio_lunmap_tbl {
+	u32                   update_cnt;
+	struct fcpio_lunmap_entry   lunmaps[FCPIO_LUNMAP_TABLE_SIZE];
+};
+
+#endif /* _FCPIO_H_ */
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
new file mode 100644
index 0000000..e4c0a3d
--- /dev/null
+++ b/drivers/scsi/fnic/fnic.h
@@ -0,0 +1,265 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _FNIC_H_
+#define _FNIC_H_
+
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+#include <scsi/libfc.h>
+#include "fnic_io.h"
+#include "fnic_res.h"
+#include "vnic_dev.h"
+#include "vnic_wq.h"
+#include "vnic_rq.h"
+#include "vnic_cq.h"
+#include "vnic_wq_copy.h"
+#include "vnic_intr.h"
+#include "vnic_stats.h"
+#include "vnic_scsi.h"
+
+#define DRV_NAME		"fnic"
+#define DRV_DESCRIPTION		"Cisco FCoE HBA Driver"
+#define DRV_VERSION		"1.0.0.1121"
+#define PFX			DRV_NAME ": "
+#define DFX                     DRV_NAME "%d: "
+
+#define DESC_CLEAN_LOW_WATERMARK 8
+#define FNIC_MAX_IO_REQ		2048 /* scsi_cmnd tag map entries */
+#define	FNIC_IO_LOCKS		64 /* IO locks: power of 2 */
+#define FNIC_DFLT_QUEUE_DEPTH	32
+#define	FNIC_STATS_RATE_LIMIT	4 /* limit rate at which stats are pulled up */
+
+/*
+ * Tag bits used for special requests.
+ */
+#define BIT(nr)			(1UL << (nr))
+#define FNIC_TAG_ABORT		BIT(30)		/* tag bit indicating abort */
+#define FNIC_TAG_DEV_RST	BIT(29)		/* indicates device reset */
+#define FNIC_TAG_MASK		(BIT(24) - 1)	/* mask for lookup */
+#define FNIC_NO_TAG             -1
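+
+/*
+ * Illustrative sketch (not part of the driver logic shown here): special
+ * requests reuse the SCSI tag space by OR-ing a marker bit into the tag,
+ * and the original tag is recovered with the mask:
+ *
+ *	abort_tag = tag | FNIC_TAG_ABORT;
+ *	tag	  = abort_tag & FNIC_TAG_MASK;
+ */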
+
+/*
+ * Usage of the scsi_cmnd scratchpad.
+ * These fields are locked by the hashed io_req_lock.
+ */
+#define CMD_SP(Cmnd)		((Cmnd)->SCp.ptr)
+#define CMD_STATE(Cmnd)		((Cmnd)->SCp.phase)
+#define CMD_ABTS_STATUS(Cmnd)	((Cmnd)->SCp.Message)
+#define CMD_LR_STATUS(Cmnd)	((Cmnd)->SCp.have_data_in)
+#define CMD_TAG(Cmnd)           ((Cmnd)->SCp.sent_command)
+
+#define FCPIO_INVALID_CODE 0x100 /* hdr_status value unused by firmware */
+
+#define FNIC_LUN_RESET_TIMEOUT	     10000	/* mSec */
+#define FNIC_HOST_RESET_TIMEOUT	     10000	/* mSec */
+#define FNIC_RMDEVICE_TIMEOUT        1000       /* mSec */
+#define FNIC_HOST_RESET_SETTLE_TIME  30         /* Sec */
+
+#define FNIC_MAX_FCP_TARGET     256
+
+extern unsigned int fnic_log_level;
+
+#define FNIC_MAIN_LOGGING 0x01
+#define FNIC_FCS_LOGGING 0x02
+#define FNIC_SCSI_LOGGING 0x04
+#define FNIC_ISR_LOGGING 0x08
+
+#define FNIC_CHECK_LOGGING(LEVEL, CMD)				\
+do {								\
+	if (unlikely(fnic_log_level & LEVEL))			\
+		do {						\
+			CMD;					\
+		} while (0);					\
+} while (0)
+
+#define FNIC_MAIN_DBG(kern_level, host, fmt, args...)		\
+	FNIC_CHECK_LOGGING(FNIC_MAIN_LOGGING,			\
+			 shost_printk(kern_level, host, fmt, ##args);)
+
+#define FNIC_FCS_DBG(kern_level, host, fmt, args...)		\
+	FNIC_CHECK_LOGGING(FNIC_FCS_LOGGING,			\
+			 shost_printk(kern_level, host, fmt, ##args);)
+
+#define FNIC_SCSI_DBG(kern_level, host, fmt, args...)		\
+	FNIC_CHECK_LOGGING(FNIC_SCSI_LOGGING,			\
+			 shost_printk(kern_level, host, fmt, ##args);)
+
+#define FNIC_ISR_DBG(kern_level, host, fmt, args...)		\
+	FNIC_CHECK_LOGGING(FNIC_ISR_LOGGING,			\
+			 shost_printk(kern_level, host, fmt, ##args);)
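+
+/*
+ * Illustrative usage of the logging macros (the real call sites live in
+ * the fnic .c files); the message is emitted only when the matching bit
+ * is set in fnic_log_level:
+ *
+ *	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ *		      "completed tag %d\n", tag);
+ */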
+
+extern const char *fnic_state_str[];
+
+enum fnic_intx_intr_index {
+	FNIC_INTX_WQ_RQ_COPYWQ,
+	FNIC_INTX_ERR,
+	FNIC_INTX_NOTIFY,
+	FNIC_INTX_INTR_MAX,
+};
+
+enum fnic_msix_intr_index {
+	FNIC_MSIX_RQ,
+	FNIC_MSIX_WQ,
+	FNIC_MSIX_WQ_COPY,
+	FNIC_MSIX_ERR_NOTIFY,
+	FNIC_MSIX_INTR_MAX,
+};
+
+struct fnic_msix_entry {
+	int requested;
+	char devname[IFNAMSIZ];
+	irqreturn_t (*isr)(int, void *);
+	void *devid;
+};
+
+enum fnic_state {
+	FNIC_IN_FC_MODE = 0,
+	FNIC_IN_FC_TRANS_ETH_MODE,
+	FNIC_IN_ETH_MODE,
+	FNIC_IN_ETH_TRANS_FC_MODE,
+};
+
+#define FNIC_WQ_COPY_MAX 1
+#define FNIC_WQ_MAX 1
+#define FNIC_RQ_MAX 1
+#define FNIC_CQ_MAX (FNIC_WQ_COPY_MAX + FNIC_WQ_MAX + FNIC_RQ_MAX)
+
+struct mempool;
+
+/* Per-instance private data structure */
+struct fnic {
+	struct fc_lport *lport;
+	struct vnic_dev_bar bar0;
+
+	struct msix_entry msix_entry[FNIC_MSIX_INTR_MAX];
+	struct fnic_msix_entry msix[FNIC_MSIX_INTR_MAX];
+
+	struct vnic_stats *stats;
+	unsigned long stats_time;	/* time of stats update */
+	struct vnic_nic_cfg *nic_cfg;
+	char name[IFNAMSIZ];
+	struct timer_list notify_timer; /* used for MSI interrupts */
+
+	unsigned int err_intr_offset;
+	unsigned int link_intr_offset;
+
+	unsigned int wq_count;
+	unsigned int cq_count;
+
+	u32 fcoui_mode:1;		/* use fcoui address */
+	u32 vlan_hw_insert:1;	        /* let hw insert the tag */
+	u32 in_remove:1;                /* fnic device in removal */
+	u32 stop_rx_link_events:1;      /* stop proc. rx frames, link events */
+
+	struct completion *remove_wait; /* device remove thread blocks */
+
+	struct fc_frame *flogi;
+	struct fc_frame *flogi_resp;
+	u16 flogi_oxid;
+	unsigned long s_id;
+	enum fnic_state state;
+	spinlock_t fnic_lock;
+
+	u16 vlan_id;	                /* VLAN tag including priority */
+	u8 mac_addr[ETH_ALEN];
+	u8 dest_addr[ETH_ALEN];
+	u8 data_src_addr[ETH_ALEN];
+	u64 fcp_input_bytes;		/* internal statistic */
+	u64 fcp_output_bytes;		/* internal statistic */
+	u32 link_down_cnt;
+	int link_status;
+
+	struct list_head list;
+	struct pci_dev *pdev;
+	struct vnic_fc_config config;
+	struct vnic_dev *vdev;
+	unsigned int raw_wq_count;
+	unsigned int wq_copy_count;
+	unsigned int rq_count;
+	int fw_ack_index[FNIC_WQ_COPY_MAX];
+	unsigned short fw_ack_recd[FNIC_WQ_COPY_MAX];
+	unsigned short wq_copy_desc_low[FNIC_WQ_COPY_MAX];
+	unsigned int intr_count;
+	u32 __iomem *legacy_pba;
+	struct fnic_host_tag *tags;
+	mempool_t *io_req_pool;
+	mempool_t *io_sgl_pool[FNIC_SGL_NUM_CACHES];
+	spinlock_t io_req_lock[FNIC_IO_LOCKS];	/* locks for scsi cmnds */
+
+	struct work_struct link_work;
+	struct work_struct frame_work;
+	struct sk_buff_head frame_queue;
+
+	/* copy work queue cache line section */
+	____cacheline_aligned struct vnic_wq_copy wq_copy[FNIC_WQ_COPY_MAX];
+	/* completion queue cache line section */
+	____cacheline_aligned struct vnic_cq cq[FNIC_CQ_MAX];
+
+	spinlock_t wq_copy_lock[FNIC_WQ_COPY_MAX];
+
+	/* work queue cache line section */
+	____cacheline_aligned struct vnic_wq wq[FNIC_WQ_MAX];
+	spinlock_t wq_lock[FNIC_WQ_MAX];
+
+	/* receive queue cache line section */
+	____cacheline_aligned struct vnic_rq rq[FNIC_RQ_MAX];
+
+	/* interrupt resource cache line section */
+	____cacheline_aligned struct vnic_intr intr[FNIC_MSIX_INTR_MAX];
+};
+
+extern struct workqueue_struct *fnic_event_queue;
+extern struct device_attribute *fnic_attrs[];
+
+void fnic_clear_intr_mode(struct fnic *fnic);
+int fnic_set_intr_mode(struct fnic *fnic);
+void fnic_free_intr(struct fnic *fnic);
+int fnic_request_intr(struct fnic *fnic);
+
+int fnic_send(struct fc_lport *, struct fc_frame *);
+void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf);
+void fnic_handle_frame(struct work_struct *work);
+void fnic_handle_link(struct work_struct *work);
+int fnic_rq_cmpl_handler(struct fnic *fnic, int);
+int fnic_alloc_rq_frame(struct vnic_rq *rq);
+void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
+int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp);
+
+int fnic_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *));
+int fnic_abort_cmd(struct scsi_cmnd *);
+int fnic_device_reset(struct scsi_cmnd *);
+int fnic_host_reset(struct scsi_cmnd *);
+int fnic_reset(struct Scsi_Host *);
+void fnic_scsi_cleanup(struct fc_lport *);
+void fnic_scsi_abort_io(struct fc_lport *);
+void fnic_empty_scsi_cleanup(struct fc_lport *);
+void fnic_exch_mgr_reset(struct fc_lport *, u32, u32);
+int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int);
+int fnic_wq_cmpl_handler(struct fnic *fnic, int);
+int fnic_flogi_reg_handler(struct fnic *fnic);
+void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
+				  struct fcpio_host_req *desc);
+int fnic_fw_reset_handler(struct fnic *fnic);
+void fnic_terminate_rport_io(struct fc_rport *);
+const char *fnic_state_to_str(unsigned int state);
+
+void fnic_log_q_error(struct fnic *fnic);
+void fnic_handle_link_event(struct fnic *fnic);
+
+#endif /* _FNIC_H_ */
diff --git a/drivers/scsi/fnic/fnic_attrs.c b/drivers/scsi/fnic/fnic_attrs.c
new file mode 100644
index 0000000..aea0c3b
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_attrs.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/string.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include "fnic.h"
+
+static ssize_t fnic_show_state(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct fc_lport *lp = shost_priv(class_to_shost(dev));
+	struct fnic *fnic = lport_priv(lp);
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", fnic_state_str[fnic->state]);
+}
+
+static ssize_t fnic_show_drv_version(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%s\n", DRV_VERSION);
+}
+
+static ssize_t fnic_show_link_state(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	struct fc_lport *lp = shost_priv(class_to_shost(dev));
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", (lp->link_up)
+			? "Link Up" : "Link Down");
+}
+
+static DEVICE_ATTR(fnic_state, S_IRUGO, fnic_show_state, NULL);
+static DEVICE_ATTR(drv_version, S_IRUGO, fnic_show_drv_version, NULL);
+static DEVICE_ATTR(link_state, S_IRUGO, fnic_show_link_state, NULL);
+
+struct device_attribute *fnic_attrs[] = {
+	&dev_attr_fnic_state,
+	&dev_attr_drv_version,
+	&dev_attr_link_state,
+	NULL,
+};
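+
+/*
+ * Illustrative note: this table is intended to be plugged into the SCSI
+ * host template, so the attributes would typically appear as read-only
+ * sysfs files named "fnic_state", "drv_version" and "link_state" under
+ * the Scsi_Host's sysfs directory.
+ */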
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
new file mode 100644
index 0000000..07e6eed
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -0,0 +1,742 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/skbuff.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/workqueue.h>
+#include <scsi/fc/fc_els.h>
+#include <scsi/fc/fc_fcoe.h>
+#include <scsi/fc_frame.h>
+#include <scsi/libfc.h>
+#include "fnic_io.h"
+#include "fnic.h"
+#include "cq_enet_desc.h"
+#include "cq_exch_desc.h"
+
+struct workqueue_struct *fnic_event_queue;
+
+void fnic_handle_link(struct work_struct *work)
+{
+	struct fnic *fnic = container_of(work, struct fnic, link_work);
+	unsigned long flags;
+	int old_link_status;
+	u32 old_link_down_cnt;
+
+	spin_lock_irqsave(&fnic->fnic_lock, flags);
+
+	if (fnic->stop_rx_link_events) {
+		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+		return;
+	}
+
+	old_link_down_cnt = fnic->link_down_cnt;
+	old_link_status = fnic->link_status;
+	fnic->link_status = vnic_dev_link_status(fnic->vdev);
+	fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);
+
+	if (old_link_status == fnic->link_status) {
+		if (!fnic->link_status)
+			/* DOWN -> DOWN */
+			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+		else {
+			if (old_link_down_cnt != fnic->link_down_cnt) {
+				/* UP -> DOWN -> UP */
+				fnic->lport->host_stats.link_failure_count++;
+				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+					     "link down\n");
+				fc_linkdown(fnic->lport);
+				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+					     "link up\n");
+				fc_linkup(fnic->lport);
+			} else
+				/* UP -> UP */
+				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+		}
+	} else if (fnic->link_status) {
+		/* DOWN -> UP */
+		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
+		fc_linkup(fnic->lport);
+	} else {
+		/* UP -> DOWN */
+		fnic->lport->host_stats.link_failure_count++;
+		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
+		fc_linkdown(fnic->lport);
+	}
+
+}
+
+/*
+ * This function passes incoming fabric frames to libFC
+ */
+void fnic_handle_frame(struct work_struct *work)
+{
+	struct fnic *fnic = container_of(work, struct fnic, frame_work);
+	struct fc_lport *lp = fnic->lport;
+	unsigned long flags;
+	struct sk_buff *skb;
+	struct fc_frame *fp;
+
+	while ((skb = skb_dequeue(&fnic->frame_queue))) {
+
+		spin_lock_irqsave(&fnic->fnic_lock, flags);
+		if (fnic->stop_rx_link_events) {
+			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+			dev_kfree_skb(skb);
+			return;
+		}
+		fp = (struct fc_frame *)skb;
+		/* if Flogi resp frame, register the address */
+		if (fr_flags(fp)) {
+			vnic_dev_add_addr(fnic->vdev,
+					  fnic->data_src_addr);
+			fr_flags(fp) = 0;
+		}
+		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+		fc_exch_recv(lp, lp->emp, fp);
+	}
+
+}
+
+static inline void fnic_import_rq_fc_frame(struct sk_buff *skb,
+					   u32 len, u8 sof, u8 eof)
+{
+	struct fc_frame *fp = (struct fc_frame *)skb;
+
+	skb_trim(skb, len);
+	fr_eof(fp) = eof;
+	fr_sof(fp) = sof;
+}
+
+
+static inline int fnic_import_rq_eth_pkt(struct sk_buff *skb, u32 len)
+{
+	struct fc_frame *fp;
+	struct ethhdr *eh;
+	struct vlan_ethhdr *vh;
+	struct fcoe_hdr *fcoe_hdr;
+	struct fcoe_crc_eof *ft;
+	u32    transport_len = 0;
+
+	eh = (struct ethhdr *)skb->data;
+	vh = (struct vlan_ethhdr *)skb->data;
+	if (vh->h_vlan_proto == htons(ETH_P_8021Q) &&
+	    vh->h_vlan_encapsulated_proto == htons(ETH_P_FCOE)) {
+		skb_pull(skb, sizeof(struct vlan_ethhdr));
+		transport_len += sizeof(struct vlan_ethhdr);
+	} else if (eh->h_proto == htons(ETH_P_FCOE)) {
+		transport_len += sizeof(struct ethhdr);
+		skb_pull(skb, sizeof(struct ethhdr));
+	} else
+		return -1;
+
+	fcoe_hdr = (struct fcoe_hdr *)skb->data;
+	if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER)
+		return -1;
+
+	fp = (struct fc_frame *)skb;
+	fc_frame_init(fp);
+	fr_sof(fp) = fcoe_hdr->fcoe_sof;
+	skb_pull(skb, sizeof(struct fcoe_hdr));
+	transport_len += sizeof(struct fcoe_hdr);
+
+	ft = (struct fcoe_crc_eof *)(skb->data + len -
+				     transport_len - sizeof(*ft));
+	fr_eof(fp) = ft->fcoe_eof;
+	skb_trim(skb, len - transport_len - sizeof(*ft));
+	return 0;
+}
+
+static inline int fnic_handle_flogi_resp(struct fnic *fnic,
+					 struct fc_frame *fp)
+{
+	u8 mac[ETH_ALEN] = FC_FCOE_FLOGI_MAC;
+	struct ethhdr *eth_hdr;
+	struct fc_frame_header *fh;
+	int ret = 0;
+	unsigned long flags;
+	struct fc_frame *old_flogi_resp = NULL;
+
+	fh = (struct fc_frame_header *)fr_hdr(fp);
+
+	spin_lock_irqsave(&fnic->fnic_lock, flags);
+
+	if (fnic->state == FNIC_IN_ETH_MODE) {
+
+		/*
+		 * Check if oxid matches on taking the lock. A new Flogi
+		 * issued by libFC might have changed the fnic cached oxid
+		 */
+		if (fnic->flogi_oxid != ntohs(fh->fh_ox_id)) {
+			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+				     "Flogi response oxid not"
+				     " matching cached oxid, dropping frame"
+				     "\n");
+			ret = -1;
+			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+			dev_kfree_skb_irq(fp_skb(fp));
+			goto handle_flogi_resp_end;
+		}
+
+		/* Drop older cached flogi response frame, cache this frame */
+		old_flogi_resp = fnic->flogi_resp;
+		fnic->flogi_resp = fp;
+		fnic->flogi_oxid = FC_XID_UNKNOWN;
+
+		/*
+		 * This frame is part of the FLOGI exchange; get the src mac
+		 * addr from it.  If the src mac is fcoui based, mark the
+		 * address mode flag to use the fcoui base for the dst mac
+		 * addr; otherwise store the fcoe gateway addr.
+		 */
+		eth_hdr = (struct ethhdr *)skb_mac_header(fp_skb(fp));
+		memcpy(mac, eth_hdr->h_source, ETH_ALEN);
+
+		if (ntoh24(mac) == FC_FCOE_OUI)
+			fnic->fcoui_mode = 1;
+		else {
+			fnic->fcoui_mode = 0;
+			memcpy(fnic->dest_addr, mac, ETH_ALEN);
+		}
+
+		/*
+		 * Except for Flogi frame, all outbound frames from us have the
+		 * Eth Src address as FC_FCOE_OUI"our_sid". Flogi frame uses
+		 * the vnic MAC address as the Eth Src address
+		 */
+		fc_fcoe_set_mac(fnic->data_src_addr, fh->fh_d_id);
+
+		/* We get our s_id from the d_id of the flogi resp frame */
+		fnic->s_id = ntoh24(fh->fh_d_id);
+
+		/* Change state to reflect transition from Eth to FC mode */
+		fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
+
+	} else {
+		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+			     "Unexpected fnic state %s while"
+			     " processing flogi resp\n",
+			     fnic_state_to_str(fnic->state));
+		ret = -1;
+		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+		dev_kfree_skb_irq(fp_skb(fp));
+		goto handle_flogi_resp_end;
+	}
+
+	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+	/* Drop older cached frame */
+	if (old_flogi_resp)
+		dev_kfree_skb_irq(fp_skb(old_flogi_resp));
+
+	/*
+	 * send flogi reg request to firmware; this will put the fnic
+	 * in FC mode
+	 */
+	ret = fnic_flogi_reg_handler(fnic);
+
+	if (ret < 0) {
+		int free_fp = 1;
+		spin_lock_irqsave(&fnic->fnic_lock, flags);
+		/*
+		 * free the frame if no other thread is
+		 * pointing to it
+		 */
+		if (fnic->flogi_resp != fp)
+			free_fp = 0;
+		else
+			fnic->flogi_resp = NULL;
+
+		if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
+			fnic->state = FNIC_IN_ETH_MODE;
+		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+		if (free_fp)
+			dev_kfree_skb_irq(fp_skb(fp));
+	}
+
+ handle_flogi_resp_end:
+	return ret;
+}
+
+/* Returns 1 for a response that matches cached flogi oxid */
+static inline int is_matching_flogi_resp_frame(struct fnic *fnic,
+					       struct fc_frame *fp)
+{
+	struct fc_frame_header *fh;
+	int ret = 0;
+	u32 f_ctl;
+
+	fh = fc_frame_header_get(fp);
+	f_ctl = ntoh24(fh->fh_f_ctl);
+
+	if (fnic->flogi_oxid == ntohs(fh->fh_ox_id) &&
+	    fh->fh_r_ctl == FC_RCTL_ELS_REP &&
+	    (f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) == FC_FC_EX_CTX &&
+	    fh->fh_type == FC_TYPE_ELS)
+		ret = 1;
+
+	return ret;
+}
+
+static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
+				    *cq_desc, struct vnic_rq_buf *buf,
+				    int skipped __attribute__((unused)),
+				    void *opaque)
+{
+	struct fnic *fnic = vnic_dev_priv(rq->vdev);
+	struct sk_buff *skb;
+	struct fc_frame *fp;
+	unsigned int eth_hdrs_stripped;
+	u8 type, color, eop, sop, ingress_port, vlan_stripped;
+	u8 fcoe = 0, fcoe_sof, fcoe_eof;
+	u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
+	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
+	u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc;
+	u8 fcs_ok = 1, packet_error = 0;
+	u16 q_number, completed_index, bytes_written = 0, vlan, checksum;
+	u32 rss_hash;
+	u16 exchange_id, tmpl;
+	u8 sof = 0;
+	u8 eof = 0;
+	u32 fcp_bytes_written = 0;
+	unsigned long flags;
+
+	pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
+			 PCI_DMA_FROMDEVICE);
+	skb = buf->os_buf;
+	buf->os_buf = NULL;
+
+	cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
+	if (type == CQ_DESC_TYPE_RQ_FCP) {
+		cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
+				   &type, &color, &q_number, &completed_index,
+				   &eop, &sop, &fcoe_fc_crc_ok, &exchange_id,
+				   &tmpl, &fcp_bytes_written, &sof, &eof,
+				   &ingress_port, &packet_error,
+				   &fcoe_enc_error, &fcs_ok, &vlan_stripped,
+				   &vlan);
+		eth_hdrs_stripped = 1;
+
+	} else if (type == CQ_DESC_TYPE_RQ_ENET) {
+		cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
+				    &type, &color, &q_number, &completed_index,
+				    &ingress_port, &fcoe, &eop, &sop,
+				    &rss_type, &csum_not_calc, &rss_hash,
+				    &bytes_written, &packet_error,
+				    &vlan_stripped, &vlan, &checksum,
+				    &fcoe_sof, &fcoe_fc_crc_ok,
+				    &fcoe_enc_error, &fcoe_eof,
+				    &tcp_udp_csum_ok, &udp, &tcp,
+				    &ipv4_csum_ok, &ipv6, &ipv4,
+				    &ipv4_fragment, &fcs_ok);
+		eth_hdrs_stripped = 0;
+
+	} else {
+		/* wrong CQ type */
+		shost_printk(KERN_ERR, fnic->lport->host,
+			     "fnic rq_cmpl wrong cq type x%x\n", type);
+		goto drop;
+	}
+
+	if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
+		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+			     "fnic rq_cmpl fcoe x%x fcsok x%x"
+			     " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
+			     " x%x\n",
+			     fcoe, fcs_ok, packet_error,
+			     fcoe_fc_crc_ok, fcoe_enc_error);
+		goto drop;
+	}
+
+	if (eth_hdrs_stripped)
+		fnic_import_rq_fc_frame(skb, fcp_bytes_written, sof, eof);
+	else if (fnic_import_rq_eth_pkt(skb, bytes_written))
+		goto drop;
+
+	fp = (struct fc_frame *)skb;
+
+	/*
+	 * If the frame is an ELS response that matches the cached FLOGI
+	 * OX_ID and is an accept, issue a flogi_reg_request on the copy WQ
+	 * to the firmware to register the S_ID and determine whether to use
+	 * FC_OUI mode or gateway mode.
+	 */
+	if (is_matching_flogi_resp_frame(fnic, fp)) {
+		if (!eth_hdrs_stripped) {
+			if (fc_frame_payload_op(fp) == ELS_LS_ACC) {
+				fnic_handle_flogi_resp(fnic, fp);
+				return;
+			}
+			/*
+			 * Received a FLOGI reject. No point registering
+			 * with firmware, but forward it to libFC
+			 */
+			goto forward;
+		}
+		goto drop;
+	}
+	if (!eth_hdrs_stripped)
+		goto drop;
+
+forward:
+	spin_lock_irqsave(&fnic->fnic_lock, flags);
+	if (fnic->stop_rx_link_events) {
+		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+		goto drop;
+	}
+	/* Use fr_flags to indicate whether this is a successful FLOGI response */
+	fr_flags(fp) = 0;
+	fr_dev(fp) = fnic->lport;
+	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+	skb_queue_tail(&fnic->frame_queue, skb);
+	queue_work(fnic_event_queue, &fnic->frame_work);
+
+	return;
+drop:
+	dev_kfree_skb_irq(skb);
+}
+
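+/*
+ * Called by vnic_cq_service() for each RQ completion descriptor; hands the
+ * descriptor to fnic_rq_cmpl_frame_recv() via vnic_rq_service().
+ */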
+static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
+				     struct cq_desc *cq_desc, u8 type,
+				     u16 q_number, u16 completed_index,
+				     void *opaque)
+{
+	struct fnic *fnic = vnic_dev_priv(vdev);
+
+	vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index,
+			VNIC_RQ_RETURN_DESC, fnic_rq_cmpl_frame_recv,
+			NULL);
+	return 0;
+}
+
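+/*
+ * Service the RQ completion queues for up to rq_work_to_do entries each and
+ * replenish the RQs with fresh frame buffers. Returns total work done.
+ */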
+int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
+{
+	unsigned int tot_rq_work_done = 0, cur_work_done;
+	unsigned int i;
+	int err;
+
+	for (i = 0; i < fnic->rq_count; i++) {
+		cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
+						fnic_rq_cmpl_handler_cont,
+						NULL);
+		if (cur_work_done) {
+			err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
+			if (err)
+				shost_printk(KERN_ERR, fnic->lport->host,
+					     "fnic_alloc_rq_frame can't alloc"
+					     " frame\n");
+		}
+		tot_rq_work_done += cur_work_done;
+	}
+
+	return tot_rq_work_done;
+}
+
+/*
+ * This function is called once at init time to allocate and fill RQ
+ * buffers. Subsequently, it is called in the interrupt context after RQ
+ * buffer processing to replenish the buffers in the RQ
+ */
+int fnic_alloc_rq_frame(struct vnic_rq *rq)
+{
+	struct fnic *fnic = vnic_dev_priv(rq->vdev);
+	struct sk_buff *skb;
+	u16 len;
+	dma_addr_t pa;
+
+	len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
+	skb = dev_alloc_skb(len);
+	if (!skb) {
+		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+			     "Unable to allocate RQ sk_buff\n");
+		return -ENOMEM;
+	}
+	skb_reset_mac_header(skb);
+	skb_reset_transport_header(skb);
+	skb_reset_network_header(skb);
+	skb_put(skb, len);
+	pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE);
+	fnic_queue_rq_desc(rq, skb, pa, len);
+	return 0;
+}
+
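+/*
+ * Free an RQ buffer that was never received on: unmap the DMA mapping and
+ * release the frame. Used by vnic_rq_clean() during queue cleanup.
+ */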
+void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
+{
+	struct fc_frame *fp = buf->os_buf;
+	struct fnic *fnic = vnic_dev_priv(rq->vdev);
+
+	pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
+			 PCI_DMA_FROMDEVICE);
+
+	dev_kfree_skb(fp_skb(fp));
+	buf->os_buf = NULL;
+}
+
+static inline int is_flogi_frame(struct fc_frame_header *fh)
+{
+	return fh->fh_r_ctl == FC_RCTL_ELS_REQ && *(u8 *)(fh + 1) == ELS_FLOGI;
+}
+
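+/*
+ * Encapsulate an FC frame with Ethernet (optionally 802.1Q tagged) and FCoE
+ * headers and post it on the raw WQ. The frame is freed here on failure;
+ * otherwise it is freed from the WQ completion handler.
+ */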
+int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
+{
+	struct vnic_wq *wq = &fnic->wq[0];
+	struct sk_buff *skb;
+	dma_addr_t pa;
+	struct ethhdr *eth_hdr;
+	struct vlan_ethhdr *vlan_hdr;
+	struct fcoe_hdr *fcoe_hdr;
+	struct fc_frame_header *fh;
+	u32 tot_len, eth_hdr_len;
+	int ret = 0;
+	unsigned long flags;
+
+	fh = fc_frame_header_get(fp);
+	skb = fp_skb(fp);
+
+	if (!fnic->vlan_hw_insert) {
+		eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
+		vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, eth_hdr_len);
+		eth_hdr = (struct ethhdr *)vlan_hdr;
+		vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
+		vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE);
+		vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
+		fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1);
+	} else {
+		eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr);
+		eth_hdr = (struct ethhdr *)skb_push(skb, eth_hdr_len);
+		eth_hdr->h_proto = htons(ETH_P_FCOE);
+		fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
+	}
+
+	if (is_flogi_frame(fh)) {
+		fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
+		memcpy(eth_hdr->h_source, fnic->mac_addr, ETH_ALEN);
+	} else {
+		if (fnic->fcoui_mode)
+			fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
+		else
+			memcpy(eth_hdr->h_dest, fnic->dest_addr, ETH_ALEN);
+		memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);
+	}
+
+	tot_len = skb->len;
+	BUG_ON(tot_len % 4);
+
+	memset(fcoe_hdr, 0, sizeof(*fcoe_hdr));
+	fcoe_hdr->fcoe_sof = fr_sof(fp);
+	if (FC_FCOE_VER)
+		FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);
+
+	pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE);
+
+	spin_lock_irqsave(&fnic->wq_lock[0], flags);
+
+	if (!vnic_wq_desc_avail(wq)) {
+		pci_unmap_single(fnic->pdev, pa,
+				 tot_len, PCI_DMA_TODEVICE);
+		ret = -1;
+		goto fnic_send_frame_end;
+	}
+
+	fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
+			   fnic->vlan_hw_insert, fnic->vlan_id, 1, 1, 1);
+fnic_send_frame_end:
+	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
+
+	if (ret)
+		dev_kfree_skb_any(fp_skb(fp));
+
+	return ret;
+}
+
+/*
+ * fnic_send
+ * Routine to send a raw frame
+ */
+int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
+{
+	struct fnic *fnic = lport_priv(lp);
+	struct fc_frame_header *fh;
+	int ret = 0;
+	enum fnic_state old_state;
+	unsigned long flags;
+	struct fc_frame *old_flogi = NULL;
+	struct fc_frame *old_flogi_resp = NULL;
+
+	if (fnic->in_remove) {
+		dev_kfree_skb(fp_skb(fp));
+		ret = -1;
+		goto fnic_send_end;
+	}
+
+	fh = fc_frame_header_get(fp);
+	/* If not a FLOGI frame, send it out; this is the common case */
+	if (!is_flogi_frame(fh))
+		return fnic_send_frame(fnic, fp);
+
+	/* Flogi frame, now enter the state machine */
+
+	spin_lock_irqsave(&fnic->fnic_lock, flags);
+again:
+	/* Get any old cached frames, free them after dropping lock */
+	old_flogi = fnic->flogi;
+	fnic->flogi = NULL;
+	old_flogi_resp = fnic->flogi_resp;
+	fnic->flogi_resp = NULL;
+
+	fnic->flogi_oxid = FC_XID_UNKNOWN;
+
+	old_state = fnic->state;
+	switch (old_state) {
+	case FNIC_IN_FC_MODE:
+	case FNIC_IN_ETH_TRANS_FC_MODE:
+	default:
+		fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
+		vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr);
+		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+		if (old_flogi) {
+			dev_kfree_skb(fp_skb(old_flogi));
+			old_flogi = NULL;
+		}
+		if (old_flogi_resp) {
+			dev_kfree_skb(fp_skb(old_flogi_resp));
+			old_flogi_resp = NULL;
+		}
+
+		ret = fnic_fw_reset_handler(fnic);
+
+		spin_lock_irqsave(&fnic->fnic_lock, flags);
+		if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
+			goto again;
+		if (ret) {
+			fnic->state = old_state;
+			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+			dev_kfree_skb(fp_skb(fp));
+			goto fnic_send_end;
+		}
+		old_flogi = fnic->flogi;
+		fnic->flogi = fp;
+		fnic->flogi_oxid = ntohs(fh->fh_ox_id);
+		old_flogi_resp = fnic->flogi_resp;
+		fnic->flogi_resp = NULL;
+		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+		break;
+
+	case FNIC_IN_FC_TRANS_ETH_MODE:
+		/*
+		 * A reset is pending with the firmware. Store the flogi
+		 * and its oxid. The transition out of this state happens
+		 * only when the firmware completes the reset, either
+		 * successfully or with failure. On success, transition to
+		 * FNIC_IN_ETH_MODE; on failure, transition to
+		 * FNIC_IN_FC_MODE
+		 */
+		fnic->flogi = fp;
+		fnic->flogi_oxid = ntohs(fh->fh_ox_id);
+		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+		break;
+
+	case FNIC_IN_ETH_MODE:
+		/*
+		 * The fw/hw is already in eth mode. Store the oxid,
+		 * and send the flogi frame out. The transition out of this
+		 * state happens only when we receive a flogi response from
+		 * the network and its oxid matches the oxid cached when the
+		 * flogi frame was sent out. If they match, we issue
+		 * a flogi_reg request and transition to state
+		 * FNIC_IN_ETH_TRANS_FC_MODE
+		 */
+		fnic->flogi_oxid = ntohs(fh->fh_ox_id);
+		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+		ret = fnic_send_frame(fnic, fp);
+		break;
+	}
+
+fnic_send_end:
+	if (old_flogi)
+		dev_kfree_skb(fp_skb(old_flogi));
+	if (old_flogi_resp)
+		dev_kfree_skb(fp_skb(old_flogi_resp));
+	return ret;
+}
+
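+/*
+ * WQ send-completion callback: unmap the transmit buffer and free the frame
+ * that was posted by fnic_send_frame().
+ */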
+static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
+					struct cq_desc *cq_desc,
+					struct vnic_wq_buf *buf, void *opaque)
+{
+	struct sk_buff *skb = buf->os_buf;
+	struct fc_frame *fp = (struct fc_frame *)skb;
+	struct fnic *fnic = vnic_dev_priv(wq->vdev);
+
+	pci_unmap_single(fnic->pdev, buf->dma_addr,
+			 buf->len, PCI_DMA_TODEVICE);
+	dev_kfree_skb_irq(fp_skb(fp));
+	buf->os_buf = NULL;
+}
+
+static int fnic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
+				     struct cq_desc *cq_desc, u8 type,
+				     u16 q_number, u16 completed_index,
+				     void *opaque)
+{
+	struct fnic *fnic = vnic_dev_priv(vdev);
+	unsigned long flags;
+
+	spin_lock_irqsave(&fnic->wq_lock[q_number], flags);
+	vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index,
+			fnic_wq_complete_frame_send, NULL);
+	spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags);
+
+	return 0;
+}
+
+int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
+{
+	unsigned int wq_work_done = 0;
+	unsigned int i;
+
+	for (i = 0; i < fnic->raw_wq_count; i++) {
+		wq_work_done  += vnic_cq_service(&fnic->cq[fnic->rq_count+i],
+						 work_to_do,
+						 fnic_wq_cmpl_handler_cont,
+						 NULL);
+	}
+
+	return wq_work_done;
+}
+
+
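+/*
+ * Free a WQ buffer that was posted but never completed; used by
+ * vnic_wq_clean() during queue cleanup.
+ */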
+void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
+{
+	struct fc_frame *fp = buf->os_buf;
+	struct fnic *fnic = vnic_dev_priv(wq->vdev);
+
+	pci_unmap_single(fnic->pdev, buf->dma_addr,
+			 buf->len, PCI_DMA_TODEVICE);
+
+	dev_kfree_skb(fp_skb(fp));
+	buf->os_buf = NULL;
+}
diff --git a/drivers/scsi/fnic/fnic_io.h b/drivers/scsi/fnic/fnic_io.h
new file mode 100644
index 0000000..f0b8969
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_io.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _FNIC_IO_H_
+#define _FNIC_IO_H_
+
+#include <scsi/fc/fc_fcp.h>
+
+#define FNIC_DFLT_SG_DESC_CNT  32
+#define FNIC_MAX_SG_DESC_CNT        1024    /* Maximum descriptors per sgl */
+#define FNIC_SG_DESC_ALIGN          16      /* Descriptor address alignment */
+
+struct host_sg_desc {
+	__le64 addr;
+	__le32 len;
+	u32 _resvd;
+};
+
+struct fnic_dflt_sgl_list {
+	struct host_sg_desc sg_desc[FNIC_DFLT_SG_DESC_CNT];
+};
+
+struct fnic_sgl_list {
+	struct host_sg_desc sg_desc[FNIC_MAX_SG_DESC_CNT];
+};
+
+enum fnic_sgl_list_type {
+	FNIC_SGL_CACHE_DFLT = 0,  /* cache with default size sgl */
+	FNIC_SGL_CACHE_MAX,       /* cache with max size sgl */
+	FNIC_SGL_NUM_CACHES       /* number of sgl caches */
+};
+
+enum fnic_ioreq_state {
+	FNIC_IOREQ_CMD_PENDING = 0,
+	FNIC_IOREQ_ABTS_PENDING,
+	FNIC_IOREQ_ABTS_COMPLETE,
+	FNIC_IOREQ_CMD_COMPLETE,
+};
+
+struct fnic_io_req {
+	struct host_sg_desc *sgl_list; /* sgl list */
+	void *sgl_list_alloc; /* sgl list address used for free */
+	dma_addr_t sense_buf_pa; /* dma address for sense buffer*/
+	dma_addr_t sgl_list_pa;	/* dma address for sgl list */
+	u16 sgl_cnt;
+	u8 sgl_type; /* device DMA descriptor list type */
+	u8 io_completed:1; /* set to 1 when fw completes IO */
+	u32 port_id; /* remote port DID */
+	struct completion *abts_done; /* completion for abts */
+	struct completion *dr_done; /* completion for device reset */
+};
+
+#endif /* _FNIC_IO_H_ */
diff --git a/drivers/scsi/fnic/fnic_isr.c b/drivers/scsi/fnic/fnic_isr.c
new file mode 100644
index 0000000..2b30648
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_isr.c
@@ -0,0 +1,332 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <scsi/libfc.h>
+#include <scsi/fc_frame.h>
+#include "vnic_dev.h"
+#include "vnic_intr.h"
+#include "vnic_stats.h"
+#include "fnic_io.h"
+#include "fnic.h"
+
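+/*
+ * Legacy INTx interrupt handler: read the PBA to see which of the notify,
+ * error or WQ/RQ/copy-WQ sources asserted, and service each of them.
+ */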
+static irqreturn_t fnic_isr_legacy(int irq, void *data)
+{
+	struct fnic *fnic = data;
+	u32 pba;
+	unsigned long work_done = 0;
+
+	pba = vnic_intr_legacy_pba(fnic->legacy_pba);
+	if (!pba)
+		return IRQ_NONE;
+
+	if (pba & (1 << FNIC_INTX_NOTIFY)) {
+		vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_NOTIFY]);
+		fnic_handle_link_event(fnic);
+	}
+
+	if (pba & (1 << FNIC_INTX_ERR)) {
+		vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_ERR]);
+		fnic_log_q_error(fnic);
+	}
+
+	if (pba & (1 << FNIC_INTX_WQ_RQ_COPYWQ)) {
+		work_done += fnic_wq_copy_cmpl_handler(fnic, 8);
+		work_done += fnic_wq_cmpl_handler(fnic, 4);
+		work_done += fnic_rq_cmpl_handler(fnic, 4);
+
+		vnic_intr_return_credits(&fnic->intr[FNIC_INTX_WQ_RQ_COPYWQ],
+					 work_done,
+					 1 /* unmask intr */,
+					 1 /* reset intr timer */);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t fnic_isr_msi(int irq, void *data)
+{
+	struct fnic *fnic = data;
+	unsigned long work_done = 0;
+
+	work_done += fnic_wq_copy_cmpl_handler(fnic, 8);
+	work_done += fnic_wq_cmpl_handler(fnic, 4);
+	work_done += fnic_rq_cmpl_handler(fnic, 4);
+
+	vnic_intr_return_credits(&fnic->intr[0],
+				 work_done,
+				 1 /* unmask intr */,
+				 1 /* reset intr timer */);
+
+	return IRQ_HANDLED;
+}
+
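+/*
+ * MSI-X handlers: each vector services a single source - RQ, raw WQ,
+ * copy WQ, or error/notify.
+ */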
+static irqreturn_t fnic_isr_msix_rq(int irq, void *data)
+{
+	struct fnic *fnic = data;
+	unsigned long rq_work_done = 0;
+
+	rq_work_done = fnic_rq_cmpl_handler(fnic, 4);
+	vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_RQ],
+				 rq_work_done,
+				 1 /* unmask intr */,
+				 1 /* reset intr timer */);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t fnic_isr_msix_wq(int irq, void *data)
+{
+	struct fnic *fnic = data;
+	unsigned long wq_work_done = 0;
+
+	wq_work_done = fnic_wq_cmpl_handler(fnic, 4);
+	vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ],
+				 wq_work_done,
+				 1 /* unmask intr */,
+				 1 /* reset intr timer */);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t fnic_isr_msix_wq_copy(int irq, void *data)
+{
+	struct fnic *fnic = data;
+	unsigned long wq_copy_work_done = 0;
+
+	wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, 8);
+	vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ_COPY],
+				 wq_copy_work_done,
+				 1 /* unmask intr */,
+				 1 /* reset intr timer */);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t fnic_isr_msix_err_notify(int irq, void *data)
+{
+	struct fnic *fnic = data;
+
+	vnic_intr_return_all_credits(&fnic->intr[FNIC_MSIX_ERR_NOTIFY]);
+	fnic_log_q_error(fnic);
+	fnic_handle_link_event(fnic);
+
+	return IRQ_HANDLED;
+}
+
+void fnic_free_intr(struct fnic *fnic)
+{
+	int i;
+
+	switch (vnic_dev_get_intr_mode(fnic->vdev)) {
+	case VNIC_DEV_INTR_MODE_INTX:
+	case VNIC_DEV_INTR_MODE_MSI:
+		free_irq(fnic->pdev->irq, fnic);
+		break;
+
+	case VNIC_DEV_INTR_MODE_MSIX:
+		for (i = 0; i < ARRAY_SIZE(fnic->msix); i++)
+			if (fnic->msix[i].requested)
+				free_irq(fnic->msix_entry[i].vector,
+					 fnic->msix[i].devid);
+		break;
+
+	default:
+		break;
+	}
+}
+
+int fnic_request_intr(struct fnic *fnic)
+{
+	int err = 0;
+	int i;
+
+	switch (vnic_dev_get_intr_mode(fnic->vdev)) {
+
+	case VNIC_DEV_INTR_MODE_INTX:
+		err = request_irq(fnic->pdev->irq, &fnic_isr_legacy,
+				  IRQF_SHARED, DRV_NAME, fnic);
+		break;
+
+	case VNIC_DEV_INTR_MODE_MSI:
+		err = request_irq(fnic->pdev->irq, &fnic_isr_msi,
+				  0, fnic->name, fnic);
+		break;
+
+	case VNIC_DEV_INTR_MODE_MSIX:
+
+		sprintf(fnic->msix[FNIC_MSIX_RQ].devname,
+			"%.11s-fcs-rq", fnic->name);
+		fnic->msix[FNIC_MSIX_RQ].isr = fnic_isr_msix_rq;
+		fnic->msix[FNIC_MSIX_RQ].devid = fnic;
+
+		sprintf(fnic->msix[FNIC_MSIX_WQ].devname,
+			"%.11s-fcs-wq", fnic->name);
+		fnic->msix[FNIC_MSIX_WQ].isr = fnic_isr_msix_wq;
+		fnic->msix[FNIC_MSIX_WQ].devid = fnic;
+
+		sprintf(fnic->msix[FNIC_MSIX_WQ_COPY].devname,
+			"%.11s-scsi-wq", fnic->name);
+		fnic->msix[FNIC_MSIX_WQ_COPY].isr = fnic_isr_msix_wq_copy;
+		fnic->msix[FNIC_MSIX_WQ_COPY].devid = fnic;
+
+		sprintf(fnic->msix[FNIC_MSIX_ERR_NOTIFY].devname,
+			"%.11s-err-notify", fnic->name);
+		fnic->msix[FNIC_MSIX_ERR_NOTIFY].isr =
+			fnic_isr_msix_err_notify;
+		fnic->msix[FNIC_MSIX_ERR_NOTIFY].devid = fnic;
+
+		for (i = 0; i < ARRAY_SIZE(fnic->msix); i++) {
+			err = request_irq(fnic->msix_entry[i].vector,
+					  fnic->msix[i].isr, 0,
+					  fnic->msix[i].devname,
+					  fnic->msix[i].devid);
+			if (err) {
+				shost_printk(KERN_ERR, fnic->lport->host,
+					     "MSIX: request_irq"
+					     " failed %d\n", err);
+				fnic_free_intr(fnic);
+				break;
+			}
+			fnic->msix[i].requested = 1;
+		}
+		break;
+
+	default:
+		break;
+	}
+
+	return err;
+}
+
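+/*
+ * Select the interrupt mode: try MSI-X first, then MSI, then legacy INTx,
+ * trimming the queue and interrupt counts to what each mode supports.
+ */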
+int fnic_set_intr_mode(struct fnic *fnic)
+{
+	unsigned int n = ARRAY_SIZE(fnic->rq);
+	unsigned int m = ARRAY_SIZE(fnic->wq);
+	unsigned int o = ARRAY_SIZE(fnic->wq_copy);
+	unsigned int i;
+
+	/*
+	 * Set interrupt mode (INTx, MSI, MSI-X) depending on
+	 * system capabilities.
+	 *
+	 * Try MSI-X first
+	 *
+	 * We need n RQs, m WQs, o Copy WQs, n+m+o CQs, and n+m+o+1 INTRs
+	 * (last INTR is used for WQ/RQ errors and notification area)
+	 */
+
+	BUG_ON(ARRAY_SIZE(fnic->msix_entry) < n + m + o + 1);
+	for (i = 0; i < n + m + o + 1; i++)
+		fnic->msix_entry[i].entry = i;
+
+	if (fnic->rq_count >= n &&
+	    fnic->raw_wq_count >= m &&
+	    fnic->wq_copy_count >= o &&
+	    fnic->cq_count >= n + m + o) {
+		if (!pci_enable_msix(fnic->pdev, fnic->msix_entry,
+				    n + m + o + 1)) {
+			fnic->rq_count = n;
+			fnic->raw_wq_count = m;
+			fnic->wq_copy_count = o;
+			fnic->wq_count = m + o;
+			fnic->cq_count = n + m + o;
+			fnic->intr_count = n + m + o + 1;
+			fnic->err_intr_offset = FNIC_MSIX_ERR_NOTIFY;
+
+			FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
+				     "Using MSI-X Interrupts\n");
+			vnic_dev_set_intr_mode(fnic->vdev,
+					       VNIC_DEV_INTR_MODE_MSIX);
+			return 0;
+		}
+	}
+
+	/*
+	 * Next try MSI
+	 * We need 1 RQ, 1 WQ, 1 WQ_COPY, 3 CQs, and 1 INTR
+	 */
+	if (fnic->rq_count >= 1 &&
+	    fnic->raw_wq_count >= 1 &&
+	    fnic->wq_copy_count >= 1 &&
+	    fnic->cq_count >= 3 &&
+	    fnic->intr_count >= 1 &&
+	    !pci_enable_msi(fnic->pdev)) {
+
+		fnic->rq_count = 1;
+		fnic->raw_wq_count = 1;
+		fnic->wq_copy_count = 1;
+		fnic->wq_count = 2;
+		fnic->cq_count = 3;
+		fnic->intr_count = 1;
+		fnic->err_intr_offset = 0;
+
+		FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
+			     "Using MSI Interrupts\n");
+		vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_MSI);
+
+		return 0;
+	}
+
+	/*
+	 * Next try INTx
+	 * We need 1 RQ, 1 WQ, 1 WQ_COPY, 3 CQs, and 3 INTRs
+	 * 1 INTR is used for all 3 queues, 1 INTR for queue errors
+	 * 1 INTR for notification area
+	 */
+
+	if (fnic->rq_count >= 1 &&
+	    fnic->raw_wq_count >= 1 &&
+	    fnic->wq_copy_count >= 1 &&
+	    fnic->cq_count >= 3 &&
+	    fnic->intr_count >= 3) {
+
+		fnic->rq_count = 1;
+		fnic->raw_wq_count = 1;
+		fnic->wq_copy_count = 1;
+		fnic->cq_count = 3;
+		fnic->intr_count = 3;
+
+		FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
+			     "Using Legacy Interrupts\n");
+		vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX);
+
+		return 0;
+	}
+
+	vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
+
+	return -EINVAL;
+}
+
+void fnic_clear_intr_mode(struct fnic *fnic)
+{
+	switch (vnic_dev_get_intr_mode(fnic->vdev)) {
+	case VNIC_DEV_INTR_MODE_MSIX:
+		pci_disable_msix(fnic->pdev);
+		break;
+	case VNIC_DEV_INTR_MODE_MSI:
+		pci_disable_msi(fnic->pdev);
+		break;
+	default:
+		break;
+	}
+
+	vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX);
+}
+
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
new file mode 100644
index 0000000..32ef6b8
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -0,0 +1,942 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/module.h>
+#include <linux/mempool.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/skbuff.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/libfc.h>
+#include <scsi/fc_frame.h>
+
+#include "vnic_dev.h"
+#include "vnic_intr.h"
+#include "vnic_stats.h"
+#include "fnic_io.h"
+#include "fnic.h"
+
+#define PCI_DEVICE_ID_CISCO_FNIC	0x0045
+
+/* Timer to poll notification area for events. Used for MSI interrupts */
+#define FNIC_NOTIFY_TIMER_PERIOD	(2 * HZ)
+
+static struct kmem_cache *fnic_sgl_cache[FNIC_SGL_NUM_CACHES];
+static struct kmem_cache *fnic_io_req_cache;
+LIST_HEAD(fnic_list);
+DEFINE_SPINLOCK(fnic_list_lock);
+
+/* Devices supported by the fnic module */
+static struct pci_device_id fnic_id_table[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_FNIC) },
+	{ 0, }
+};
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_AUTHOR("Abhijeet Joglekar <abjoglek@cisco.com>, "
+	      "Joseph R. Eykholt <jeykholt@cisco.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, fnic_id_table);
+
+unsigned int fnic_log_level;
+module_param(fnic_log_level, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(fnic_log_level, "bit mask of fnic logging levels");
+
+
+static struct libfc_function_template fnic_transport_template = {
+	.frame_send = fnic_send,
+	.fcp_abort_io = fnic_empty_scsi_cleanup,
+	.fcp_cleanup = fnic_empty_scsi_cleanup,
+	.exch_mgr_reset = fnic_exch_mgr_reset
+};
+
+static int fnic_slave_alloc(struct scsi_device *sdev)
+{
+	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+	struct fc_lport *lp = shost_priv(sdev->host);
+	struct fnic *fnic = lport_priv(lp);
+
+	sdev->tagged_supported = 1;
+
+	if (!rport || fc_remote_port_chkready(rport))
+		return -ENXIO;
+
+	scsi_activate_tcq(sdev, FNIC_DFLT_QUEUE_DEPTH);
+	rport->dev_loss_tmo = fnic->config.port_down_timeout / 1000;
+
+	return 0;
+}
+
+static struct scsi_host_template fnic_host_template = {
+	.module = THIS_MODULE,
+	.name = DRV_NAME,
+	.queuecommand = fnic_queuecommand,
+	.eh_abort_handler = fnic_abort_cmd,
+	.eh_device_reset_handler = fnic_device_reset,
+	.eh_host_reset_handler = fnic_host_reset,
+	.slave_alloc = fnic_slave_alloc,
+	.change_queue_depth = fc_change_queue_depth,
+	.change_queue_type = fc_change_queue_type,
+	.this_id = -1,
+	.cmd_per_lun = 3,
+	.can_queue = FNIC_MAX_IO_REQ,
+	.use_clustering = ENABLE_CLUSTERING,
+	.sg_tablesize = FNIC_MAX_SG_DESC_CNT,
+	.max_sectors = 0xffff,
+	.shost_attrs = fnic_attrs,
+};
+
+static void fnic_get_host_speed(struct Scsi_Host *shost);
+static struct scsi_transport_template *fnic_fc_transport;
+static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *);
+
+static struct fc_function_template fnic_fc_functions = {
+
+	.show_host_node_name = 1,
+	.show_host_port_name = 1,
+	.show_host_supported_classes = 1,
+	.show_host_supported_fc4s = 1,
+	.show_host_active_fc4s = 1,
+	.show_host_maxframe_size = 1,
+	.show_host_port_id = 1,
+	.show_host_supported_speeds = 1,
+	.get_host_speed = fnic_get_host_speed,
+	.show_host_speed = 1,
+	.show_host_port_type = 1,
+	.get_host_port_state = fc_get_host_port_state,
+	.show_host_port_state = 1,
+	.show_host_symbolic_name = 1,
+	.show_rport_maxframe_size = 1,
+	.show_rport_supported_classes = 1,
+	.show_host_fabric_name = 1,
+	.show_starget_node_name = 1,
+	.show_starget_port_name = 1,
+	.show_starget_port_id = 1,
+	.show_rport_dev_loss_tmo = 1,
+	.issue_fc_host_lip = fnic_reset,
+	.get_fc_host_stats = fnic_get_stats,
+	.dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
+	.terminate_rport_io = fnic_terminate_rport_io,
+};
+
+static void fnic_get_host_speed(struct Scsi_Host *shost)
+{
+	struct fc_lport *lp = shost_priv(shost);
+	struct fnic *fnic = lport_priv(lp);
+	u32 port_speed = vnic_dev_port_speed(fnic->vdev);
+
+	/* Add in other values as they get defined in fw */
+	switch (port_speed) {
+	case 10000:
+		fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
+		break;
+	default:
+		fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
+		break;
+	}
+}
+
+static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *host)
+{
+	int ret;
+	struct fc_lport *lp = shost_priv(host);
+	struct fnic *fnic = lport_priv(lp);
+	struct fc_host_statistics *stats = &lp->host_stats;
+	struct vnic_stats *vs;
+	unsigned long flags;
+
+	if (time_before(jiffies, fnic->stats_time + HZ / FNIC_STATS_RATE_LIMIT))
+		return stats;
+	fnic->stats_time = jiffies;
+
+	spin_lock_irqsave(&fnic->fnic_lock, flags);
+	ret = vnic_dev_stats_dump(fnic->vdev, &fnic->stats);
+	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+	if (ret) {
+		FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host,
+			      "fnic: Get vnic stats failed"
+			      " 0x%x", ret);
+		return stats;
+	}
+	vs = fnic->stats;
+	stats->tx_frames = vs->tx.tx_unicast_frames_ok;
+	stats->tx_words  = vs->tx.tx_unicast_bytes_ok / 4;
+	stats->rx_frames = vs->rx.rx_unicast_frames_ok;
+	stats->rx_words  = vs->rx.rx_unicast_bytes_ok / 4;
+	stats->error_frames = vs->tx.tx_errors + vs->rx.rx_errors;
+	stats->dumped_frames = vs->tx.tx_drops + vs->rx.rx_drop;
+	stats->invalid_crc_count = vs->rx.rx_crc_errors;
+	stats->seconds_since_last_reset = (jiffies - lp->boot_time) / HZ;
+	stats->fcp_input_megabytes = div_u64(fnic->fcp_input_bytes, 1000000);
+	stats->fcp_output_megabytes = div_u64(fnic->fcp_output_bytes, 1000000);
+
+	return stats;
+}
+
+void fnic_log_q_error(struct fnic *fnic)
+{
+	unsigned int i;
+	u32 error_status;
+
+	for (i = 0; i < fnic->raw_wq_count; i++) {
+		error_status = ioread32(&fnic->wq[i].ctrl->error_status);
+		if (error_status)
+			shost_printk(KERN_ERR, fnic->lport->host,
+				     "WQ[%d] error_status"
+				     " %d\n", i, error_status);
+	}
+
+	for (i = 0; i < fnic->rq_count; i++) {
+		error_status = ioread32(&fnic->rq[i].ctrl->error_status);
+		if (error_status)
+			shost_printk(KERN_ERR, fnic->lport->host,
+				     "RQ[%d] error_status"
+				     " %d\n", i, error_status);
+	}
+
+	for (i = 0; i < fnic->wq_copy_count; i++) {
+		error_status = ioread32(&fnic->wq_copy[i].ctrl->error_status);
+		if (error_status)
+			shost_printk(KERN_ERR, fnic->lport->host,
+				     "CWQ[%d] error_status"
+				     " %d\n", i, error_status);
+	}
+}
+
+void fnic_handle_link_event(struct fnic *fnic)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&fnic->fnic_lock, flags);
+	if (fnic->stop_rx_link_events) {
+		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+	queue_work(fnic_event_queue, &fnic->link_work);
+
+}
+
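+/*
+ * Tell the firmware which interrupt index receives notification-area
+ * updates. In MSI mode -1 is passed and the driver instead polls the
+ * area from fnic_notify_timer().
+ */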
+static int fnic_notify_set(struct fnic *fnic)
+{
+	int err;
+
+	switch (vnic_dev_get_intr_mode(fnic->vdev)) {
+	case VNIC_DEV_INTR_MODE_INTX:
+		err = vnic_dev_notify_set(fnic->vdev, FNIC_INTX_NOTIFY);
+		break;
+	case VNIC_DEV_INTR_MODE_MSI:
+		err = vnic_dev_notify_set(fnic->vdev, -1);
+		break;
+	case VNIC_DEV_INTR_MODE_MSIX:
+		err = vnic_dev_notify_set(fnic->vdev, FNIC_MSIX_ERR_NOTIFY);
+		break;
+	default:
+		shost_printk(KERN_ERR, fnic->lport->host,
+			     "Interrupt mode should be set up"
+			     " before devcmd notify set %d\n",
+			     vnic_dev_get_intr_mode(fnic->vdev));
+		err = -1;
+		break;
+	}
+
+	return err;
+}
+
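+/* Timer callback used in MSI mode to poll for link events periodically */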
+static void fnic_notify_timer(unsigned long data)
+{
+	struct fnic *fnic = (struct fnic *)data;
+
+	fnic_handle_link_event(fnic);
+	mod_timer(&fnic->notify_timer,
+		  round_jiffies(jiffies + FNIC_NOTIFY_TIMER_PERIOD));
+}
+
+static void fnic_notify_timer_start(struct fnic *fnic)
+{
+	switch (vnic_dev_get_intr_mode(fnic->vdev)) {
+	case VNIC_DEV_INTR_MODE_MSI:
+		/*
+		 * Schedule the first timeout immediately. The driver is
+		 * initialized and ready to look for link up notifications
+		 */
+		mod_timer(&fnic->notify_timer, jiffies);
+		break;
+	default:
+		/* Using intr for notification for INTx/MSI-X */
+		break;
+	}
+}
+
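+/*
+ * Start a vNIC device operation and poll its completion function until it
+ * finishes, with a 2 second timeout.
+ */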
+static int fnic_dev_wait(struct vnic_dev *vdev,
+			 int (*start)(struct vnic_dev *, int),
+			 int (*finished)(struct vnic_dev *, int *),
+			 int arg)
+{
+	unsigned long time;
+	int done;
+	int err;
+
+	err = start(vdev, arg);
+	if (err)
+		return err;
+
+	/* Wait for func to complete...2 seconds max */
+	time = jiffies + (HZ * 2);
+	do {
+		err = finished(vdev, &done);
+		if (err)
+			return err;
+		if (done)
+			return 0;
+		schedule_timeout_uninterruptible(HZ / 10);
+	} while (time_after(time, jiffies));
+
+	return -ETIMEDOUT;
+}
+
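+/*
+ * Quiesce the device: disable the vNIC, interrupts and queues, drain
+ * completed work, clean up posted but uncompleted entries, free any cached
+ * FLOGI frames, and destroy the IO mempools.
+ */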
+static int fnic_cleanup(struct fnic *fnic)
+{
+	unsigned int i;
+	int err;
+	unsigned long flags;
+	struct fc_frame *flogi = NULL;
+	struct fc_frame *flogi_resp = NULL;
+
+	vnic_dev_disable(fnic->vdev);
+	for (i = 0; i < fnic->intr_count; i++)
+		vnic_intr_mask(&fnic->intr[i]);
+
+	for (i = 0; i < fnic->rq_count; i++) {
+		err = vnic_rq_disable(&fnic->rq[i]);
+		if (err)
+			return err;
+	}
+	for (i = 0; i < fnic->raw_wq_count; i++) {
+		err = vnic_wq_disable(&fnic->wq[i]);
+		if (err)
+			return err;
+	}
+	for (i = 0; i < fnic->wq_copy_count; i++) {
+		err = vnic_wq_copy_disable(&fnic->wq_copy[i]);
+		if (err)
+			return err;
+	}
+
+	/* Clean up completed IOs and FCS frames */
+	fnic_wq_copy_cmpl_handler(fnic, -1);
+	fnic_wq_cmpl_handler(fnic, -1);
+	fnic_rq_cmpl_handler(fnic, -1);
+
+	/* Clean up the IOs and FCS frames that have not completed */
+	for (i = 0; i < fnic->raw_wq_count; i++)
+		vnic_wq_clean(&fnic->wq[i], fnic_free_wq_buf);
+	for (i = 0; i < fnic->rq_count; i++)
+		vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
+	for (i = 0; i < fnic->wq_copy_count; i++)
+		vnic_wq_copy_clean(&fnic->wq_copy[i],
+				   fnic_wq_copy_cleanup_handler);
+
+	for (i = 0; i < fnic->cq_count; i++)
+		vnic_cq_clean(&fnic->cq[i]);
+	for (i = 0; i < fnic->intr_count; i++)
+		vnic_intr_clean(&fnic->intr[i]);
+
+	/*
+	 * Remove cached flogi and flogi resp frames, if any.
+	 * These frames are not in any queue, and therefore queue
+	 * cleanup does not clean them. So clean them explicitly
+	 */
+	spin_lock_irqsave(&fnic->fnic_lock, flags);
+	flogi = fnic->flogi;
+	fnic->flogi = NULL;
+	flogi_resp = fnic->flogi_resp;
+	fnic->flogi_resp = NULL;
+	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+	if (flogi)
+		dev_kfree_skb(fp_skb(flogi));
+
+	if (flogi_resp)
+		dev_kfree_skb(fp_skb(flogi_resp));
+
+	mempool_destroy(fnic->io_req_pool);
+	for (i = 0; i < FNIC_SGL_NUM_CACHES; i++)
+		mempool_destroy(fnic->io_sgl_pool[i]);
+
+	return 0;
+}
+
+static void fnic_iounmap(struct fnic *fnic)
+{
+	if (fnic->bar0.vaddr)
+		iounmap(fnic->bar0.vaddr);
+}
+
+/*
+ * Allocate element for mempools requiring GFP_DMA flag.
+ * Otherwise, checks in kmem_flagcheck() hit BUG_ON().
+ */
+static void *fnic_alloc_slab_dma(gfp_t gfp_mask, void *pool_data)
+{
+	struct kmem_cache *mem = pool_data;
+
+	return kmem_cache_alloc(mem, gfp_mask | GFP_ATOMIC | GFP_DMA);
+}
+
+static int __devinit fnic_probe(struct pci_dev *pdev,
+				const struct pci_device_id *ent)
+{
+	struct Scsi_Host *host;
+	struct fc_lport *lp;
+	struct fnic *fnic;
+	mempool_t *pool;
+	int err;
+	int i;
+	unsigned long flags;
+
+	/*
+	 * Allocate SCSI Host and set up association between host,
+	 * local port, and fnic
+	 */
+	host = scsi_host_alloc(&fnic_host_template,
+			       sizeof(struct fc_lport) + sizeof(struct fnic));
+	if (!host) {
+		printk(KERN_ERR PFX "Unable to alloc SCSI host\n");
+		err = -ENOMEM;
+		goto err_out;
+	}
+	lp = shost_priv(host);
+	lp->host = host;
+	fnic = lport_priv(lp);
+	fnic->lport = lp;
+
+	snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME,
+		 host->host_no);
+
+	host->transportt = fnic_fc_transport;
+
+	err = scsi_init_shared_tag_map(host, FNIC_MAX_IO_REQ);
+	if (err) {
+		shost_printk(KERN_ERR, fnic->lport->host,
+			     "Unable to alloc shared tag map\n");
+		goto err_out_free_hba;
+	}
+
+	/* Setup PCI resources */
+	pci_set_drvdata(pdev, fnic);
+
+	fnic->pdev = pdev;
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		shost_printk(KERN_ERR, fnic->lport->host,
+			     "Cannot enable PCI device, aborting.\n");
+		goto err_out_free_hba;
+	}
+
+	err = pci_request_regions(pdev, DRV_NAME);
+	if (err) {
+		shost_printk(KERN_ERR, fnic->lport->host,
+			     "Cannot enable PCI resources, aborting\n");
+		goto err_out_disable_device;
+	}
+
+	pci_set_master(pdev);
+
+	/* Query the PCI controller on the system for DMA addressing
+	 * limitation for the device.  Try 40-bit first, and
+	 * fall back to 32-bit.
+	 */
+	err = pci_set_dma_mask(pdev, DMA_40BIT_MASK);
+	if (err) {
+		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+		if (err) {
+			shost_printk(KERN_ERR, fnic->lport->host,
+				     "No usable DMA configuration "
+				     "aborting\n");
+			goto err_out_release_regions;
+		}
+		err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+		if (err) {
+			shost_printk(KERN_ERR, fnic->lport->host,
+				     "Unable to obtain 32-bit DMA "
+				     "for consistent allocations, aborting.\n");
+			goto err_out_release_regions;
+		}
+	} else {
+		err = pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK);
+		if (err) {
+			shost_printk(KERN_ERR, fnic->lport->host,
+				     "Unable to obtain 40-bit DMA "
+				     "for consistent allocations, aborting.\n");
+			goto err_out_release_regions;
+		}
+	}
+
+	/* Map vNIC resources from BAR0 */
+	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+		shost_printk(KERN_ERR, fnic->lport->host,
+			     "BAR0 not memory-map'able, aborting.\n");
+		err = -ENODEV;
+		goto err_out_release_regions;
+	}
+
+	fnic->bar0.vaddr = pci_iomap(pdev, 0, 0);
+	fnic->bar0.bus_addr = pci_resource_start(pdev, 0);
+	fnic->bar0.len = pci_resource_len(pdev, 0);
+
+	if (!fnic->bar0.vaddr) {
+		shost_printk(KERN_ERR, fnic->lport->host,
+			     "Cannot memory-map BAR0 res hdr, "
+			     "aborting.\n");
+		err = -ENODEV;
+		goto err_out_release_regions;
+	}
+
+	fnic->vdev = vnic_dev_register(NULL, fnic, pdev, &fnic->bar0);
+	if (!fnic->vdev) {
+		shost_printk(KERN_ERR, fnic->lport->host,
+			     "vNIC registration failed, "
+			     "aborting.\n");
+		err = -ENODEV;
+		goto err_out_iounmap;
+	}
+
+	err = fnic_dev_wait(fnic->vdev, vnic_dev_open,
+			    vnic_dev_open_done, 0);
+	if (err) {
+		shost_printk(KERN_ERR, fnic->lport->host,
+			     "vNIC dev open failed, aborting.\n");
+		goto err_out_vnic_unregister;
+	}
+
+	err = vnic_dev_init(fnic->vdev, 0);
+	if (err) {
+		shost_printk(KERN_ERR, fnic->lport->host,
+			     "vNIC dev init failed, aborting.\n");
+		goto err_out_dev_close;
+	}
+
+	err = vnic_dev_mac_addr(fnic->vdev, fnic->mac_addr);
+	if (err) {
+		shost_printk(KERN_ERR, fnic->lport->host,
+			     "vNIC get MAC addr failed\n");
+		goto err_out_dev_close;
+	}
+
+	/* Get vNIC configuration */
+	err = fnic_get_vnic_config(fnic);
+	if (err) {
+		shost_printk(KERN_ERR, fnic->lport->host,
+			     "Get vNIC configuration failed, "
+			     "aborting.\n");
+		goto err_out_dev_close;
+	}
+	host->max_lun = fnic->config.luns_per_tgt;
+	host->max_id = FNIC_MAX_FCP_TARGET;
+
+	fnic_get_res_counts(fnic);
+
+	err = fnic_set_intr_mode(fnic);
+	if (err) {
+		shost_printk(KERN_ERR, fnic->lport->host,
+			     "Failed to set intr mode, "
+			     "aborting.\n");
+		goto err_out_dev_close;
+	}
+
+	err = fnic_request_intr(fnic);
+	if (err) {
+		shost_printk(KERN_ERR, fnic->lport->host,
+			     "Unable to request irq.\n");
+		goto err_out_clear_intr;
+	}
+
+	err = fnic_alloc_vnic_resources(fnic);
+	if (err) {
+		shost_printk(KERN_ERR, fnic->lport->host,
+			     "Failed to alloc vNIC resources, "
+			     "aborting.\n");
+		goto err_out_free_intr;
+	}
+
+
+	/* initialize all fnic locks */
+	spin_lock_init(&fnic->fnic_lock);
+
+	for (i = 0; i < FNIC_WQ_MAX; i++)
+		spin_lock_init(&fnic->wq_lock[i]);
+
+	for (i = 0; i < FNIC_WQ_COPY_MAX; i++) {
+		spin_lock_init(&fnic->wq_copy_lock[i]);
+		fnic->wq_copy_desc_low[i] = DESC_CLEAN_LOW_WATERMARK;
+		fnic->fw_ack_recd[i] = 0;
+		fnic->fw_ack_index[i] = -1;
+	}
+
+	for (i = 0; i < FNIC_IO_LOCKS; i++)
+		spin_lock_init(&fnic->io_req_lock[i]);
+
+	fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache);
+	if (!fnic->io_req_pool)
+		goto err_out_free_resources;
+
+	pool = mempool_create(2, fnic_alloc_slab_dma, mempool_free_slab,
+			      fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
+	if (!pool)
+		goto err_out_free_ioreq_pool;
+	fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool;
+
+	pool = mempool_create(2, fnic_alloc_slab_dma, mempool_free_slab,
+			      fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
+	if (!pool)
+		goto err_out_free_dflt_pool;
+	fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool;
+
+	/* setup vlan config, hw inserts vlan header */
+	fnic->vlan_hw_insert = 1;
+	fnic->vlan_id = 0;
+
+	fnic->flogi_oxid = FC_XID_UNKNOWN;
+	fnic->flogi = NULL;
+	fnic->flogi_resp = NULL;
+	fnic->state = FNIC_IN_FC_MODE;
+
+	/* Enable hardware stripping of vlan header on ingress */
+	fnic_set_nic_config(fnic, 0, 0, 0, 0, 0, 0, 1);
+
+	/* Setup notification buffer area */
+	err = fnic_notify_set(fnic);
+	if (err) {
+		shost_printk(KERN_ERR, fnic->lport->host,
+			     "Failed to alloc notify buffer, aborting.\n");
+		goto err_out_free_max_pool;
+	}
+
+	/* Setup notify timer when using MSI interrupts */
+	if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
+		setup_timer(&fnic->notify_timer,
+			    fnic_notify_timer, (unsigned long)fnic);
+
+	/* allocate RQ buffers and post them to RQ */
+	for (i = 0; i < fnic->rq_count; i++) {
+		err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
+		if (err) {
+			shost_printk(KERN_ERR, fnic->lport->host,
+				     "fnic_alloc_rq_frame can't alloc "
+				     "frame\n");
+			goto err_out_free_rq_buf;
+		}
+	}
+
+	/*
+	 * Initialization done with PCI system, hardware, firmware.
+	 * Add host to SCSI
+	 */
+	err = scsi_add_host(lp->host, &pdev->dev);
+	if (err) {
+		shost_printk(KERN_ERR, fnic->lport->host,
+			     "fnic: scsi_add_host failed...exiting\n");
+		goto err_out_free_rq_buf;
+	}
+
+	/* Start local port initialization */
+
+	lp->link_up = 0;
+	lp->tt = fnic_transport_template;
+
+	lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3,
+				    FCPIO_HOST_EXCH_RANGE_START,
+				    FCPIO_HOST_EXCH_RANGE_END);
+	if (!lp->emp) {
+		err = -ENOMEM;
+		goto err_out_remove_scsi_host;
+	}
+
+	lp->max_retry_count = fnic->config.flogi_retries;
+	lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
+			      FCP_SPPF_CONF_COMPL);
+	if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR)
+		lp->service_params |= FCP_SPPF_RETRY;
+
+	lp->boot_time = jiffies;
+	lp->e_d_tov = fnic->config.ed_tov;
+	lp->r_a_tov = fnic->config.ra_tov;
+	lp->link_supported_speeds = FC_PORTSPEED_10GBIT;
+	fc_set_wwnn(lp, fnic->config.node_wwn);
+	fc_set_wwpn(lp, fnic->config.port_wwn);
+
+	fc_exch_init(lp);
+	fc_lport_init(lp);
+	fc_elsct_init(lp);
+	fc_rport_init(lp);
+	fc_disc_init(lp);
+
+	fc_lport_config(lp);
+
+	if (fc_set_mfs(lp, fnic->config.maxdatafieldsize +
+		       sizeof(struct fc_frame_header))) {
+		err = -EINVAL;
+		goto err_out_free_exch_mgr;
+	}
+	fc_host_maxframe_size(lp->host) = lp->mfs;
+
+	sprintf(fc_host_symbolic_name(lp->host),
+		DRV_NAME " v" DRV_VERSION " over %s", fnic->name);
+
+	spin_lock_irqsave(&fnic_list_lock, flags);
+	list_add_tail(&fnic->list, &fnic_list);
+	spin_unlock_irqrestore(&fnic_list_lock, flags);
+
+	INIT_WORK(&fnic->link_work, fnic_handle_link);
+	INIT_WORK(&fnic->frame_work, fnic_handle_frame);
+	skb_queue_head_init(&fnic->frame_queue);
+
+	/* Enable all queues */
+	for (i = 0; i < fnic->raw_wq_count; i++)
+		vnic_wq_enable(&fnic->wq[i]);
+	for (i = 0; i < fnic->rq_count; i++)
+		vnic_rq_enable(&fnic->rq[i]);
+	for (i = 0; i < fnic->wq_copy_count; i++)
+		vnic_wq_copy_enable(&fnic->wq_copy[i]);
+
+	fc_fabric_login(lp);
+
+	vnic_dev_enable(fnic->vdev);
+	for (i = 0; i < fnic->intr_count; i++)
+		vnic_intr_unmask(&fnic->intr[i]);
+
+	fnic_notify_timer_start(fnic);
+
+	return 0;
+
+err_out_free_exch_mgr:
+	fc_exch_mgr_free(lp->emp);
+err_out_remove_scsi_host:
+	fc_remove_host(fnic->lport->host);
+	scsi_remove_host(fnic->lport->host);
+err_out_free_rq_buf:
+	for (i = 0; i < fnic->rq_count; i++)
+		vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
+	vnic_dev_notify_unset(fnic->vdev);
+err_out_free_max_pool:
+	mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX]);
+err_out_free_dflt_pool:
+	mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT]);
+err_out_free_ioreq_pool:
+	mempool_destroy(fnic->io_req_pool);
+err_out_free_resources:
+	fnic_free_vnic_resources(fnic);
+err_out_free_intr:
+	fnic_free_intr(fnic);
+err_out_clear_intr:
+	fnic_clear_intr_mode(fnic);
+err_out_dev_close:
+	vnic_dev_close(fnic->vdev);
+err_out_vnic_unregister:
+	vnic_dev_unregister(fnic->vdev);
+err_out_iounmap:
+	fnic_iounmap(fnic);
+err_out_release_regions:
+	pci_release_regions(pdev);
+err_out_disable_device:
+	pci_disable_device(pdev);
+err_out_free_hba:
+	scsi_host_put(lp->host);
+err_out:
+	return err;
+}
+
+static void __devexit fnic_remove(struct pci_dev *pdev)
+{
+	struct fnic *fnic = pci_get_drvdata(pdev);
+	unsigned long flags;
+
+	/*
+	 * Mark state so that the workqueue thread stops forwarding
+	 * received frames and link events to the local port. ISR and
+	 * other threads that can queue work items will also stop
+	 * creating work items on the fnic workqueue
+	 */
+	spin_lock_irqsave(&fnic->fnic_lock, flags);
+	fnic->stop_rx_link_events = 1;
+	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+	if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
+		del_timer_sync(&fnic->notify_timer);
+
+	/*
+	 * Flush the fnic event queue. After this call, there should
+	 * be no event queued for this fnic device in the workqueue
+	 */
+	flush_workqueue(fnic_event_queue);
+	skb_queue_purge(&fnic->frame_queue);
+
+	/*
+	 * Log off the fabric. This stops all remote ports and the dns
+	 * port, and flushes all rport, disc, and lport work
+	 * before returning
+	 */
+	fc_fabric_logoff(fnic->lport);
+
+	spin_lock_irqsave(&fnic->fnic_lock, flags);
+	fnic->in_remove = 1;
+	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+	fc_lport_destroy(fnic->lport);
+
+	/*
+	 * This stops the fnic device, masks all interrupts. Completed
+	 * CQ entries are drained. Posted WQ/RQ/Copy-WQ entries are
+	 * cleaned up
+	 */
+	fnic_cleanup(fnic);
+
+	BUG_ON(!skb_queue_empty(&fnic->frame_queue));
+
+	spin_lock_irqsave(&fnic_list_lock, flags);
+	list_del(&fnic->list);
+	spin_unlock_irqrestore(&fnic_list_lock, flags);
+
+	fc_remove_host(fnic->lport->host);
+	scsi_remove_host(fnic->lport->host);
+	fc_exch_mgr_free(fnic->lport->emp);
+	vnic_dev_notify_unset(fnic->vdev);
+	fnic_free_vnic_resources(fnic);
+	fnic_free_intr(fnic);
+	fnic_clear_intr_mode(fnic);
+	vnic_dev_close(fnic->vdev);
+	vnic_dev_unregister(fnic->vdev);
+	fnic_iounmap(fnic);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+	scsi_host_put(fnic->lport->host);
+}
+
+static struct pci_driver fnic_driver = {
+	.name = DRV_NAME,
+	.id_table = fnic_id_table,
+	.probe = fnic_probe,
+	.remove = __devexit_p(fnic_remove),
+};
+
+static int __init fnic_init_module(void)
+{
+	size_t len;
+	int err = 0;
+
+	printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);
+
+	/* Create a cache for allocation of default size sgls */
+	len = sizeof(struct fnic_dflt_sgl_list);
+	fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create
+		("fnic_sgl_dflt", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
+		 SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA,
+		 NULL);
+	if (!fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]) {
+		printk(KERN_ERR PFX "failed to create fnic dflt sgl slab\n");
+		err = -ENOMEM;
+		goto err_create_fnic_sgl_slab_dflt;
+	}
+
+	/* Create a cache for allocation of max size sgls*/
+	len = sizeof(struct fnic_sgl_list);
+	fnic_sgl_cache[FNIC_SGL_CACHE_MAX] = kmem_cache_create
+		("fnic_sgl_max", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
+		 SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA,
+		 NULL);
+	if (!fnic_sgl_cache[FNIC_SGL_CACHE_MAX]) {
+		printk(KERN_ERR PFX "failed to create fnic max sgl slab\n");
+		err = -ENOMEM;
+		goto err_create_fnic_sgl_slab_max;
+	}
+
+	/* Create a cache of io_req structs for use via mempool */
+	fnic_io_req_cache = kmem_cache_create("fnic_io_req",
+					      sizeof(struct fnic_io_req),
+					      0, SLAB_HWCACHE_ALIGN, NULL);
+	if (!fnic_io_req_cache) {
+		printk(KERN_ERR PFX "failed to create fnic io_req slab\n");
+		err = -ENOMEM;
+		goto err_create_fnic_ioreq_slab;
+	}
+
+	fnic_event_queue = create_singlethread_workqueue("fnic_event_wq");
+	if (!fnic_event_queue) {
+		printk(KERN_ERR PFX "fnic work queue create failed\n");
+		err = -ENOMEM;
+		goto err_create_fnic_workq;
+	}
+
+	spin_lock_init(&fnic_list_lock);
+	INIT_LIST_HEAD(&fnic_list);
+
+	fnic_fc_transport = fc_attach_transport(&fnic_fc_functions);
+	if (!fnic_fc_transport) {
+		printk(KERN_ERR PFX "fc_attach_transport error\n");
+		err = -ENOMEM;
+		goto err_fc_transport;
+	}
+
+	/* register the driver with PCI system */
+	err = pci_register_driver(&fnic_driver);
+	if (err < 0) {
+		printk(KERN_ERR PFX "pci register error\n");
+		goto err_pci_register;
+	}
+	return err;
+
+err_pci_register:
+	fc_release_transport(fnic_fc_transport);
+err_fc_transport:
+	destroy_workqueue(fnic_event_queue);
+err_create_fnic_workq:
+	kmem_cache_destroy(fnic_io_req_cache);
+err_create_fnic_ioreq_slab:
+	kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
+err_create_fnic_sgl_slab_max:
+	kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
+err_create_fnic_sgl_slab_dflt:
+	return err;
+}
+
+static void __exit fnic_cleanup_module(void)
+{
+	pci_unregister_driver(&fnic_driver);
+	destroy_workqueue(fnic_event_queue);
+	kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
+	kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
+	kmem_cache_destroy(fnic_io_req_cache);
+	fc_release_transport(fnic_fc_transport);
+}
+
+module_init(fnic_init_module);
+module_exit(fnic_cleanup_module);
+
diff --git a/drivers/scsi/fnic/fnic_res.c b/drivers/scsi/fnic/fnic_res.c
new file mode 100644
index 0000000..7ba61ec
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_res.c
@@ -0,0 +1,444 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include "wq_enet_desc.h"
+#include "rq_enet_desc.h"
+#include "cq_enet_desc.h"
+#include "vnic_resource.h"
+#include "vnic_dev.h"
+#include "vnic_wq.h"
+#include "vnic_rq.h"
+#include "vnic_cq.h"
+#include "vnic_intr.h"
+#include "vnic_stats.h"
+#include "vnic_nic.h"
+#include "fnic.h"
+
+int fnic_get_vnic_config(struct fnic *fnic)
+{
+	struct vnic_fc_config *c = &fnic->config;
+	int err;
+
+#define GET_CONFIG(m) \
+	do { \
+		err = vnic_dev_spec(fnic->vdev, \
+				    offsetof(struct vnic_fc_config, m), \
+				    sizeof(c->m), &c->m); \
+		if (err) { \
+			shost_printk(KERN_ERR, fnic->lport->host, \
+				     "Error getting %s, %d\n", #m, \
+				     err); \
+			return err; \
+		} \
+	} while (0);
+
+	GET_CONFIG(node_wwn);
+	GET_CONFIG(port_wwn);
+	GET_CONFIG(wq_enet_desc_count);
+	GET_CONFIG(wq_copy_desc_count);
+	GET_CONFIG(rq_desc_count);
+	GET_CONFIG(maxdatafieldsize);
+	GET_CONFIG(ed_tov);
+	GET_CONFIG(ra_tov);
+	GET_CONFIG(intr_timer);
+	GET_CONFIG(intr_timer_type);
+	GET_CONFIG(flags);
+	GET_CONFIG(flogi_retries);
+	GET_CONFIG(flogi_timeout);
+	GET_CONFIG(plogi_retries);
+	GET_CONFIG(plogi_timeout);
+	GET_CONFIG(io_throttle_count);
+	GET_CONFIG(link_down_timeout);
+	GET_CONFIG(port_down_timeout);
+	GET_CONFIG(port_down_io_retries);
+	GET_CONFIG(luns_per_tgt);
+
+	c->wq_enet_desc_count =
+		min_t(u32, VNIC_FNIC_WQ_DESCS_MAX,
+		      max_t(u32, VNIC_FNIC_WQ_DESCS_MIN,
+			    c->wq_enet_desc_count));
+	c->wq_enet_desc_count = ALIGN(c->wq_enet_desc_count, 16);
+
+	c->wq_copy_desc_count =
+		min_t(u32, VNIC_FNIC_WQ_COPY_DESCS_MAX,
+		      max_t(u32, VNIC_FNIC_WQ_COPY_DESCS_MIN,
+			    c->wq_copy_desc_count));
+	c->wq_copy_desc_count = ALIGN(c->wq_copy_desc_count, 16);
+
+	c->rq_desc_count =
+		min_t(u32, VNIC_FNIC_RQ_DESCS_MAX,
+		      max_t(u32, VNIC_FNIC_RQ_DESCS_MIN,
+			    c->rq_desc_count));
+	c->rq_desc_count = ALIGN(c->rq_desc_count, 16);
+
+	c->maxdatafieldsize =
+		min_t(u16, VNIC_FNIC_MAXDATAFIELDSIZE_MAX,
+		      max_t(u16, VNIC_FNIC_MAXDATAFIELDSIZE_MIN,
+			    c->maxdatafieldsize));
+	c->ed_tov =
+		min_t(u32, VNIC_FNIC_EDTOV_MAX,
+		      max_t(u32, VNIC_FNIC_EDTOV_MIN,
+			    c->ed_tov));
+
+	c->ra_tov =
+		min_t(u32, VNIC_FNIC_RATOV_MAX,
+		      max_t(u32, VNIC_FNIC_RATOV_MIN,
+			    c->ra_tov));
+
+	c->flogi_retries =
+		min_t(u32, VNIC_FNIC_FLOGI_RETRIES_MAX, c->flogi_retries);
+
+	c->flogi_timeout =
+		min_t(u32, VNIC_FNIC_FLOGI_TIMEOUT_MAX,
+		      max_t(u32, VNIC_FNIC_FLOGI_TIMEOUT_MIN,
+			    c->flogi_timeout));
+
+	c->plogi_retries =
+		min_t(u32, VNIC_FNIC_PLOGI_RETRIES_MAX, c->plogi_retries);
+
+	c->plogi_timeout =
+		min_t(u32, VNIC_FNIC_PLOGI_TIMEOUT_MAX,
+		      max_t(u32, VNIC_FNIC_PLOGI_TIMEOUT_MIN,
+			    c->plogi_timeout));
+
+	c->io_throttle_count =
+		min_t(u32, VNIC_FNIC_IO_THROTTLE_COUNT_MAX,
+		      max_t(u32, VNIC_FNIC_IO_THROTTLE_COUNT_MIN,
+			    c->io_throttle_count));
+
+	c->link_down_timeout =
+		min_t(u32, VNIC_FNIC_LINK_DOWN_TIMEOUT_MAX,
+		      c->link_down_timeout);
+
+	c->port_down_timeout =
+		min_t(u32, VNIC_FNIC_PORT_DOWN_TIMEOUT_MAX,
+		      c->port_down_timeout);
+
+	c->port_down_io_retries =
+		min_t(u32, VNIC_FNIC_PORT_DOWN_IO_RETRIES_MAX,
+		      c->port_down_io_retries);
+
+	c->luns_per_tgt =
+		min_t(u32, VNIC_FNIC_LUNS_PER_TARGET_MAX,
+		      max_t(u32, VNIC_FNIC_LUNS_PER_TARGET_MIN,
+			    c->luns_per_tgt));
+
+	c->intr_timer = min_t(u16, VNIC_INTR_TIMER_MAX, c->intr_timer);
+	c->intr_timer_type = c->intr_timer_type;
+
+	shost_printk(KERN_INFO, fnic->lport->host,
+		     "vNIC MAC addr %02x:%02x:%02x:%02x:%02x:%02x "
+		     "wq/wq_copy/rq %d/%d/%d\n",
+		     fnic->mac_addr[0], fnic->mac_addr[1], fnic->mac_addr[2],
+		     fnic->mac_addr[3], fnic->mac_addr[4], fnic->mac_addr[5],
+		     c->wq_enet_desc_count, c->wq_copy_desc_count,
+		     c->rq_desc_count);
+	shost_printk(KERN_INFO, fnic->lport->host,
+		     "vNIC node wwn %llx port wwn %llx\n",
+		     c->node_wwn, c->port_wwn);
+	shost_printk(KERN_INFO, fnic->lport->host,
+		     "vNIC ed_tov %d ra_tov %d\n",
+		     c->ed_tov, c->ra_tov);
+	shost_printk(KERN_INFO, fnic->lport->host,
+		     "vNIC mtu %d intr timer %d\n",
+		     c->maxdatafieldsize, c->intr_timer);
+	shost_printk(KERN_INFO, fnic->lport->host,
+		     "vNIC flags 0x%x luns per tgt %d\n",
+		     c->flags, c->luns_per_tgt);
+	shost_printk(KERN_INFO, fnic->lport->host,
+		     "vNIC flogi_retries %d flogi timeout %d\n",
+		     c->flogi_retries, c->flogi_timeout);
+	shost_printk(KERN_INFO, fnic->lport->host,
+		     "vNIC plogi retries %d plogi timeout %d\n",
+		     c->plogi_retries, c->plogi_timeout);
+	shost_printk(KERN_INFO, fnic->lport->host,
+		     "vNIC io throttle count %d link dn timeout %d\n",
+		     c->io_throttle_count, c->link_down_timeout);
+	shost_printk(KERN_INFO, fnic->lport->host,
+		     "vNIC port dn io retries %d port dn timeout %d\n",
+		     c->port_down_io_retries, c->port_down_timeout);
+
+	return 0;
+}
+
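+/*
+ * Build a NIC config word and issue CMD_NIC_CFG to the firmware; used at
+ * probe time to enable VLAN header stripping on ingress.
+ */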
+int fnic_set_nic_config(struct fnic *fnic, u8 rss_default_cpu,
+			u8 rss_hash_type,
+			u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable,
+			u8 tso_ipid_split_en, u8 ig_vlan_strip_en)
+{
+	u64 a0, a1;
+	u32 nic_cfg;
+	int wait = 1000;
+
+	vnic_set_nic_cfg(&nic_cfg, rss_default_cpu,
+		rss_hash_type, rss_hash_bits, rss_base_cpu,
+		rss_enable, tso_ipid_split_en, ig_vlan_strip_en);
+
+	a0 = nic_cfg;
+	a1 = 0;
+
+	return vnic_dev_cmd(fnic->vdev, CMD_NIC_CFG, &a0, &a1, wait);
+}
+
+void fnic_get_res_counts(struct fnic *fnic)
+{
+	fnic->wq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_WQ);
+	fnic->raw_wq_count = fnic->wq_count - 1;
+	fnic->wq_copy_count = fnic->wq_count - fnic->raw_wq_count;
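+	/*
+	 * The last WQ is reserved as the copy WQ used for SCSI IOs, so
+	 * wq_copy_count always works out to 1 here; the remaining WQs are
+	 * the raw WQs used for FCS frames.
+	 */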
+	fnic->rq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_RQ);
+	fnic->cq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_CQ);
+	fnic->intr_count = vnic_dev_get_res_count(fnic->vdev,
+		RES_TYPE_INTR_CTRL);
+}
+
+void fnic_free_vnic_resources(struct fnic *fnic)
+{
+	unsigned int i;
+
+	for (i = 0; i < fnic->raw_wq_count; i++)
+		vnic_wq_free(&fnic->wq[i]);
+
+	for (i = 0; i < fnic->wq_copy_count; i++)
+		vnic_wq_copy_free(&fnic->wq_copy[i]);
+
+	for (i = 0; i < fnic->rq_count; i++)
+		vnic_rq_free(&fnic->rq[i]);
+
+	for (i = 0; i < fnic->cq_count; i++)
+		vnic_cq_free(&fnic->cq[i]);
+
+	for (i = 0; i < fnic->intr_count; i++)
+		vnic_intr_free(&fnic->intr[i]);
+}
+
+int fnic_alloc_vnic_resources(struct fnic *fnic)
+{
+	enum vnic_dev_intr_mode intr_mode;
+	unsigned int mask_on_assertion;
+	unsigned int interrupt_offset;
+	unsigned int error_interrupt_enable;
+	unsigned int error_interrupt_offset;
+	unsigned int i, cq_index;
+	unsigned int wq_copy_cq_desc_count;
+	int err;
+
+	intr_mode = vnic_dev_get_intr_mode(fnic->vdev);
+
+	shost_printk(KERN_INFO, fnic->lport->host, "vNIC interrupt mode: %s\n",
+		     intr_mode == VNIC_DEV_INTR_MODE_INTX ? "legacy PCI INTx" :
+		     intr_mode == VNIC_DEV_INTR_MODE_MSI ? "MSI" :
+		     intr_mode == VNIC_DEV_INTR_MODE_MSIX ?
+		     "MSI-X" : "unknown");
+
+	shost_printk(KERN_INFO, fnic->lport->host, "vNIC resources avail: "
+		     "wq %d cp_wq %d raw_wq %d rq %d cq %d intr %d\n",
+		     fnic->wq_count, fnic->wq_copy_count, fnic->raw_wq_count,
+		     fnic->rq_count, fnic->cq_count, fnic->intr_count);
+
+	/* Allocate Raw WQ used for FCS frames */
+	for (i = 0; i < fnic->raw_wq_count; i++) {
+		err = vnic_wq_alloc(fnic->vdev, &fnic->wq[i], i,
+			fnic->config.wq_enet_desc_count,
+			sizeof(struct wq_enet_desc));
+		if (err)
+			goto err_out_cleanup;
+	}
+
+	/* Allocate Copy WQs used for SCSI IOs */
+	for (i = 0; i < fnic->wq_copy_count; i++) {
+		err = vnic_wq_copy_alloc(fnic->vdev, &fnic->wq_copy[i],
+			(fnic->raw_wq_count + i),
+			fnic->config.wq_copy_desc_count,
+			sizeof(struct fcpio_host_req));
+		if (err)
+			goto err_out_cleanup;
+	}
+
+	/* RQ for receiving FCS frames */
+	for (i = 0; i < fnic->rq_count; i++) {
+		err = vnic_rq_alloc(fnic->vdev, &fnic->rq[i], i,
+			fnic->config.rq_desc_count,
+			sizeof(struct rq_enet_desc));
+		if (err)
+			goto err_out_cleanup;
+	}
+
+	/* CQ for each RQ */
+	for (i = 0; i < fnic->rq_count; i++) {
+		cq_index = i;
+		err = vnic_cq_alloc(fnic->vdev,
+			&fnic->cq[cq_index], cq_index,
+			fnic->config.rq_desc_count,
+			sizeof(struct cq_enet_rq_desc));
+		if (err)
+			goto err_out_cleanup;
+	}
+
+	/* CQ for each WQ */
+	for (i = 0; i < fnic->raw_wq_count; i++) {
+		cq_index = fnic->rq_count + i;
+		err = vnic_cq_alloc(fnic->vdev, &fnic->cq[cq_index], cq_index,
+			fnic->config.wq_enet_desc_count,
+			sizeof(struct cq_enet_wq_desc));
+		if (err)
+			goto err_out_cleanup;
+	}
+
+	/* CQ for each COPY WQ */
+	wq_copy_cq_desc_count = (fnic->config.wq_copy_desc_count * 3);
+	for (i = 0; i < fnic->wq_copy_count; i++) {
+		cq_index = fnic->raw_wq_count + fnic->rq_count + i;
+		err = vnic_cq_alloc(fnic->vdev, &fnic->cq[cq_index],
+			cq_index,
+			wq_copy_cq_desc_count,
+			sizeof(struct fcpio_fw_req));
+		if (err)
+			goto err_out_cleanup;
+	}
+
+	for (i = 0; i < fnic->intr_count; i++) {
+		err = vnic_intr_alloc(fnic->vdev, &fnic->intr[i], i);
+		if (err)
+			goto err_out_cleanup;
+	}
+
+	fnic->legacy_pba = vnic_dev_get_res(fnic->vdev,
+				RES_TYPE_INTR_PBA_LEGACY, 0);
+
+	if (!fnic->legacy_pba && intr_mode == VNIC_DEV_INTR_MODE_INTX) {
+		shost_printk(KERN_ERR, fnic->lport->host,
+			     "Failed to hook legacy pba resource\n");
+		err = -ENODEV;
+		goto err_out_cleanup;
+	}
+
+	/*
+	 * Init RQ/WQ resources.
+	 *
+	 * RQ[0 to n-1] point to CQ[0 to n-1]
+	 * WQ[0 to m-1] point to CQ[n to n+m-1]
+	 * WQ_COPY[0 to k-1] points to CQ[n+m to n+m+k-1]
+	 *
+	 * Note for copy wq we always initialize with cq_index = 0
+	 *
+	 * Error interrupt is not enabled for MSI.
+	 */
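+	/*
+	 * For example, with rq_count = 1, raw_wq_count = 1 and
+	 * wq_copy_count = 1: RQ[0] uses CQ[0], WQ[0] uses CQ[1], and the
+	 * copy WQ completions are serviced from CQ[2] by
+	 * fnic_wq_copy_cmpl_handler().
+	 */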
+
+	switch (intr_mode) {
+	case VNIC_DEV_INTR_MODE_INTX:
+	case VNIC_DEV_INTR_MODE_MSIX:
+		error_interrupt_enable = 1;
+		error_interrupt_offset = fnic->err_intr_offset;
+		break;
+	default:
+		error_interrupt_enable = 0;
+		error_interrupt_offset = 0;
+		break;
+	}
+
+	for (i = 0; i < fnic->rq_count; i++) {
+		cq_index = i;
+		vnic_rq_init(&fnic->rq[i],
+			     cq_index,
+			     error_interrupt_enable,
+			     error_interrupt_offset);
+	}
+
+	for (i = 0; i < fnic->raw_wq_count; i++) {
+		cq_index = i + fnic->rq_count;
+		vnic_wq_init(&fnic->wq[i],
+			     cq_index,
+			     error_interrupt_enable,
+			     error_interrupt_offset);
+	}
+
+	for (i = 0; i < fnic->wq_copy_count; i++) {
+		vnic_wq_copy_init(&fnic->wq_copy[i],
+				  0 /* cq_index 0 - always */,
+				  error_interrupt_enable,
+				  error_interrupt_offset);
+	}
+
+	for (i = 0; i < fnic->cq_count; i++) {
+
+		switch (intr_mode) {
+		case VNIC_DEV_INTR_MODE_MSIX:
+			interrupt_offset = i;
+			break;
+		default:
+			interrupt_offset = 0;
+			break;
+		}
+
+		vnic_cq_init(&fnic->cq[i],
+			0 /* flow_control_enable */,
+			1 /* color_enable */,
+			0 /* cq_head */,
+			0 /* cq_tail */,
+			1 /* cq_tail_color */,
+			1 /* interrupt_enable */,
+			1 /* cq_entry_enable */,
+			0 /* cq_message_enable */,
+			interrupt_offset,
+			0 /* cq_message_addr */);
+	}
+
+	/*
+	 * Init INTR resources
+	 *
+	 * mask_on_assertion is not used for INTx due to the level-
+	 * triggered nature of INTx
+	 */
+
+	switch (intr_mode) {
+	case VNIC_DEV_INTR_MODE_MSI:
+	case VNIC_DEV_INTR_MODE_MSIX:
+		mask_on_assertion = 1;
+		break;
+	default:
+		mask_on_assertion = 0;
+		break;
+	}
+
+	for (i = 0; i < fnic->intr_count; i++) {
+		vnic_intr_init(&fnic->intr[i],
+			fnic->config.intr_timer,
+			fnic->config.intr_timer_type,
+			mask_on_assertion);
+	}
+
+	/* init the stats memory by making the first call here */
+	err = vnic_dev_stats_dump(fnic->vdev, &fnic->stats);
+	if (err) {
+		shost_printk(KERN_ERR, fnic->lport->host,
+			     "vnic_dev_stats_dump failed - x%x\n", err);
+		goto err_out_cleanup;
+	}
+
+	/* Clear LIF stats */
+	vnic_dev_stats_clear(fnic->vdev);
+
+	return 0;
+
+err_out_cleanup:
+	fnic_free_vnic_resources(fnic);
+
+	return err;
+}
diff --git a/drivers/scsi/fnic/fnic_res.h b/drivers/scsi/fnic/fnic_res.h
new file mode 100644
index 0000000..b6f3102
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_res.h
@@ -0,0 +1,197 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _FNIC_RES_H_
+#define _FNIC_RES_H_
+
+#include "wq_enet_desc.h"
+#include "rq_enet_desc.h"
+#include "vnic_wq.h"
+#include "vnic_rq.h"
+#include "fnic_io.h"
+#include "fcpio.h"
+#include "vnic_wq_copy.h"
+#include "vnic_cq_copy.h"
+
+static inline void fnic_queue_wq_desc(struct vnic_wq *wq,
+				      void *os_buf, dma_addr_t dma_addr,
+				      unsigned int len, unsigned int fc_eof,
+				      int vlan_tag_insert,
+				      unsigned int vlan_tag,
+				      int cq_entry, int sop, int eop)
+{
+	struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
+
+	wq_enet_desc_enc(desc,
+			 (u64)dma_addr | VNIC_PADDR_TARGET,
+			 (u16)len,
+			 0, /* mss_or_csum_offset */
+			 (u16)fc_eof,
+			 0, /* offload_mode */
+			 (u8)eop, (u8)cq_entry,
+			 1, /* fcoe_encap */
+			 (u8)vlan_tag_insert,
+			 (u16)vlan_tag,
+			 0 /* loopback */);
+
+	vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop);
+}
+
+static inline void fnic_queue_wq_copy_desc_icmnd_16(struct vnic_wq_copy *wq,
+						    u32 req_id,
+						    u32 lunmap_id, u8 spl_flags,
+						    u32 sgl_cnt, u32 sense_len,
+						    u64 sgl_addr, u64 sns_addr,
+						    u8 crn, u8 pri_ta,
+						    u8 flags, u8 *scsi_cdb,
+						    u32 data_len, u8 *lun,
+						    u32 d_id, u16 mss,
+						    u32 ratov, u32 edtov)
+{
+	struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
+
+	desc->hdr.type = FCPIO_ICMND_16; /* enum fcpio_type */
+	desc->hdr.status = 0;            /* header status entry */
+	desc->hdr._resvd = 0;            /* reserved */
+	desc->hdr.tag.u.req_id = req_id; /* id for this request */
+
+	desc->u.icmnd_16.lunmap_id = lunmap_id; /* index into lunmap table */
+	desc->u.icmnd_16.special_req_flags = spl_flags; /* exch req flags */
+	desc->u.icmnd_16._resvd0[0] = 0;        /* reserved */
+	desc->u.icmnd_16._resvd0[1] = 0;        /* reserved */
+	desc->u.icmnd_16._resvd0[2] = 0;        /* reserved */
+	desc->u.icmnd_16.sgl_cnt = sgl_cnt;     /* scatter-gather list count */
+	desc->u.icmnd_16.sense_len = sense_len; /* sense buffer length */
+	desc->u.icmnd_16.sgl_addr = sgl_addr;   /* scatter-gather list addr */
+	desc->u.icmnd_16.sense_addr = sns_addr; /* sense buffer address */
+	desc->u.icmnd_16.crn = crn;             /* SCSI Command Reference No.*/
+	desc->u.icmnd_16.pri_ta = pri_ta; 	/* SCSI Pri & Task attribute */
+	desc->u.icmnd_16._resvd1 = 0;           /* reserved: should be 0 */
+	desc->u.icmnd_16.flags = flags;         /* command flags */
+	memcpy(desc->u.icmnd_16.scsi_cdb, scsi_cdb, CDB_16);    /* SCSI CDB */
+	desc->u.icmnd_16.data_len = data_len;   /* length of data expected */
+	memcpy(desc->u.icmnd_16.lun, lun, LUN_ADDRESS);  /* LUN address */
+	desc->u.icmnd_16._resvd2 = 0;          	/* reserved */
+	hton24(desc->u.icmnd_16.d_id, d_id);	/* FC vNIC only: Target D_ID */
+	desc->u.icmnd_16.mss = mss;            	/* FC vNIC only: max burst */
+	desc->u.icmnd_16.r_a_tov = ratov; /*FC vNIC only: Res. Alloc Timeout */
+	desc->u.icmnd_16.e_d_tov = edtov; /*FC vNIC only: Err Detect Timeout */
+
+	vnic_wq_copy_post(wq);
+}
+
+static inline void fnic_queue_wq_copy_desc_itmf(struct vnic_wq_copy *wq,
+						u32 req_id, u32 lunmap_id,
+						u32 tm_req, u32 tm_id, u8 *lun,
+						u32 d_id, u32 r_a_tov,
+						u32 e_d_tov)
+{
+	struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
+
+	desc->hdr.type = FCPIO_ITMF;     /* enum fcpio_type */
+	desc->hdr.status = 0;            /* header status entry */
+	desc->hdr._resvd = 0;            /* reserved */
+	desc->hdr.tag.u.req_id = req_id; /* id for this request */
+
+	desc->u.itmf.lunmap_id = lunmap_id; /* index into lunmap table */
+	desc->u.itmf.tm_req = tm_req;       /* SCSI Task Management request */
+	desc->u.itmf.t_tag = tm_id;         /* tag of fcpio to be aborted */
+	desc->u.itmf._resvd = 0;
+	memcpy(desc->u.itmf.lun, lun, LUN_ADDRESS);  /* LUN address */
+	desc->u.itmf._resvd1 = 0;
+	hton24(desc->u.itmf.d_id, d_id);    /* FC vNIC only: Target D_ID */
+	desc->u.itmf.r_a_tov = r_a_tov;     /* FC vNIC only: R_A_TOV in msec */
+	desc->u.itmf.e_d_tov = e_d_tov;     /* FC vNIC only: E_D_TOV in msec */
+
+	vnic_wq_copy_post(wq);
+}
+
+static inline void fnic_queue_wq_copy_desc_flogi_reg(struct vnic_wq_copy *wq,
+						     u32 req_id, u8 format,
+						     u32 s_id, u8 *gw_mac)
+{
+	struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
+
+	desc->hdr.type = FCPIO_FLOGI_REG;     /* enum fcpio_type */
+	desc->hdr.status = 0;                 /* header status entry */
+	desc->hdr._resvd = 0;                 /* reserved */
+	desc->hdr.tag.u.req_id = req_id;      /* id for this request */
+
+	desc->u.flogi_reg.format = format;
+	hton24(desc->u.flogi_reg.s_id, s_id);
+	memcpy(desc->u.flogi_reg.gateway_mac, gw_mac, ETH_ALEN);
+
+	vnic_wq_copy_post(wq);
+}
+
+static inline void fnic_queue_wq_copy_desc_fw_reset(struct vnic_wq_copy *wq,
+						    u32 req_id)
+{
+	struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
+
+	desc->hdr.type = FCPIO_RESET;     /* enum fcpio_type */
+	desc->hdr.status = 0;             /* header status entry */
+	desc->hdr._resvd = 0;             /* reserved */
+	desc->hdr.tag.u.req_id = req_id;  /* id for this request */
+
+	vnic_wq_copy_post(wq);
+}
+
+static inline void fnic_queue_wq_copy_desc_lunmap(struct vnic_wq_copy *wq,
+						  u32 req_id, u64 lunmap_addr,
+						  u32 lunmap_len)
+{
+	struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
+
+	desc->hdr.type = FCPIO_LUNMAP_REQ;	/* enum fcpio_type */
+	desc->hdr.status = 0;			/* header status entry */
+	desc->hdr._resvd = 0;			/* reserved */
+	desc->hdr.tag.u.req_id = req_id;	/* id for this request */
+
+	desc->u.lunmap_req.addr = lunmap_addr;	/* address of the buffer */
+	desc->u.lunmap_req.len = lunmap_len;	/* len of the buffer */
+
+	vnic_wq_copy_post(wq);
+}
+
+static inline void fnic_queue_rq_desc(struct vnic_rq *rq,
+				      void *os_buf, dma_addr_t dma_addr,
+				      u16 len)
+{
+	struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
+
+	rq_enet_desc_enc(desc,
+		(u64)dma_addr | VNIC_PADDR_TARGET,
+		RQ_ENET_TYPE_ONLY_SOP,
+		(u16)len);
+
+	vnic_rq_post(rq, os_buf, 0, dma_addr, len);
+}
+
+
+struct fnic;
+
+int fnic_get_vnic_config(struct fnic *);
+int fnic_alloc_vnic_resources(struct fnic *);
+void fnic_free_vnic_resources(struct fnic *);
+void fnic_get_res_counts(struct fnic *);
+int fnic_set_nic_config(struct fnic *fnic, u8 rss_default_cpu,
+			u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu,
+			u8 rss_enable, u8 tso_ipid_split_en,
+			u8 ig_vlan_strip_en);
+
+#endif /* _FNIC_RES_H_ */
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
new file mode 100644
index 0000000..eabf365
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -0,0 +1,1850 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/mempool.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/workqueue.h>
+#include <linux/pci.h>
+#include <linux/scatterlist.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/delay.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/fc/fc_els.h>
+#include <scsi/fc/fc_fcoe.h>
+#include <scsi/libfc.h>
+#include <scsi/fc_frame.h>
+#include "fnic_io.h"
+#include "fnic.h"
+
+const char *fnic_state_str[] = {
+	[FNIC_IN_FC_MODE] =           "FNIC_IN_FC_MODE",
+	[FNIC_IN_FC_TRANS_ETH_MODE] = "FNIC_IN_FC_TRANS_ETH_MODE",
+	[FNIC_IN_ETH_MODE] =          "FNIC_IN_ETH_MODE",
+	[FNIC_IN_ETH_TRANS_FC_MODE] = "FNIC_IN_ETH_TRANS_FC_MODE",
+};
+
+static const char *fnic_ioreq_state_str[] = {
+	[FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING",
+	[FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING",
+	[FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE",
+	[FNIC_IOREQ_CMD_COMPLETE] = "FNIC_IOREQ_CMD_COMPLETE",
+};
+
+static const char *fcpio_status_str[] =  {
+	[FCPIO_SUCCESS] = "FCPIO_SUCCESS", /*0x0*/
+	[FCPIO_INVALID_HEADER] = "FCPIO_INVALID_HEADER",
+	[FCPIO_OUT_OF_RESOURCE] = "FCPIO_OUT_OF_RESOURCE",
+	[FCPIO_INVALID_PARAM] = "FCPIO_INVALID_PARAM",
+	[FCPIO_REQ_NOT_SUPPORTED] = "FCPIO_REQ_NOT_SUPPORTED",
+	[FCPIO_IO_NOT_FOUND] = "FCPIO_IO_NOT_FOUND",
+	[FCPIO_ABORTED] = "FCPIO_ABORTED", /*0x41*/
+	[FCPIO_TIMEOUT] = "FCPIO_TIMEOUT",
+	[FCPIO_SGL_INVALID] = "FCPIO_SGL_INVALID",
+	[FCPIO_MSS_INVALID] = "FCPIO_MSS_INVALID",
+	[FCPIO_DATA_CNT_MISMATCH] = "FCPIO_DATA_CNT_MISMATCH",
+	[FCPIO_FW_ERR] = "FCPIO_FW_ERR",
+	[FCPIO_ITMF_REJECTED] = "FCPIO_ITMF_REJECTED",
+	[FCPIO_ITMF_FAILED] = "FCPIO_ITMF_FAILED",
+	[FCPIO_ITMF_INCORRECT_LUN] = "FCPIO_ITMF_INCORRECT_LUN",
+	[FCPIO_CMND_REJECTED] = "FCPIO_CMND_REJECTED",
+	[FCPIO_NO_PATH_AVAIL] = "FCPIO_NO_PATH_AVAIL",
+	[FCPIO_PATH_FAILED] = "FCPIO_PATH_FAILED",
+	[FCPIO_LUNMAP_CHNG_PEND] = "FCPIO_LUNMAP_CHNG_PEND",
+};
+
+const char *fnic_state_to_str(unsigned int state)
+{
+	if (state >= ARRAY_SIZE(fnic_state_str) || !fnic_state_str[state])
+		return "unknown";
+
+	return fnic_state_str[state];
+}
+
+static const char *fnic_ioreq_state_to_str(unsigned int state)
+{
+	if (state >= ARRAY_SIZE(fnic_ioreq_state_str) ||
+	    !fnic_ioreq_state_str[state])
+		return "unknown";
+
+	return fnic_ioreq_state_str[state];
+}
+
+static const char *fnic_fcpio_status_to_str(unsigned int status)
+{
+	if (status >= ARRAY_SIZE(fcpio_status_str) || !fcpio_status_str[status])
+		return "unknown";
+
+	return fcpio_status_str[status];
+}
+
+static void fnic_cleanup_io(struct fnic *fnic, int exclude_id);
+
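+/*
+ * Map a SCSI command to one of the FNIC_IO_LOCKS io_req locks by hashing
+ * its block-layer tag; FNIC_IO_LOCKS is expected to be a power of two, so
+ * the mask below simply selects the low-order bits of the tag.
+ */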
+static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic,
+					    struct scsi_cmnd *sc)
+{
+	u32 hash = sc->request->tag & (FNIC_IO_LOCKS - 1);
+
+	return &fnic->io_req_lock[hash];
+}
+
+/*
+ * Unmap the data buffer and sense buffer for an io_req,
+ * also unmap and free the device-private scatter/gather list.
+ */
+static void fnic_release_ioreq_buf(struct fnic *fnic,
+				   struct fnic_io_req *io_req,
+				   struct scsi_cmnd *sc)
+{
+	if (io_req->sgl_list_pa)
+		pci_unmap_single(fnic->pdev, io_req->sgl_list_pa,
+				 sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt,
+				 PCI_DMA_TODEVICE);
+	scsi_dma_unmap(sc);
+
+	if (io_req->sgl_cnt)
+		mempool_free(io_req->sgl_list_alloc,
+			     fnic->io_sgl_pool[io_req->sgl_type]);
+	if (io_req->sense_buf_pa)
+		pci_unmap_single(fnic->pdev, io_req->sense_buf_pa,
+				 SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
+}
+
+/* Free up Copy Wq descriptors. Called with copy_wq lock held */
+static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq)
+{
+	/* if no Ack received from firmware, then nothing to clean */
+	if (!fnic->fw_ack_recd[0])
+		return 1;
+
+	/*
+	 * Update desc_available count based on number of freed descriptors
+	 * Account for wraparound
+	 */
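+	/*
+	 * Worked example: with desc_count = 8, to_clean_index = 6 and
+	 * fw_ack_index = 1, the wraparound branch frees
+	 * 8 - 6 + 1 + 1 = 4 descriptors (slots 6, 7, 0 and 1), and
+	 * to_clean_index then advances to (1 + 1) % 8 = 2 below.
+	 */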
+	if (wq->to_clean_index <= fnic->fw_ack_index[0])
+		wq->ring.desc_avail += (fnic->fw_ack_index[0]
+					- wq->to_clean_index + 1);
+	else
+		wq->ring.desc_avail += (wq->ring.desc_count
+					- wq->to_clean_index
+					+ fnic->fw_ack_index[0] + 1);
+
+	/*
+	 * Just bump the clean index to ack_index + 1, accounting for
+	 * wraparound. This effectively frees up all descriptors between
+	 * to_clean_index and fw_ack_index, both inclusive.
+	 */
+	wq->to_clean_index =
+		(fnic->fw_ack_index[0] + 1) % wq->ring.desc_count;
+
+	/* we have processed the acks received so far */
+	fnic->fw_ack_recd[0] = 0;
+	return 0;
+}
+
+
+/*
+ * fnic_fw_reset_handler
+ * Routine to send reset msg to fw
+ */
+int fnic_fw_reset_handler(struct fnic *fnic)
+{
+	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
+	int ret = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
+
+	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
+		free_wq_copy_descs(fnic, wq);
+
+	if (!vnic_wq_copy_desc_avail(wq))
+		ret = -EAGAIN;
+	else
+		fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG);
+
+	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
+
+	if (!ret)
+		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+			      "Issued fw reset\n");
+	else
+		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+			      "Failed to issue fw reset\n");
+	return ret;
+}
+
+
+/*
+ * fnic_flogi_reg_handler
+ * Routine to send flogi register msg to fw
+ */
+int fnic_flogi_reg_handler(struct fnic *fnic)
+{
+	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
+	u8 gw_mac[ETH_ALEN];
+	int ret = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
+
+	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
+		free_wq_copy_descs(fnic, wq);
+
+	if (!vnic_wq_copy_desc_avail(wq)) {
+		ret = -EAGAIN;
+		goto flogi_reg_ioreq_end;
+	}
+
+	if (fnic->fcoui_mode)
+		memset(gw_mac, 0xff, ETH_ALEN);
+	else
+		memcpy(gw_mac, fnic->dest_addr, ETH_ALEN);
+
+	fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG,
+					  FCPIO_FLOGI_REG_GW_DEST,
+					  fnic->s_id,
+					  gw_mac);
+
+flogi_reg_ioreq_end:
+	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
+
+	if (!ret)
+		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+			      "flogi reg issued\n");
+
+	return ret;
+}
+
+/*
+ * fnic_queue_wq_copy_desc
+ * Routine to enqueue a wq copy desc
+ */
+static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
+					  struct vnic_wq_copy *wq,
+					  struct fnic_io_req *io_req,
+					  struct scsi_cmnd *sc,
+					  u32 sg_count)
+{
+	struct scatterlist *sg;
+	struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
+	struct fc_rport_libfc_priv *rp = rport->dd_data;
+	struct host_sg_desc *desc;
+	u8 pri_tag = 0;
+	unsigned int i;
+	unsigned long intr_flags;
+	int flags;
+	u8 exch_flags;
+	struct scsi_lun fc_lun;
+	char msg[2];
+
+	if (sg_count) {
+		BUG_ON(sg_count < 0);
+		BUG_ON(sg_count > FNIC_MAX_SG_DESC_CNT);
+
+		/* For each SGE, create a device desc entry */
+		desc = io_req->sgl_list;
+		for_each_sg(scsi_sglist(sc), sg, sg_count, i) {
+			desc->addr = cpu_to_le64(sg_dma_address(sg));
+			desc->len = cpu_to_le32(sg_dma_len(sg));
+			desc->_resvd = 0;
+			desc++;
+		}
+
+		io_req->sgl_list_pa = pci_map_single
+			(fnic->pdev,
+			 io_req->sgl_list,
+			 sizeof(io_req->sgl_list[0]) * sg_count,
+			 PCI_DMA_TODEVICE);
+	}
+
+	io_req->sense_buf_pa = pci_map_single(fnic->pdev,
+					      sc->sense_buffer,
+					      SCSI_SENSE_BUFFERSIZE,
+					      PCI_DMA_FROMDEVICE);
+
+	int_to_scsilun(sc->device->lun, &fc_lun);
+
+	pri_tag = FCPIO_ICMND_PTA_SIMPLE;
+	msg[0] = MSG_SIMPLE_TAG;
+	scsi_populate_tag_msg(sc, msg);
+	if (msg[0] == MSG_ORDERED_TAG)
+		pri_tag = FCPIO_ICMND_PTA_ORDERED;
+
+	/* Enqueue the descriptor in the Copy WQ */
+	spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);
+
+	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
+		free_wq_copy_descs(fnic, wq);
+
+	if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
+		spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
+		return SCSI_MLQUEUE_HOST_BUSY;
+	}
+
+	flags = 0;
+	if (sc->sc_data_direction == DMA_FROM_DEVICE)
+		flags = FCPIO_ICMND_RDDATA;
+	else if (sc->sc_data_direction == DMA_TO_DEVICE)
+		flags = FCPIO_ICMND_WRDATA;
+
+	exch_flags = 0;
+	if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) &&
+	    (rp->flags & FC_RP_FLAGS_RETRY))
+		exch_flags |= FCPIO_ICMND_SRFLAG_RETRY;
+
+	fnic_queue_wq_copy_desc_icmnd_16(wq, sc->request->tag,
+					 0, exch_flags, io_req->sgl_cnt,
+					 SCSI_SENSE_BUFFERSIZE,
+					 io_req->sgl_list_pa,
+					 io_req->sense_buf_pa,
+					 0, /* scsi cmd ref, always 0 */
+					 pri_tag, /* scsi pri and tag */
+					 flags,	/* command flags */
+					 sc->cmnd, scsi_bufflen(sc),
+					 fc_lun.scsi_lun, io_req->port_id,
+					 rport->maxframe_size, rp->r_a_tov,
+					 rp->e_d_tov);
+
+	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
+	return 0;
+}
+
+/*
+ * fnic_queuecommand
+ * Routine to send a scsi cdb
+ * Called with host_lock held and interrupts disabled.
+ */
+int fnic_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
+{
+	struct fc_lport *lp;
+	struct fc_rport *rport;
+	struct fnic_io_req *io_req;
+	struct fnic *fnic;
+	struct vnic_wq_copy *wq;
+	int ret;
+	int sg_count;	/* scsi_dma_map() may return a negative error */
+	unsigned long flags;
+	unsigned long ptr;
+
+	rport = starget_to_rport(scsi_target(sc->device));
+	ret = fc_remote_port_chkready(rport);
+	if (ret) {
+		sc->result = ret;
+		done(sc);
+		return 0;
+	}
+
+	lp = shost_priv(sc->device->host);
+	if (lp->state != LPORT_ST_READY || !(lp->link_up))
+		return SCSI_MLQUEUE_HOST_BUSY;
+
+	/*
+	 * Release host lock, use driver resource specific locks from here.
+	 * Don't re-enable interrupts in case they were disabled prior to the
+	 * caller disabling them.
+	 */
+	spin_unlock(lp->host->host_lock);
+
+	/* Get a new io_req for this SCSI IO */
+	fnic = lport_priv(lp);
+
+	io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
+	if (!io_req) {
+		ret = SCSI_MLQUEUE_HOST_BUSY;
+		goto out;
+	}
+	memset(io_req, 0, sizeof(*io_req));
+
+	/* Map the data buffer */
+	sg_count = scsi_dma_map(sc);
+	if (sg_count < 0) {
+		ret = SCSI_MLQUEUE_HOST_BUSY;
+		mempool_free(io_req, fnic->io_req_pool);
+		goto out;
+	}
+
+	/* Determine the type of scatter/gather list we need */
+	io_req->sgl_cnt = sg_count;
+	io_req->sgl_type = FNIC_SGL_CACHE_DFLT;
+	if (sg_count > FNIC_DFLT_SG_DESC_CNT)
+		io_req->sgl_type = FNIC_SGL_CACHE_MAX;
+
+	if (sg_count) {
+		io_req->sgl_list =
+			mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type],
+				      GFP_ATOMIC | GFP_DMA);
+		if (!io_req->sgl_list) {
+			ret = SCSI_MLQUEUE_HOST_BUSY;
+			scsi_dma_unmap(sc);
+			mempool_free(io_req, fnic->io_req_pool);
+			goto out;
+		}
+
+		/* Cache sgl list allocated address before alignment */
+		io_req->sgl_list_alloc = io_req->sgl_list;
+		ptr = (unsigned long) io_req->sgl_list;
+		if (ptr % FNIC_SG_DESC_ALIGN) {
+			io_req->sgl_list = (struct host_sg_desc *)
+				(((unsigned long) ptr
+				  + FNIC_SG_DESC_ALIGN - 1)
+				 & ~(FNIC_SG_DESC_ALIGN - 1));
+		}
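+		/*
+		 * The expression above is the usual round-up-to-alignment
+		 * idiom; e.g. assuming FNIC_SG_DESC_ALIGN is 16, an
+		 * allocation ending in ...08 is bumped up to ...10.
+		 * sgl_list_alloc keeps the original pointer so that
+		 * mempool_free() gets the address actually allocated.
+		 */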
+	}
+
+	/* initialize rest of io_req */
+	io_req->port_id = rport->port_id;
+	CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
+	CMD_SP(sc) = (char *)io_req;
+	sc->scsi_done = done;
+
+	/* create copy wq desc and enqueue it */
+	wq = &fnic->wq_copy[0];
+	ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count);
+	if (ret) {
+		/*
+		 * In case another thread cancelled the request,
+		 * refetch the pointer under the lock.
+		 */
+		spinlock_t *io_lock = fnic_io_lock_hash(fnic, sc);
+
+		spin_lock_irqsave(io_lock, flags);
+		io_req = (struct fnic_io_req *)CMD_SP(sc);
+		CMD_SP(sc) = NULL;
+		CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
+		spin_unlock_irqrestore(io_lock, flags);
+		if (io_req) {
+			fnic_release_ioreq_buf(fnic, io_req, sc);
+			mempool_free(io_req, fnic->io_req_pool);
+		}
+	}
+out:
+	/* acquire host lock before returning to SCSI */
+	spin_lock(lp->host->host_lock);
+	return ret;
+}
+
+/*
+ * fnic_fcpio_fw_reset_cmpl_handler
+ * Routine to handle fw reset completion
+ */
+static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
+					    struct fcpio_fw_req *desc)
+{
+	u8 type;
+	u8 hdr_status;
+	struct fcpio_tag tag;
+	int ret = 0;
+	struct fc_frame *flogi;
+	unsigned long flags;
+
+	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
+
+	/* Clean up all outstanding io requests */
+	fnic_cleanup_io(fnic, SCSI_NO_TAG);
+
+	spin_lock_irqsave(&fnic->fnic_lock, flags);
+
+	flogi = fnic->flogi;
+	fnic->flogi = NULL;
+
+	/* fnic should be in FC_TRANS_ETH_MODE */
+	if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) {
+		/* Check status of reset completion */
+		if (!hdr_status) {
+			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+				      "reset cmpl success\n");
+			/* Ready to send flogi out */
+			fnic->state = FNIC_IN_ETH_MODE;
+		} else {
+			FNIC_SCSI_DBG(KERN_DEBUG,
+				      fnic->lport->host,
+				      "fnic fw_reset: failed %s\n",
+				      fnic_fcpio_status_to_str(hdr_status));
+
+			/*
+			 * Unable to change to eth mode, so we cannot send out
+			 * a flogi. Change state to fc mode so that subsequent
+			 * Flogi requests from libFC will cause more attempts
+			 * to reset the firmware. Free the cached flogi.
+			 */
+			fnic->state = FNIC_IN_FC_MODE;
+			ret = -1;
+		}
+	} else {
+		FNIC_SCSI_DBG(KERN_DEBUG,
+			      fnic->lport->host,
+			      "Unexpected state %s while processing"
+			      " reset cmpl\n", fnic_state_to_str(fnic->state));
+		ret = -1;
+	}
+
+	/* Thread removing device blocks till firmware reset is complete */
+	if (fnic->remove_wait)
+		complete(fnic->remove_wait);
+
+	/*
+	 * If the fnic is being removed or the fw reset failed,
+	 * free the flogi frame. Else, send it out.
+	 */
+	if (fnic->remove_wait || ret) {
+		fnic->flogi_oxid = FC_XID_UNKNOWN;
+		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+		if (flogi)
+			dev_kfree_skb_irq(fp_skb(flogi));
+		goto reset_cmpl_handler_end;
+	}
+
+	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+	if (flogi)
+		ret = fnic_send_frame(fnic, flogi);
+
+ reset_cmpl_handler_end:
+	return ret;
+}
+
+/*
+ * fnic_fcpio_flogi_reg_cmpl_handler
+ * Routine to handle flogi register completion
+ */
+static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
+					     struct fcpio_fw_req *desc)
+{
+	u8 type;
+	u8 hdr_status;
+	struct fcpio_tag tag;
+	int ret = 0;
+	struct fc_frame *flogi_resp = NULL;
+	unsigned long flags;
+	struct sk_buff *skb;
+
+	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
+
+	/* Update fnic state based on status of flogi reg completion */
+	spin_lock_irqsave(&fnic->fnic_lock, flags);
+
+	flogi_resp = fnic->flogi_resp;
+	fnic->flogi_resp = NULL;
+
+	if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) {
+
+		/* Check flogi registration completion status */
+		if (!hdr_status) {
+			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+				      "flogi reg succeeded\n");
+			fnic->state = FNIC_IN_FC_MODE;
+		} else {
+			FNIC_SCSI_DBG(KERN_DEBUG,
+				      fnic->lport->host,
+				      "fnic flogi reg: failed %s\n",
+				      fnic_fcpio_status_to_str(hdr_status));
+			fnic->state = FNIC_IN_ETH_MODE;
+			ret = -1;
+		}
+	} else {
+		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+			      "Unexpected fnic state %s while"
+			      " processing flogi reg completion\n",
+			      fnic_state_to_str(fnic->state));
+		ret = -1;
+	}
+
+	/* Successful flogi reg cmpl, pass frame to LibFC */
+	if (!ret && flogi_resp) {
+		if (fnic->stop_rx_link_events) {
+			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+			goto reg_cmpl_handler_end;
+		}
+		skb = (struct sk_buff *)flogi_resp;
+		/* Use fr_flags to indicate whether flogi resp or not */
+		fr_flags(flogi_resp) = 1;
+		fr_dev(flogi_resp) = fnic->lport;
+		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+		skb_queue_tail(&fnic->frame_queue, skb);
+		queue_work(fnic_event_queue, &fnic->frame_work);
+
+	} else {
+		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+		if (flogi_resp)
+			dev_kfree_skb_irq(fp_skb(flogi_resp));
+	}
+
+reg_cmpl_handler_end:
+	return ret;
+}
+
+static inline int is_ack_index_in_range(struct vnic_wq_copy *wq,
+					u16 request_out)
+{
+	if (wq->to_clean_index <= wq->to_use_index) {
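+	/*
+	 * The ring indices wrap, so "in range" means request_out lies in
+	 * the half-open window [to_clean_index, to_use_index) taken modulo
+	 * the ring size; e.g. with to_clean_index = 6 and to_use_index = 2,
+	 * the valid values are 6, 7, 0 and 1.
+	 */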
+		/* out of range, stale request_out index */
+		if (request_out < wq->to_clean_index ||
+		    request_out >= wq->to_use_index)
+			return 0;
+	} else {
+		/* out of range, stale request_out index */
+		if (request_out < wq->to_clean_index &&
+		    request_out >= wq->to_use_index)
+			return 0;
+	}
+	/* request_out index is in range */
+	return 1;
+}
+
+
+/*
+ * Mark that an ack was received and store the ack index. If multiple acks
+ * arrive before the Tx thread cleans them up, the latest value is used,
+ * which is the correct behavior. This state should live in the copy WQ
+ * instead of in the fnic.
+ */
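+/*
+ * The copy WQ completion queues occupy CQ indices
+ * [raw_wq_count + rq_count, cq_count), matching the layout set up in
+ * fnic_alloc_vnic_resources(), so subtracting raw_wq_count + rq_count
+ * from cq_index below recovers the copy WQ number.
+ */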
+static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
+					  unsigned int cq_index,
+					  struct fcpio_fw_req *desc)
+{
+	struct vnic_wq_copy *wq;
+	u16 request_out = desc->u.ack.request_out;
+	unsigned long flags;
+
+	/* mark the ack state */
+	wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count];
+	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
+
+	if (is_ack_index_in_range(wq, request_out)) {
+		fnic->fw_ack_index[0] = request_out;
+		fnic->fw_ack_recd[0] = 1;
+	}
+	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
+}
+
+/*
+ * fnic_fcpio_icmnd_cmpl_handler
+ * Routine to handle icmnd completions
+ */
+static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
+					 struct fcpio_fw_req *desc)
+{
+	u8 type;
+	u8 hdr_status;
+	struct fcpio_tag tag;
+	u32 id;
+	u64 xfer_len = 0;
+	struct fcpio_icmnd_cmpl *icmnd_cmpl;
+	struct fnic_io_req *io_req;
+	struct scsi_cmnd *sc;
+	unsigned long flags;
+	spinlock_t *io_lock;
+
+	/* Decode the cmpl description to get the io_req id */
+	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
+	fcpio_tag_id_dec(&tag, &id);
+
+	if (id >= FNIC_MAX_IO_REQ)
+		return;
+
+	sc = scsi_host_find_tag(fnic->lport->host, id);
+	WARN_ON_ONCE(!sc);
+	if (!sc)
+		return;
+
+	io_lock = fnic_io_lock_hash(fnic, sc);
+	spin_lock_irqsave(io_lock, flags);
+	io_req = (struct fnic_io_req *)CMD_SP(sc);
+	WARN_ON_ONCE(!io_req);
+	if (!io_req) {
+		spin_unlock_irqrestore(io_lock, flags);
+		return;
+	}
+
+	/* firmware completed the io */
+	io_req->io_completed = 1;
+
+	/*
+	 * If SCSI-ML has already issued an abort on this command,
+	 * ignore the completion of the IO. The abort path will clean it up.
+	 */
+	if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+		spin_unlock_irqrestore(io_lock, flags);
+		return;
+	}
+
+	/* Mark the IO as complete */
+	CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
+
+	icmnd_cmpl = &desc->u.icmnd_cmpl;
+
+	switch (hdr_status) {
+	case FCPIO_SUCCESS:
+		sc->result = (DID_OK << 16) | icmnd_cmpl->scsi_status;
+		xfer_len = scsi_bufflen(sc);
+		scsi_set_resid(sc, icmnd_cmpl->residual);
+
+		if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER)
+			xfer_len -= icmnd_cmpl->residual;
+
+		/*
+		 * If queue_full, then try to reduce queue depth for all
+		 * LUNS on the target. Todo: this should be accompanied
+		 * by a periodic queue_depth rampup based on successful
+		 * IO completion.
+		 */
+		if (icmnd_cmpl->scsi_status == QUEUE_FULL) {
+			struct scsi_device *t_sdev;
+			int qd = 0;
+
+			shost_for_each_device(t_sdev, sc->device->host) {
+				if (t_sdev->id != sc->device->id)
+					continue;
+
+				if (t_sdev->queue_depth > 1) {
+					qd = scsi_track_queue_full
+						(t_sdev,
+						 t_sdev->queue_depth - 1);
+					if (qd == -1)
+						qd = t_sdev->host->cmd_per_lun;
+					shost_printk(KERN_INFO,
+						     fnic->lport->host,
+						     "scsi[%d:%d:%d:%d"
+						     "] queue full detected, "
+						     "new depth = %d\n",
+						     t_sdev->host->host_no,
+						     t_sdev->channel,
+						     t_sdev->id, t_sdev->lun,
+						     t_sdev->queue_depth);
+				}
+			}
+		}
+		break;
+
+	case FCPIO_TIMEOUT:          /* request was timed out */
+		sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status;
+		break;
+
+	case FCPIO_ABORTED:          /* request was aborted */
+		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
+		break;
+
+	case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. */
+		scsi_set_resid(sc, icmnd_cmpl->residual);
+		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
+		break;
+
+	case FCPIO_OUT_OF_RESOURCE:  /* out of resources to complete request */
+		sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status;
+		break;
+	case FCPIO_INVALID_HEADER:   /* header contains invalid data */
+	case FCPIO_INVALID_PARAM:    /* some parameter in request invalid */
+	case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */
+	case FCPIO_IO_NOT_FOUND:     /* requested I/O was not found */
+	case FCPIO_SGL_INVALID:      /* request was aborted due to sgl error */
+	case FCPIO_MSS_INVALID:      /* request was aborted due to mss error */
+	case FCPIO_FW_ERR:           /* request was terminated due fw error */
+	default:
+		shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
+			     fnic_fcpio_status_to_str(hdr_status));
+		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
+		break;
+	}
+
+	/* Break link with the SCSI command */
+	CMD_SP(sc) = NULL;
+
+	spin_unlock_irqrestore(io_lock, flags);
+
+	fnic_release_ioreq_buf(fnic, io_req, sc);
+
+	mempool_free(io_req, fnic->io_req_pool);
+
+	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
+		fnic->lport->host_stats.fcp_input_requests++;
+		fnic->fcp_input_bytes += xfer_len;
+	} else if (sc->sc_data_direction == DMA_TO_DEVICE) {
+		fnic->lport->host_stats.fcp_output_requests++;
+		fnic->fcp_output_bytes += xfer_len;
+	} else
+		fnic->lport->host_stats.fcp_control_requests++;
+
+	/* Call SCSI completion function to complete the IO */
+	if (sc->scsi_done)
+		sc->scsi_done(sc);
+
+}
+
+/* fnic_fcpio_itmf_cmpl_handler
+ * Routine to handle itmf completions
+ */
+static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
+					struct fcpio_fw_req *desc)
+{
+	u8 type;
+	u8 hdr_status;
+	struct fcpio_tag tag;
+	u32 id;
+	struct scsi_cmnd *sc;
+	struct fnic_io_req *io_req;
+	unsigned long flags;
+	spinlock_t *io_lock;
+
+	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
+	fcpio_tag_id_dec(&tag, &id);
+
+	if ((id & FNIC_TAG_MASK) >= FNIC_MAX_IO_REQ)
+		return;
+
+	sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
+	WARN_ON_ONCE(!sc);
+	if (!sc)
+		return;
+
+	io_lock = fnic_io_lock_hash(fnic, sc);
+	spin_lock_irqsave(io_lock, flags);
+	io_req = (struct fnic_io_req *)CMD_SP(sc);
+	WARN_ON_ONCE(!io_req);
+	if (!io_req) {
+		spin_unlock_irqrestore(io_lock, flags);
+		return;
+	}
+
+	if (id & FNIC_TAG_ABORT) {
+		/* Completion of abort cmd */
+		if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) {
+			/* This is a late completion. Ignore it */
+			spin_unlock_irqrestore(io_lock, flags);
+			return;
+		}
+		CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
+		CMD_ABTS_STATUS(sc) = hdr_status;
+
+		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+			      "abts cmpl recd. id %d status %s\n",
+			      (int)(id & FNIC_TAG_MASK),
+			      fnic_fcpio_status_to_str(hdr_status));
+
+		/*
+		 * If the scsi_eh thread is blocked waiting for the abort to
+		 * complete, signal completion to it; the IO will be cleaned
+		 * up in that thread. Else, clean it up in this context.
+		 */
+		if (io_req->abts_done) {
+			complete(io_req->abts_done);
+			spin_unlock_irqrestore(io_lock, flags);
+		} else {
+			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+				      "abts cmpl, completing IO\n");
+			CMD_SP(sc) = NULL;
+			sc->result = (DID_ERROR << 16);
+
+			spin_unlock_irqrestore(io_lock, flags);
+
+			fnic_release_ioreq_buf(fnic, io_req, sc);
+			mempool_free(io_req, fnic->io_req_pool);
+			if (sc->scsi_done)
+				sc->scsi_done(sc);
+		}
+
+	} else if (id & FNIC_TAG_DEV_RST) {
+		/* Completion of device reset */
+		CMD_LR_STATUS(sc) = hdr_status;
+		CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
+		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+			      "dev reset cmpl recd. id %d status %s\n",
+			      (int)(id & FNIC_TAG_MASK),
+			      fnic_fcpio_status_to_str(hdr_status));
+		if (io_req->dr_done)
+			complete(io_req->dr_done);
+		spin_unlock_irqrestore(io_lock, flags);
+
+	} else {
+		shost_printk(KERN_ERR, fnic->lport->host,
+			     "Unexpected itmf io state %s tag %x\n",
+			     fnic_ioreq_state_to_str(CMD_STATE(sc)), id);
+		spin_unlock_irqrestore(io_lock, flags);
+	}
+
+}
+
+/*
+ * fnic_fcpio_cmpl_handler
+ * Routine to service the cq for wq_copy
+ */
+static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
+				   unsigned int cq_index,
+				   struct fcpio_fw_req *desc)
+{
+	struct fnic *fnic = vnic_dev_priv(vdev);
+	int ret = 0;
+
+	switch (desc->hdr.type) {
+	case FCPIO_ACK: /* fw copied copy wq desc to its queue */
+		fnic_fcpio_ack_handler(fnic, cq_index, desc);
+		break;
+
+	case FCPIO_ICMND_CMPL: /* fw completed a command */
+		fnic_fcpio_icmnd_cmpl_handler(fnic, desc);
+		break;
+
+	case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
+		fnic_fcpio_itmf_cmpl_handler(fnic, desc);
+		break;
+
+	case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
+		ret = fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
+		break;
+
+	case FCPIO_RESET_CMPL: /* fw completed reset */
+		ret = fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
+		break;
+
+	default:
+		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+			      "firmware completion type %d\n",
+			      desc->hdr.type);
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * fnic_wq_copy_cmpl_handler
+ * Routine to process wq copy
+ */
+int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
+{
+	unsigned int wq_work_done = 0;
+	unsigned int i, cq_index;
+	unsigned int cur_work_done;
+
+	for (i = 0; i < fnic->wq_copy_count; i++) {
+		cq_index = i + fnic->raw_wq_count + fnic->rq_count;
+		cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index],
+						     fnic_fcpio_cmpl_handler,
+						     copy_work_to_do);
+		wq_work_done += cur_work_done;
+	}
+	return wq_work_done;
+}
+
+static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
+{
+	unsigned int i;
+	struct fnic_io_req *io_req;
+	unsigned long flags = 0;
+	struct scsi_cmnd *sc;
+	spinlock_t *io_lock;
+
+	for (i = 0; i < FNIC_MAX_IO_REQ; i++) {
+		if (i == exclude_id)
+			continue;
+
+		sc = scsi_host_find_tag(fnic->lport->host, i);
+		if (!sc)
+			continue;
+
+		io_lock = fnic_io_lock_hash(fnic, sc);
+		spin_lock_irqsave(io_lock, flags);
+		io_req = (struct fnic_io_req *)CMD_SP(sc);
+		if (!io_req) {
+			spin_unlock_irqrestore(io_lock, flags);
+			goto cleanup_scsi_cmd;
+		}
+
+		CMD_SP(sc) = NULL;
+
+		spin_unlock_irqrestore(io_lock, flags);
+
+		/*
+		 * If there is a scsi_cmnd associated with this io_req, then
+		 * free the corresponding state
+		 */
+		fnic_release_ioreq_buf(fnic, io_req, sc);
+		mempool_free(io_req, fnic->io_req_pool);
+
+cleanup_scsi_cmd:
+		sc->result = DID_TRANSPORT_DISRUPTED << 16;
+		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_cleanup_io:"
+			      " DID_TRANSPORT_DISRUPTED\n");
+
+		/* Complete the command to SCSI */
+		if (sc->scsi_done)
+			sc->scsi_done(sc);
+	}
+}
+
+void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
+				  struct fcpio_host_req *desc)
+{
+	u32 id;
+	struct fnic *fnic = vnic_dev_priv(wq->vdev);
+	struct fnic_io_req *io_req;
+	struct scsi_cmnd *sc;
+	unsigned long flags;
+	spinlock_t *io_lock;
+
+	/* get the tag reference */
+	fcpio_tag_id_dec(&desc->hdr.tag, &id);
+	id &= FNIC_TAG_MASK;
+
+	if (id >= FNIC_MAX_IO_REQ)
+		return;
+
+	sc = scsi_host_find_tag(fnic->lport->host, id);
+	if (!sc)
+		return;
+
+	io_lock = fnic_io_lock_hash(fnic, sc);
+	spin_lock_irqsave(io_lock, flags);
+
+	/* Get the IO context which this desc refers to */
+	io_req = (struct fnic_io_req *)CMD_SP(sc);
+
+	/* fnic interrupts are turned off by now */
+
+	if (!io_req) {
+		spin_unlock_irqrestore(io_lock, flags);
+		goto wq_copy_cleanup_scsi_cmd;
+	}
+
+	CMD_SP(sc) = NULL;
+
+	spin_unlock_irqrestore(io_lock, flags);
+
+	fnic_release_ioreq_buf(fnic, io_req, sc);
+	mempool_free(io_req, fnic->io_req_pool);
+
+wq_copy_cleanup_scsi_cmd:
+	sc->result = DID_NO_CONNECT << 16;
+	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:"
+		      " DID_NO_CONNECT\n");
+
+	if (sc->scsi_done)
+		sc->scsi_done(sc);
+}
+
+static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
+					  u32 task_req, u8 *fc_lun,
+					  struct fnic_io_req *io_req)
+{
+	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
+	unsigned long flags;
+
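+	/*
+	 * The abort shares the original IO's tag, with FNIC_TAG_ABORT set
+	 * so that fnic_fcpio_itmf_cmpl_handler() can tell the abort
+	 * completion apart from the IO completion and recover the SCSI tag
+	 * by masking with FNIC_TAG_MASK.
+	 */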
+	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
+
+	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
+		free_wq_copy_descs(fnic, wq);
+
+	if (!vnic_wq_copy_desc_avail(wq)) {
+		spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
+		return 1;
+	}
+	fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
+				     0, task_req, tag, fc_lun, io_req->port_id,
+				     fnic->config.ra_tov, fnic->config.ed_tov);
+
+	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
+	return 0;
+}
+
+void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
+{
+	int tag;
+	struct fnic_io_req *io_req;
+	spinlock_t *io_lock;
+	unsigned long flags;
+	struct scsi_cmnd *sc;
+	struct scsi_lun fc_lun;
+	enum fnic_ioreq_state old_ioreq_state;
+
+	FNIC_SCSI_DBG(KERN_DEBUG,
+		      fnic->lport->host,
+		      "fnic_rport_exch_reset called portid 0x%06x\n",
+		      port_id);
+
+	if (fnic->in_remove)
+		return;
+
+	for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+		sc = scsi_host_find_tag(fnic->lport->host, tag);
+		if (!sc)
+			continue;
+
+		io_lock = fnic_io_lock_hash(fnic, sc);
+		spin_lock_irqsave(io_lock, flags);
+
+		io_req = (struct fnic_io_req *)CMD_SP(sc);
+
+		if (!io_req || io_req->port_id != port_id) {
+			spin_unlock_irqrestore(io_lock, flags);
+			continue;
+		}
+
+		/*
+		 * Found IO that is still pending with firmware and
+		 * belongs to rport that went away
+		 */
+		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+			spin_unlock_irqrestore(io_lock, flags);
+			continue;
+		}
+		old_ioreq_state = CMD_STATE(sc);
+		CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
+		CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
+
+		BUG_ON(io_req->abts_done);
+
+		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+			      "fnic_rport_exch_reset: Issuing abts\n");
+
+		spin_unlock_irqrestore(io_lock, flags);
+
+		/* Now queue the abort command to firmware */
+		int_to_scsilun(sc->device->lun, &fc_lun);
+
+		if (fnic_queue_abort_io_req(fnic, tag,
+					    FCPIO_ITMF_ABT_TASK_TERM,
+					    fc_lun.scsi_lun, io_req)) {
+			/*
+			 * Revert the cmd state back to old state, if
+			 * it hasn't changed in between. This cmd will get
+			 * aborted later by scsi_eh, or cleaned up during
+			 * lun reset
+			 */
+			io_lock = fnic_io_lock_hash(fnic, sc);
+
+			spin_lock_irqsave(io_lock, flags);
+			if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
+				CMD_STATE(sc) = old_ioreq_state;
+			spin_unlock_irqrestore(io_lock, flags);
+		}
+	}
+
+}
+
+void fnic_terminate_rport_io(struct fc_rport *rport)
+{
+	int tag;
+	struct fnic_io_req *io_req;
+	spinlock_t *io_lock;
+	unsigned long flags;
+	struct scsi_cmnd *sc;
+	struct scsi_lun fc_lun;
+	struct fc_rport_libfc_priv *rdata = rport->dd_data;
+	struct fc_lport *lport = rdata->local_port;
+	struct fnic *fnic = lport_priv(lport);
+	struct fc_rport *cmd_rport;
+	enum fnic_ioreq_state old_ioreq_state;
+
+	FNIC_SCSI_DBG(KERN_DEBUG,
+		      fnic->lport->host, "fnic_terminate_rport_io called"
+		      " wwpn 0x%llx, wwnn 0x%llx, portid 0x%06x\n",
+		      rport->port_name, rport->node_name,
+		      rport->port_id);
+
+	if (fnic->in_remove)
+		return;
+
+	for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+		sc = scsi_host_find_tag(fnic->lport->host, tag);
+		if (!sc)
+			continue;
+
+		cmd_rport = starget_to_rport(scsi_target(sc->device));
+		if (rport != cmd_rport)
+			continue;
+
+		io_lock = fnic_io_lock_hash(fnic, sc);
+		spin_lock_irqsave(io_lock, flags);
+
+		io_req = (struct fnic_io_req *)CMD_SP(sc);
+
+		if (!io_req || rport != cmd_rport) {
+			spin_unlock_irqrestore(io_lock, flags);
+			continue;
+		}
+
+		/*
+		 * Found IO that is still pending with firmware and
+		 * belongs to rport that went away
+		 */
+		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+			spin_unlock_irqrestore(io_lock, flags);
+			continue;
+		}
+		old_ioreq_state = CMD_STATE(sc);
+		CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
+		CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
+
+		BUG_ON(io_req->abts_done);
+
+		FNIC_SCSI_DBG(KERN_DEBUG,
+			      fnic->lport->host,
+			      "fnic_terminate_rport_io: Issuing abts\n");
+
+		spin_unlock_irqrestore(io_lock, flags);
+
+		/* Now queue the abort command to firmware */
+		int_to_scsilun(sc->device->lun, &fc_lun);
+
+		if (fnic_queue_abort_io_req(fnic, tag,
+					    FCPIO_ITMF_ABT_TASK_TERM,
+					    fc_lun.scsi_lun, io_req)) {
+			/*
+			 * Revert the cmd state back to old state, if
+			 * it hasn't changed in between. This cmd will get
+			 * aborted later by scsi_eh, or cleaned up during
+			 * lun reset
+			 */
+			io_lock = fnic_io_lock_hash(fnic, sc);
+
+			spin_lock_irqsave(io_lock, flags);
+			if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
+				CMD_STATE(sc) = old_ioreq_state;
+			spin_unlock_irqrestore(io_lock, flags);
+		}
+	}
+
+}
+
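+/*
+ * Block the error handler until the fc_rport leaves the BLOCKED state,
+ * polling once a second. The host lock is dropped across each sleep so
+ * the transport can make progress and unblock the rport.
+ */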
+static void fnic_block_error_handler(struct scsi_cmnd *sc)
+{
+	struct Scsi_Host *shost = sc->device->host;
+	struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
+	unsigned long flags;
+
+	spin_lock_irqsave(shost->host_lock, flags);
+	while (rport->port_state == FC_PORTSTATE_BLOCKED) {
+		spin_unlock_irqrestore(shost->host_lock, flags);
+		msleep(1000);
+		spin_lock_irqsave(shost->host_lock, flags);
+	}
+	spin_unlock_irqrestore(shost->host_lock, flags);
+
+}
+
+/*
+ * This function is exported to SCSI for sending abort cmnds.
+ * A SCSI IO is represented by a io_req in the driver.
+ * The ioreq is linked to the SCSI Cmd, thus a link with the ULP's IO.
+ */
+int fnic_abort_cmd(struct scsi_cmnd *sc)
+{
+	struct fc_lport *lp;
+	struct fnic *fnic;
+	struct fnic_io_req *io_req;
+	struct fc_rport *rport;
+	spinlock_t *io_lock;
+	unsigned long flags;
+	int ret = SUCCESS;
+	u32 task_req;
+	struct scsi_lun fc_lun;
+	DECLARE_COMPLETION_ONSTACK(tm_done);
+
+	/* Wait for rport to unblock */
+	fnic_block_error_handler(sc);
+
+	/* Get local-port, check ready and link up */
+	lp = shost_priv(sc->device->host);
+
+	fnic = lport_priv(lp);
+	FNIC_SCSI_DBG(KERN_DEBUG,
+		      fnic->lport->host,
+		      "Abort Cmd called FCID 0x%x, LUN 0x%x TAG %d\n",
+		      (starget_to_rport(scsi_target(sc->device)))->port_id,
+		      sc->device->lun, sc->request->tag);
+
+	if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
+		ret = FAILED;
+		goto fnic_abort_cmd_end;
+	}
+
+	/*
+	 * Avoid a race between SCSI issuing the abort and the device
+	 * completing the command.
+	 *
+	 * If the command is already completed by the fw cmpl code,
+	 * we just return SUCCESS from here. This means that the abort
+	 * succeeded. In the SCSI ML, since the timeout for the command has
+	 * already happened, the completion won't actually complete the
+	 * command and it will be considered as an aborted command.
+	 *
+	 * The CMD_SP will not be cleared except while holding io_req_lock.
+	 */
+	io_lock = fnic_io_lock_hash(fnic, sc);
+	spin_lock_irqsave(io_lock, flags);
+	io_req = (struct fnic_io_req *)CMD_SP(sc);
+	if (!io_req) {
+		spin_unlock_irqrestore(io_lock, flags);
+		goto fnic_abort_cmd_end;
+	}
+
+	io_req->abts_done = &tm_done;
+
+	if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+		spin_unlock_irqrestore(io_lock, flags);
+		goto wait_pending;
+	}
+	/*
+	 * Command is still pending, so we need to abort it.
+	 * If the firmware completes the command after this point,
+	 * the completion won't be passed up to the mid-layer, since
+	 * the abort has already started.
+	 */
+	CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
+	CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
+
+	spin_unlock_irqrestore(io_lock, flags);
+
+	/*
+	 * Check readiness of the remote port. If the path to remote
+	 * port is up, then send abts to the remote port to terminate
+	 * the IO. Else, just locally terminate the IO in the firmware
+	 */
+	rport = starget_to_rport(scsi_target(sc->device));
+	if (fc_remote_port_chkready(rport) == 0)
+		task_req = FCPIO_ITMF_ABT_TASK;
+	else
+		task_req = FCPIO_ITMF_ABT_TASK_TERM;
+
+	/* Now queue the abort command to firmware */
+	int_to_scsilun(sc->device->lun, &fc_lun);
+
+	if (fnic_queue_abort_io_req(fnic, sc->request->tag, task_req,
+				    fc_lun.scsi_lun, io_req)) {
+		spin_lock_irqsave(io_lock, flags);
+		io_req = (struct fnic_io_req *)CMD_SP(sc);
+		if (io_req)
+			io_req->abts_done = NULL;
+		spin_unlock_irqrestore(io_lock, flags);
+		ret = FAILED;
+		goto fnic_abort_cmd_end;
+	}
+
+	/*
+	 * We queued an abort IO, wait for its completion.
+	 * Once the firmware completes the abort command, it will
+	 * wake up this thread.
+	 */
+ wait_pending:
+	wait_for_completion_timeout(&tm_done,
+				    msecs_to_jiffies
+				    (2 * fnic->config.ra_tov +
+				     fnic->config.ed_tov));
+
+	/* Check the abort status */
+	spin_lock_irqsave(io_lock, flags);
+
+	io_req = (struct fnic_io_req *)CMD_SP(sc);
+	if (!io_req) {
+		spin_unlock_irqrestore(io_lock, flags);
+		ret = FAILED;
+		goto fnic_abort_cmd_end;
+	}
+	io_req->abts_done = NULL;
+
+	/* fw did not complete abort, timed out */
+	if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+		spin_unlock_irqrestore(io_lock, flags);
+		ret = FAILED;
+		goto fnic_abort_cmd_end;
+	}
+
+	/*
+	 * firmware completed the abort, check the status,
+	 * free the io_req irrespective of failure or success
+	 */
+	if (CMD_ABTS_STATUS(sc) != FCPIO_SUCCESS)
+		ret = FAILED;
+
+	CMD_SP(sc) = NULL;
+
+	spin_unlock_irqrestore(io_lock, flags);
+
+	fnic_release_ioreq_buf(fnic, io_req, sc);
+	mempool_free(io_req, fnic->io_req_pool);
+
+fnic_abort_cmd_end:
+	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+		      "Returning from abort cmd %s\n",
+		      (ret == SUCCESS) ?
+		      "SUCCESS" : "FAILED");
+	return ret;
+}
+
+static inline int fnic_queue_dr_io_req(struct fnic *fnic,
+				       struct scsi_cmnd *sc,
+				       struct fnic_io_req *io_req)
+{
+	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
+	struct scsi_lun fc_lun;
+	int ret = 0;
+	unsigned long intr_flags;
+
+	spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);
+
+	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
+		free_wq_copy_descs(fnic, wq);
+
+	if (!vnic_wq_copy_desc_avail(wq)) {
+		ret = -EAGAIN;
+		goto lr_io_req_end;
+	}
+
+	/* fill in the lun info */
+	int_to_scsilun(sc->device->lun, &fc_lun);
+
+	fnic_queue_wq_copy_desc_itmf(wq, sc->request->tag | FNIC_TAG_DEV_RST,
+				     0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG,
+				     fc_lun.scsi_lun, io_req->port_id,
+				     fnic->config.ra_tov, fnic->config.ed_tov);
+
+lr_io_req_end:
+	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
+
+	return ret;
+}
+
+/*
+ * Clean up any pending aborts on the lun
+ * For each outstanding IO on this lun, whose abort is not completed by fw,
+ * issue a local abort. Wait for abort to complete. Return 0 if all commands
+ * successfully aborted, 1 otherwise
+ */
+static int fnic_clean_pending_aborts(struct fnic *fnic,
+				     struct scsi_cmnd *lr_sc)
+{
+	int tag;
+	struct fnic_io_req *io_req;
+	spinlock_t *io_lock;
+	unsigned long flags;
+	int ret = 0;
+	struct scsi_cmnd *sc;
+	struct fc_rport *rport;
+	struct scsi_lun fc_lun;
+	struct scsi_device *lun_dev = lr_sc->device;
+	DECLARE_COMPLETION_ONSTACK(tm_done);
+
+	for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+		sc = scsi_host_find_tag(fnic->lport->host, tag);
+		/*
+		 * ignore this lun reset cmd or cmds that do not belong to
+		 * this lun
+		 */
+		if (!sc || sc == lr_sc || sc->device != lun_dev)
+			continue;
+
+		io_lock = fnic_io_lock_hash(fnic, sc);
+		spin_lock_irqsave(io_lock, flags);
+
+		io_req = (struct fnic_io_req *)CMD_SP(sc);
+
+		if (!io_req || sc->device != lun_dev) {
+			spin_unlock_irqrestore(io_lock, flags);
+			continue;
+		}
+
+		/*
+		 * Found IO that is still pending with firmware and
+		 * belongs to the LUN that we are resetting
+		 */
+		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+			      "Found IO in %s on lun\n",
+			      fnic_ioreq_state_to_str(CMD_STATE(sc)));
+
+		BUG_ON(CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING);
+
+		CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
+		io_req->abts_done = &tm_done;
+		spin_unlock_irqrestore(io_lock, flags);
+
+		/* Now queue the abort command to firmware */
+		int_to_scsilun(sc->device->lun, &fc_lun);
+		rport = starget_to_rport(scsi_target(sc->device));
+
+		if (fnic_queue_abort_io_req(fnic, tag,
+					    FCPIO_ITMF_ABT_TASK_TERM,
+					    fc_lun.scsi_lun, io_req)) {
+			spin_lock_irqsave(io_lock, flags);
+			io_req = (struct fnic_io_req *)CMD_SP(sc);
+			if (io_req)
+				io_req->abts_done = NULL;
+			spin_unlock_irqrestore(io_lock, flags);
+			ret = 1;
+			goto clean_pending_aborts_end;
+		}
+
+		wait_for_completion_timeout(&tm_done,
+					    msecs_to_jiffies
+					    (fnic->config.ed_tov));
+
+		/* Recheck cmd state to check if it is now aborted */
+		spin_lock_irqsave(io_lock, flags);
+		io_req = (struct fnic_io_req *)CMD_SP(sc);
+		if (!io_req) {
+			spin_unlock_irqrestore(io_lock, flags);
+			ret = 1;
+			goto clean_pending_aborts_end;
+		}
+
+		io_req->abts_done = NULL;
+
+		/* if abort is still pending with fw, fail */
+		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+			spin_unlock_irqrestore(io_lock, flags);
+			ret = 1;
+			goto clean_pending_aborts_end;
+		}
+		CMD_SP(sc) = NULL;
+		spin_unlock_irqrestore(io_lock, flags);
+
+		fnic_release_ioreq_buf(fnic, io_req, sc);
+		mempool_free(io_req, fnic->io_req_pool);
+	}
+
+clean_pending_aborts_end:
+	return ret;
+}
+
+/*
+ * The SCSI EH thread issues a LUN reset when one or more commands on a LUN
+ * fail to get aborted. It calls the driver's eh_device_reset with a SCSI
+ * command on the LUN.
+ */
+int fnic_device_reset(struct scsi_cmnd *sc)
+{
+	struct fc_lport *lp;
+	struct fnic *fnic;
+	struct fnic_io_req *io_req;
+	struct fc_rport *rport;
+	int status;
+	int ret = FAILED;
+	spinlock_t *io_lock;
+	unsigned long flags;
+	DECLARE_COMPLETION_ONSTACK(tm_done);
+
+	/* Wait for rport to unblock */
+	fnic_block_error_handler(sc);
+
+	/* Get local-port, check ready and link up */
+	lp = shost_priv(sc->device->host);
+
+	fnic = lport_priv(lp);
+	FNIC_SCSI_DBG(KERN_DEBUG,
+		      fnic->lport->host,
+		      "Device reset called FCID 0x%x, LUN 0x%x\n",
+		      (starget_to_rport(scsi_target(sc->device)))->port_id,
+		      sc->device->lun);
+
+
+	if (lp->state != LPORT_ST_READY || !(lp->link_up))
+		goto fnic_device_reset_end;
+
+	/* Check if remote port up */
+	rport = starget_to_rport(scsi_target(sc->device));
+	if (fc_remote_port_chkready(rport))
+		goto fnic_device_reset_end;
+
+	io_lock = fnic_io_lock_hash(fnic, sc);
+	spin_lock_irqsave(io_lock, flags);
+	io_req = (struct fnic_io_req *)CMD_SP(sc);
+
+	/*
+	 * If there is an io_req attached to this command, then use it,
+	 * else allocate a new one.
+	 */
+	if (!io_req) {
+		io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
+		if (!io_req) {
+			spin_unlock_irqrestore(io_lock, flags);
+			goto fnic_device_reset_end;
+		}
+		memset(io_req, 0, sizeof(*io_req));
+		io_req->port_id = rport->port_id;
+		CMD_SP(sc) = (char *)io_req;
+	}
+	io_req->dr_done = &tm_done;
+	CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
+	CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE;
+	spin_unlock_irqrestore(io_lock, flags);
+
+	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %d\n",
+		      sc->request->tag);
+
+	/*
+	 * issue the device reset, if enqueue failed, clean up the ioreq
+	 * and break assoc with scsi cmd
+	 */
+	if (fnic_queue_dr_io_req(fnic, sc, io_req)) {
+		spin_lock_irqsave(io_lock, flags);
+		io_req = (struct fnic_io_req *)CMD_SP(sc);
+		if (io_req)
+			io_req->dr_done = NULL;
+		goto fnic_device_reset_clean;
+	}
+
+	/*
+	 * Wait on the local completion for LUN reset.  The io_req may be
+	 * freed while we wait since we hold no lock.
+	 */
+	wait_for_completion_timeout(&tm_done,
+				    msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
+
+	spin_lock_irqsave(io_lock, flags);
+	io_req = (struct fnic_io_req *)CMD_SP(sc);
+	if (!io_req) {
+		spin_unlock_irqrestore(io_lock, flags);
+		goto fnic_device_reset_end;
+	}
+	io_req->dr_done = NULL;
+
+	status = CMD_LR_STATUS(sc);
+	spin_unlock_irqrestore(io_lock, flags);
+
+	/*
+	 * If lun reset not completed, bail out with failed. io_req
+	 * gets cleaned up during higher levels of EH
+	 */
+	if (status == FCPIO_INVALID_CODE) {
+		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+			      "Device reset timed out\n");
+		goto fnic_device_reset_end;
+	}
+
+	/* Completed, but not successful, clean up the io_req, return fail */
+	if (status != FCPIO_SUCCESS) {
+		spin_lock_irqsave(io_lock, flags);
+		FNIC_SCSI_DBG(KERN_DEBUG,
+			      fnic->lport->host,
+			      "Device reset completed - failed\n");
+		io_req = (struct fnic_io_req *)CMD_SP(sc);
+		goto fnic_device_reset_clean;
+	}
+
+	/*
+	 * Clean up any aborts on this lun that have still not
+	 * completed. If any of these fail, then LUN reset fails.
+	 * clean_pending_aborts cleans all cmds on this lun except
+	 * the lun reset cmd. If all cmds get cleaned, the lun reset
+	 * succeeds
+	 */
+	if (fnic_clean_pending_aborts(fnic, sc)) {
+		spin_lock_irqsave(io_lock, flags);
+		io_req = (struct fnic_io_req *)CMD_SP(sc);
+		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+			      "Device reset failed"
+			      " since could not abort all IOs\n");
+		goto fnic_device_reset_clean;
+	}
+
+	/* Clean lun reset command */
+	spin_lock_irqsave(io_lock, flags);
+	io_req = (struct fnic_io_req *)CMD_SP(sc);
+	if (io_req)
+		/* Completed, and successful */
+		ret = SUCCESS;
+
+fnic_device_reset_clean:
+	if (io_req)
+		CMD_SP(sc) = NULL;
+
+	spin_unlock_irqrestore(io_lock, flags);
+
+	if (io_req) {
+		fnic_release_ioreq_buf(fnic, io_req, sc);
+		mempool_free(io_req, fnic->io_req_pool);
+	}
+
+fnic_device_reset_end:
+	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+		      "Returning from device reset %s\n",
+		      (ret == SUCCESS) ?
+		      "SUCCESS" : "FAILED");
+	return ret;
+}
+
+/* Clean up all IOs, clean up libFC local port */
+int fnic_reset(struct Scsi_Host *shost)
+{
+	struct fc_lport *lp;
+	struct fnic *fnic;
+	int ret = SUCCESS;
+
+	lp = shost_priv(shost);
+	fnic = lport_priv(lp);
+
+	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+		      "fnic_reset called\n");
+
+	/*
+	 * Reset local port, this will clean up libFC exchanges,
+	 * reset remote port sessions, and if link is up, begin flogi
+	 */
+	if (lp->tt.lport_reset(lp))
+		ret = FAILED;
+
+	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+		      "Returning from fnic reset %s\n",
+		      (ret == SUCCESS) ?
+		      "SUCCESS" : "FAILED");
+
+	return ret;
+}
+
+/*
+ * SCSI error handling calls the driver's eh_host_reset if all prior
+ * error handling levels return FAILED. If the host reset completes
+ * successfully and the link is up, fabric login begins.
+ *
+ * Host reset is the highest level of error recovery. If this fails, the
+ * host is offlined by SCSI.
+ */
+int fnic_host_reset(struct scsi_cmnd *sc)
+{
+	int ret;
+	unsigned long wait_host_tmo;
+	struct Scsi_Host *shost = sc->device->host;
+	struct fc_lport *lp = shost_priv(shost);
+
+	/*
+	 * If fnic_reset is successful, wait for the fabric login to complete.
+	 * scsi-ml tries to send a TUR to every device if host reset is
+	 * successful, so the fabric should be up before returning to SCSI.
+	 */
+	ret = fnic_reset(shost);
+	if (ret == SUCCESS) {
+		wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ;
+		ret = FAILED;
+		while (time_before(jiffies, wait_host_tmo)) {
+			if ((lp->state == LPORT_ST_READY) &&
+			    (lp->link_up)) {
+				ret = SUCCESS;
+				break;
+			}
+			ssleep(1);
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * This function is called from libFC when the host is removed.
+ */
+void fnic_scsi_abort_io(struct fc_lport *lp)
+{
+	int err = 0;
+	unsigned long flags;
+	enum fnic_state old_state;
+	struct fnic *fnic = lport_priv(lp);
+	DECLARE_COMPLETION_ONSTACK(remove_wait);
+
+	/* Issue firmware reset for fnic, wait for reset to complete */
+	spin_lock_irqsave(&fnic->fnic_lock, flags);
+	fnic->remove_wait = &remove_wait;
+	old_state = fnic->state;
+	fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
+	vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr);
+	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+	err = fnic_fw_reset_handler(fnic);
+	if (err) {
+		spin_lock_irqsave(&fnic->fnic_lock, flags);
+		if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
+			fnic->state = old_state;
+		fnic->remove_wait = NULL;
+		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+		return;
+	}
+
+	/* Wait for firmware reset to complete */
+	wait_for_completion_timeout(&remove_wait,
+				    msecs_to_jiffies(FNIC_RMDEVICE_TIMEOUT));
+
+	spin_lock_irqsave(&fnic->fnic_lock, flags);
+	fnic->remove_wait = NULL;
+	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+		      "fnic_scsi_abort_io %s\n",
+		      (fnic->state == FNIC_IN_ETH_MODE) ?
+		      "SUCCESS" : "FAILED");
+	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+}
+
+/*
+ * This function is called from libFC to clean up driver IO state on link down.
+ */
+void fnic_scsi_cleanup(struct fc_lport *lp)
+{
+	unsigned long flags;
+	enum fnic_state old_state;
+	struct fnic *fnic = lport_priv(lp);
+
+	/* issue fw reset */
+	spin_lock_irqsave(&fnic->fnic_lock, flags);
+	old_state = fnic->state;
+	fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
+	vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr);
+	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+	if (fnic_fw_reset_handler(fnic)) {
+		spin_lock_irqsave(&fnic->fnic_lock, flags);
+		if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
+			fnic->state = old_state;
+		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+	}
+}
+
+void fnic_empty_scsi_cleanup(struct fc_lport *lp)
+{
+}
+
+void fnic_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did)
+{
+	struct fnic *fnic = lport_priv(lp);
+
+	/* Non-zero sid, nothing to do */
+	if (sid)
+		goto call_fc_exch_mgr_reset;
+
+	if (did) {
+		fnic_rport_exch_reset(fnic, did);
+		goto call_fc_exch_mgr_reset;
+	}
+
+	/*
+	 * sid = 0, did = 0
+	 * link down or device being removed
+	 */
+	if (!fnic->in_remove)
+		fnic_scsi_cleanup(lp);
+	else
+		fnic_scsi_abort_io(lp);
+
+	/* call libFC exch mgr reset to reset its exchanges */
+call_fc_exch_mgr_reset:
+	fc_exch_mgr_reset(lp, sid, did);
+}
diff --git a/drivers/scsi/fnic/rq_enet_desc.h b/drivers/scsi/fnic/rq_enet_desc.h
new file mode 100644
index 0000000..92e80ae
--- /dev/null
+++ b/drivers/scsi/fnic/rq_enet_desc.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _RQ_ENET_DESC_H_
+#define _RQ_ENET_DESC_H_
+
+/* Ethernet receive queue descriptor: 16B */
+struct rq_enet_desc {
+	__le64 address;
+	__le16 length_type;
+	u8 reserved[6];
+};
+
+enum rq_enet_type_types {
+	RQ_ENET_TYPE_ONLY_SOP = 0,
+	RQ_ENET_TYPE_NOT_SOP = 1,
+	RQ_ENET_TYPE_RESV2 = 2,
+	RQ_ENET_TYPE_RESV3 = 3,
+};
+
+#define RQ_ENET_ADDR_BITS		64
+#define RQ_ENET_LEN_BITS		14
+#define RQ_ENET_LEN_MASK		((1 << RQ_ENET_LEN_BITS) - 1)
+#define RQ_ENET_TYPE_BITS		2
+#define RQ_ENET_TYPE_MASK		((1 << RQ_ENET_TYPE_BITS) - 1)
+
+static inline void rq_enet_desc_enc(struct rq_enet_desc *desc,
+	u64 address, u8 type, u16 length)
+{
+	desc->address = cpu_to_le64(address);
+	desc->length_type = cpu_to_le16((length & RQ_ENET_LEN_MASK) |
+		((type & RQ_ENET_TYPE_MASK) << RQ_ENET_LEN_BITS));
+}
+
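+/*
+ * For illustration: rq_enet_desc_enc(desc, dma_addr, RQ_ENET_TYPE_ONLY_SOP,
+ * 1500) stores length_type = cpu_to_le16(1500), since the 14-bit length
+ * occupies bits 0..13 and the 2-bit type (0 here) occupies bits 14..15.
+ */
+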
+static inline void rq_enet_desc_dec(struct rq_enet_desc *desc,
+	u64 *address, u8 *type, u16 *length)
+{
+	*address = le64_to_cpu(desc->address);
+	*length = le16_to_cpu(desc->length_type) & RQ_ENET_LEN_MASK;
+	*type = (u8)((le16_to_cpu(desc->length_type) >> RQ_ENET_LEN_BITS) &
+		RQ_ENET_TYPE_MASK);
+}
+
+#endif /* _RQ_ENET_DESC_H_ */
diff --git a/drivers/scsi/fnic/vnic_cq.c b/drivers/scsi/fnic/vnic_cq.c
new file mode 100644
index 0000000..c5db32e
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_cq.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include "vnic_dev.h"
+#include "vnic_cq.h"
+
+void vnic_cq_free(struct vnic_cq *cq)
+{
+	vnic_dev_free_desc_ring(cq->vdev, &cq->ring);
+
+	cq->ctrl = NULL;
+}
+
+int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
+	unsigned int desc_count, unsigned int desc_size)
+{
+	int err;
+
+	cq->index = index;
+	cq->vdev = vdev;
+
+	cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index);
+	if (!cq->ctrl) {
+		printk(KERN_ERR "Failed to hook CQ[%d] resource\n", index);
+		return -EINVAL;
+	}
+
+	err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
+	unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
+	unsigned int cq_tail_color, unsigned int interrupt_enable,
+	unsigned int cq_entry_enable, unsigned int cq_message_enable,
+	unsigned int interrupt_offset, u64 cq_message_addr)
+{
+	u64 paddr;
+
+	paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET;
+	writeq(paddr, &cq->ctrl->ring_base);
+	iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size);
+	iowrite32(flow_control_enable, &cq->ctrl->flow_control_enable);
+	iowrite32(color_enable, &cq->ctrl->color_enable);
+	iowrite32(cq_head, &cq->ctrl->cq_head);
+	iowrite32(cq_tail, &cq->ctrl->cq_tail);
+	iowrite32(cq_tail_color, &cq->ctrl->cq_tail_color);
+	iowrite32(interrupt_enable, &cq->ctrl->interrupt_enable);
+	iowrite32(cq_entry_enable, &cq->ctrl->cq_entry_enable);
+	iowrite32(cq_message_enable, &cq->ctrl->cq_message_enable);
+	iowrite32(interrupt_offset, &cq->ctrl->interrupt_offset);
+	writeq(cq_message_addr, &cq->ctrl->cq_message_addr);
+}
+
+void vnic_cq_clean(struct vnic_cq *cq)
+{
+	cq->to_clean = 0;
+	cq->last_color = 0;
+
+	iowrite32(0, &cq->ctrl->cq_head);
+	iowrite32(0, &cq->ctrl->cq_tail);
+	iowrite32(1, &cq->ctrl->cq_tail_color);
+
+	vnic_dev_clear_desc_ring(&cq->ring);
+}
diff --git a/drivers/scsi/fnic/vnic_cq.h b/drivers/scsi/fnic/vnic_cq.h
new file mode 100644
index 0000000..4ede680
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_cq.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_CQ_H_
+#define _VNIC_CQ_H_
+
+#include "cq_desc.h"
+#include "vnic_dev.h"
+
+/*
+ * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth
+ * Driver) when both are built with CONFIG options =y
+ */
+#define vnic_cq_service fnic_cq_service
+#define vnic_cq_free fnic_cq_free
+#define vnic_cq_alloc fnic_cq_alloc
+#define vnic_cq_init fnic_cq_init
+#define vnic_cq_clean fnic_cq_clean
+
+/* Completion queue control */
+struct vnic_cq_ctrl {
+	u64 ring_base;			/* 0x00 */
+	u32 ring_size;			/* 0x08 */
+	u32 pad0;
+	u32 flow_control_enable;	/* 0x10 */
+	u32 pad1;
+	u32 color_enable;		/* 0x18 */
+	u32 pad2;
+	u32 cq_head;			/* 0x20 */
+	u32 pad3;
+	u32 cq_tail;			/* 0x28 */
+	u32 pad4;
+	u32 cq_tail_color;		/* 0x30 */
+	u32 pad5;
+	u32 interrupt_enable;		/* 0x38 */
+	u32 pad6;
+	u32 cq_entry_enable;		/* 0x40 */
+	u32 pad7;
+	u32 cq_message_enable;		/* 0x48 */
+	u32 pad8;
+	u32 interrupt_offset;		/* 0x50 */
+	u32 pad9;
+	u64 cq_message_addr;		/* 0x58 */
+	u32 pad10;
+};
+
+struct vnic_cq {
+	unsigned int index;
+	struct vnic_dev *vdev;
+	struct vnic_cq_ctrl __iomem *ctrl;	/* memory-mapped */
+	struct vnic_dev_ring ring;
+	unsigned int to_clean;
+	unsigned int last_color;
+};
+
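+/*
+ * Service completed descriptors.  The hardware toggles a color bit in each
+ * descriptor it posts; a descriptor is new only while its color differs from
+ * cq->last_color, which is flipped every time to_clean wraps around the
+ * ring.  q_service() is invoked per descriptor and may stop the scan early
+ * by returning non-zero.
+ */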
+static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
+	unsigned int work_to_do,
+	int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc,
+	u8 type, u16 q_number, u16 completed_index, void *opaque),
+	void *opaque)
+{
+	struct cq_desc *cq_desc;
+	unsigned int work_done = 0;
+	u16 q_number, completed_index;
+	u8 type, color;
+
+	cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
+		cq->ring.desc_size * cq->to_clean);
+	cq_desc_dec(cq_desc, &type, &color,
+		&q_number, &completed_index);
+
+	while (color != cq->last_color) {
+
+		if ((*q_service)(cq->vdev, cq_desc, type,
+			q_number, completed_index, opaque))
+			break;
+
+		cq->to_clean++;
+		if (cq->to_clean == cq->ring.desc_count) {
+			cq->to_clean = 0;
+			cq->last_color = cq->last_color ? 0 : 1;
+		}
+
+		cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
+			cq->ring.desc_size * cq->to_clean);
+		cq_desc_dec(cq_desc, &type, &color,
+			&q_number, &completed_index);
+
+		work_done++;
+		if (work_done >= work_to_do)
+			break;
+	}
+
+	return work_done;
+}
+
+void vnic_cq_free(struct vnic_cq *cq);
+int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
+	unsigned int desc_count, unsigned int desc_size);
+void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
+	unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
+	unsigned int cq_tail_color, unsigned int interrupt_enable,
+	unsigned int cq_entry_enable, unsigned int message_enable,
+	unsigned int interrupt_offset, u64 message_addr);
+void vnic_cq_clean(struct vnic_cq *cq);
+
+#endif /* _VNIC_CQ_H_ */
diff --git a/drivers/scsi/fnic/vnic_cq_copy.h b/drivers/scsi/fnic/vnic_cq_copy.h
new file mode 100644
index 0000000..7901ce2
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_cq_copy.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_CQ_COPY_H_
+#define _VNIC_CQ_COPY_H_
+
+#include "fcpio.h"
+
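+/*
+ * Same color-toggle scheme as vnic_cq_service() in vnic_cq.h, but the ring
+ * entries are FCP I/O firmware-to-host completion descriptors (fcpio_fw_req).
+ */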
+static inline unsigned int vnic_cq_copy_service(
+	struct vnic_cq *cq,
+	int (*q_service)(struct vnic_dev *vdev,
+			 unsigned int index,
+			 struct fcpio_fw_req *desc),
+	unsigned int work_to_do)
+
+{
+	struct fcpio_fw_req *desc;
+	unsigned int work_done = 0;
+	u8 color;
+
+	desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs +
+		cq->ring.desc_size * cq->to_clean);
+	fcpio_color_dec(desc, &color);
+
+	while (color != cq->last_color) {
+
+		if ((*q_service)(cq->vdev, cq->index, desc))
+			break;
+
+		cq->to_clean++;
+		if (cq->to_clean == cq->ring.desc_count) {
+			cq->to_clean = 0;
+			cq->last_color = cq->last_color ? 0 : 1;
+		}
+
+		desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs +
+			cq->ring.desc_size * cq->to_clean);
+		fcpio_color_dec(desc, &color);
+
+		work_done++;
+		if (work_done >= work_to_do)
+			break;
+	}
+
+	return work_done;
+}
+
+#endif /* _VNIC_CQ_COPY_H_ */
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
new file mode 100644
index 0000000..5667706
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -0,0 +1,690 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/if_ether.h>
+#include "vnic_resource.h"
+#include "vnic_devcmd.h"
+#include "vnic_dev.h"
+#include "vnic_stats.h"
+
+struct vnic_res {
+	void __iomem *vaddr;
+	unsigned int count;
+};
+
+struct vnic_dev {
+	void *priv;
+	struct pci_dev *pdev;
+	struct vnic_res res[RES_TYPE_MAX];
+	enum vnic_dev_intr_mode intr_mode;
+	struct vnic_devcmd __iomem *devcmd;
+	struct vnic_devcmd_notify *notify;
+	struct vnic_devcmd_notify notify_copy;
+	dma_addr_t notify_pa;
+	u32 *linkstatus;
+	dma_addr_t linkstatus_pa;
+	struct vnic_stats *stats;
+	dma_addr_t stats_pa;
+	struct vnic_devcmd_fw_info *fw_info;
+	dma_addr_t fw_info_pa;
+};
+
+#define VNIC_MAX_RES_HDR_SIZE \
+	(sizeof(struct vnic_resource_header) + \
+	sizeof(struct vnic_resource) * RES_TYPE_MAX)
+#define VNIC_RES_STRIDE	128
+
+void *vnic_dev_priv(struct vnic_dev *vdev)
+{
+	return vdev->priv;
+}
+
+static int vnic_dev_discover_res(struct vnic_dev *vdev,
+	struct vnic_dev_bar *bar)
+{
+	struct vnic_resource_header __iomem *rh;
+	struct vnic_resource __iomem *r;
+	u8 type;
+
+	if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
+		printk(KERN_ERR "vNIC BAR0 res hdr length error\n");
+		return -EINVAL;
+	}
+
+	rh = bar->vaddr;
+	if (!rh) {
+		printk(KERN_ERR "vNIC BAR0 res hdr not mem-mapped\n");
+		return -EINVAL;
+	}
+
+	if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
+	    ioread32(&rh->version) != VNIC_RES_VERSION) {
+		printk(KERN_ERR "vNIC BAR0 res magic/version error "
+			"exp (%lx/%lx) curr (%x/%x)\n",
+			VNIC_RES_MAGIC, VNIC_RES_VERSION,
+			ioread32(&rh->magic), ioread32(&rh->version));
+		return -EINVAL;
+	}
+
+	r = (struct vnic_resource __iomem *)(rh + 1);
+
+	while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {
+
+		u8 bar_num = ioread8(&r->bar);
+		u32 bar_offset = ioread32(&r->bar_offset);
+		u32 count = ioread32(&r->count);
+		u32 len;
+
+		r++;
+
+		if (bar_num != 0)  /* only mapping in BAR0 resources */
+			continue;
+
+		switch (type) {
+		case RES_TYPE_WQ:
+		case RES_TYPE_RQ:
+		case RES_TYPE_CQ:
+		case RES_TYPE_INTR_CTRL:
+			/* each count is stride bytes long */
+			len = count * VNIC_RES_STRIDE;
+			if (len + bar_offset > bar->len) {
+				printk(KERN_ERR "vNIC BAR0 resource %d "
+					"out-of-bounds, offset 0x%x + "
+					"size 0x%x > bar len 0x%lx\n",
+					type, bar_offset,
+					len,
+					bar->len);
+				return -EINVAL;
+			}
+			break;
+		case RES_TYPE_INTR_PBA_LEGACY:
+		case RES_TYPE_DEVCMD:
+			len = count;
+			break;
+		default:
+			continue;
+		}
+
+		vdev->res[type].count = count;
+		vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset;
+	}
+
+	return 0;
+}
+
+unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
+	enum vnic_res_type type)
+{
+	return vdev->res[type].count;
+}
+
+void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
+	unsigned int index)
+{
+	if (!vdev->res[type].vaddr)
+		return NULL;
+
+	switch (type) {
+	case RES_TYPE_WQ:
+	case RES_TYPE_RQ:
+	case RES_TYPE_CQ:
+	case RES_TYPE_INTR_CTRL:
+		return (char __iomem *)vdev->res[type].vaddr +
+					index * VNIC_RES_STRIDE;
+	default:
+		return (char __iomem *)vdev->res[type].vaddr;
+	}
+}
+
+unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
+				     unsigned int desc_count,
+				     unsigned int desc_size)
+{
+	/* The base address of the desc rings must be 512 byte aligned.
+	 * Descriptor count is aligned to groups of 32 descriptors.  A
+	 * count of 0 means the maximum 4096 descriptors.  Descriptor
+	 * size is aligned to 16 bytes.
+	 */
+
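+	/*
+	 * For example (hypothetical sizes): desc_count=80, desc_size=24 is
+	 * rounded up to desc_count=96, desc_size=32, giving size=3072 and
+	 * size_unaligned=3584.
+	 */
+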
+	unsigned int count_align = 32;
+	unsigned int desc_align = 16;
+
+	ring->base_align = 512;
+
+	if (desc_count == 0)
+		desc_count = 4096;
+
+	ring->desc_count = ALIGN(desc_count, count_align);
+
+	ring->desc_size = ALIGN(desc_size, desc_align);
+
+	ring->size = ring->desc_count * ring->desc_size;
+	ring->size_unaligned = ring->size + ring->base_align;
+
+	return ring->size_unaligned;
+}
+
+void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
+{
+	memset(ring->descs, 0, ring->size);
+}
+
+int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
+	unsigned int desc_count, unsigned int desc_size)
+{
+	vnic_dev_desc_ring_size(ring, desc_count, desc_size);
+
+	ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
+		ring->size_unaligned,
+		&ring->base_addr_unaligned);
+
+	if (!ring->descs_unaligned) {
+		printk(KERN_ERR
+		  "Failed to allocate ring (size=%d), aborting\n",
+			(int)ring->size);
+		return -ENOMEM;
+	}
+
+	ring->base_addr = ALIGN(ring->base_addr_unaligned,
+		ring->base_align);
+	ring->descs = (u8 *)ring->descs_unaligned +
+		(ring->base_addr - ring->base_addr_unaligned);
+
+	vnic_dev_clear_desc_ring(ring);
+
+	ring->desc_avail = ring->desc_count - 1;
+
+	return 0;
+}
+
+void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
+{
+	if (ring->descs) {
+		pci_free_consistent(vdev->pdev,
+			ring->size_unaligned,
+			ring->descs_unaligned,
+			ring->base_addr_unaligned);
+		ring->descs = NULL;
+	}
+}
+
+int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+	u64 *a0, u64 *a1, int wait)
+{
+	struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
+	int delay;
+	u32 status;
+	int dev_cmd_err[] = {
+		/* convert from fw's version of error.h to host's version */
+		0,	/* ERR_SUCCESS */
+		EINVAL,	/* ERR_EINVAL */
+		EFAULT,	/* ERR_EFAULT */
+		EPERM,	/* ERR_EPERM */
+		EBUSY,  /* ERR_EBUSY */
+	};
+	int err;
+
+	status = ioread32(&devcmd->status);
+	if (status & STAT_BUSY) {
+		printk(KERN_ERR "Busy devcmd %d\n", _CMD_N(cmd));
+		return -EBUSY;
+	}
+
+	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
+		writeq(*a0, &devcmd->args[0]);
+		writeq(*a1, &devcmd->args[1]);
+		wmb();
+	}
+
+	iowrite32(cmd, &devcmd->cmd);
+
+	if (_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT)
+		return 0;
+
+	for (delay = 0; delay < wait; delay++) {
+
+		udelay(100);
+
+		status = ioread32(&devcmd->status);
+		if (!(status & STAT_BUSY)) {
+
+			if (status & STAT_ERROR) {
+				u64 fw_err = readq(&devcmd->args[0]);
+
+				/* guard against fw codes beyond the table */
+				err = (fw_err < ARRAY_SIZE(dev_cmd_err)) ?
+					dev_cmd_err[fw_err] : EIO;
+				printk(KERN_ERR "Error %d devcmd %d\n",
+					err, _CMD_N(cmd));
+				return -err;
+			}
+
+			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
+				rmb();
+				*a0 = readq(&devcmd->args[0]);
+				*a1 = readq(&devcmd->args[1]);
+			}
+
+			return 0;
+		}
+	}
+
+	printk(KERN_ERR "Timedout devcmd %d\n", _CMD_N(cmd));
+	return -ETIMEDOUT;
+}
+
+int vnic_dev_fw_info(struct vnic_dev *vdev,
+	struct vnic_devcmd_fw_info **fw_info)
+{
+	u64 a0, a1 = 0;
+	int wait = 1000;
+	int err = 0;
+
+	if (!vdev->fw_info) {
+		vdev->fw_info = pci_alloc_consistent(vdev->pdev,
+			sizeof(struct vnic_devcmd_fw_info),
+			&vdev->fw_info_pa);
+		if (!vdev->fw_info)
+			return -ENOMEM;
+
+		a0 = vdev->fw_info_pa;
+
+		/* only get fw_info once and cache it */
+		err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
+	}
+
+	*fw_info = vdev->fw_info;
+
+	return err;
+}
+
+int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
+	void *value)
+{
+	u64 a0, a1;
+	int wait = 1000;
+	int err;
+
+	a0 = offset;
+	a1 = size;
+
+	err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);
+
+	switch (size) {
+	case 1:
+		*(u8 *)value = (u8)a0;
+		break;
+	case 2:
+		*(u16 *)value = (u16)a0;
+		break;
+	case 4:
+		*(u32 *)value = (u32)a0;
+		break;
+	case 8:
+		*(u64 *)value = a0;
+		break;
+	default:
+		BUG();
+		break;
+	}
+
+	return err;
+}
+
+int vnic_dev_stats_clear(struct vnic_dev *vdev)
+{
+	u64 a0 = 0, a1 = 0;
+	int wait = 1000;
+	return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
+}
+
+int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
+{
+	u64 a0, a1;
+	int wait = 1000;
+
+	if (!vdev->stats) {
+		vdev->stats = pci_alloc_consistent(vdev->pdev,
+			sizeof(struct vnic_stats), &vdev->stats_pa);
+		if (!vdev->stats)
+			return -ENOMEM;
+	}
+
+	*stats = vdev->stats;
+	a0 = vdev->stats_pa;
+	a1 = sizeof(struct vnic_stats);
+
+	return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
+}
+
+int vnic_dev_close(struct vnic_dev *vdev)
+{
+	u64 a0 = 0, a1 = 0;
+	int wait = 1000;
+	return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
+}
+
+int vnic_dev_enable(struct vnic_dev *vdev)
+{
+	u64 a0 = 0, a1 = 0;
+	int wait = 1000;
+	return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
+}
+
+int vnic_dev_disable(struct vnic_dev *vdev)
+{
+	u64 a0 = 0, a1 = 0;
+	int wait = 1000;
+	return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
+}
+
+int vnic_dev_open(struct vnic_dev *vdev, int arg)
+{
+	u64 a0 = (u32)arg, a1 = 0;
+	int wait = 1000;
+	return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
+}
+
+int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
+{
+	u64 a0 = 0, a1 = 0;
+	int wait = 1000;
+	int err;
+
+	*done = 0;
+
+	err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
+	if (err)
+		return err;
+
+	*done = (a0 == 0);
+
+	return 0;
+}
+
+int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
+{
+	u64 a0 = (u32)arg, a1 = 0;
+	int wait = 1000;
+	return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
+}
+
+int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
+{
+	u64 a0 = 0, a1 = 0;
+	int wait = 1000;
+	int err;
+
+	*done = 0;
+
+	err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait);
+	if (err)
+		return err;
+
+	*done = (a0 == 0);
+
+	return 0;
+}
+
+int vnic_dev_hang_notify(struct vnic_dev *vdev)
+{
+	u64 a0, a1;
+	int wait = 1000;
+	return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
+}
+
+int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
+{
+	u64 a0, a1;
+	int wait = 1000;
+	int err, i;
+
+	for (i = 0; i < ETH_ALEN; i++)
+		mac_addr[i] = 0;
+
+	err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
+	if (err)
+		return err;
+
+	for (i = 0; i < ETH_ALEN; i++)
+		mac_addr[i] = ((u8 *)&a0)[i];
+
+	return 0;
+}
+
+void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
+	int broadcast, int promisc, int allmulti)
+{
+	u64 a0, a1 = 0;
+	int wait = 1000;
+	int err;
+
+	a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
+	     (multicast ? CMD_PFILTER_MULTICAST : 0) |
+	     (broadcast ? CMD_PFILTER_BROADCAST : 0) |
+	     (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
+	     (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);
+
+	err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
+	if (err)
+		printk(KERN_ERR "Can't set packet filter\n");
+}
+
+void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
+{
+	u64 a0 = 0, a1 = 0;
+	int wait = 1000;
+	int err;
+	int i;
+
+	for (i = 0; i < ETH_ALEN; i++)
+		((u8 *)&a0)[i] = addr[i];
+
+	err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
+	if (err)
+		printk(KERN_ERR
+			"Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
+			addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
+			err);
+}
+
+void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
+{
+	u64 a0 = 0, a1 = 0;
+	int wait = 1000;
+	int err;
+	int i;
+
+	for (i = 0; i < ETH_ALEN; i++)
+		((u8 *)&a0)[i] = addr[i];
+
+	err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
+	if (err)
+		printk(KERN_ERR
+			"Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
+			addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
+			err);
+}
+
+int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
+{
+	u64 a0, a1;
+	int wait = 1000;
+
+	if (!vdev->notify) {
+		vdev->notify = pci_alloc_consistent(vdev->pdev,
+			sizeof(struct vnic_devcmd_notify),
+			&vdev->notify_pa);
+		if (!vdev->notify)
+			return -ENOMEM;
+	}
+
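+	/*
+	 * a1 carries the notify area size in its low 32 bits and the
+	 * interrupt number in bits 32..47 (see CMD_NOTIFY in vnic_devcmd.h).
+	 */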
+	a0 = vdev->notify_pa;
+	a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
+	a1 += sizeof(struct vnic_devcmd_notify);
+
+	return vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
+}
+
+void vnic_dev_notify_unset(struct vnic_dev *vdev)
+{
+	u64 a0, a1;
+	int wait = 1000;
+
+	a0 = 0;  /* paddr = 0 to unset notify buffer */
+	a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
+	a1 += sizeof(struct vnic_devcmd_notify);
+
+	vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
+}
+
+static int vnic_dev_notify_ready(struct vnic_dev *vdev)
+{
+	u32 *words;
+	unsigned int nwords = sizeof(struct vnic_devcmd_notify) / 4;
+	unsigned int i;
+	u32 csum;
+
+	if (!vdev->notify)
+		return 0;
+
+	do {
+		csum = 0;
+		memcpy(&vdev->notify_copy, vdev->notify,
+			sizeof(struct vnic_devcmd_notify));
+		words = (u32 *)&vdev->notify_copy;
+		for (i = 1; i < nwords; i++)
+			csum += words[i];
+	} while (csum != words[0]);
+
+	return 1;
+}
+
+int vnic_dev_init(struct vnic_dev *vdev, int arg)
+{
+	u64 a0 = (u32)arg, a1 = 0;
+	int wait = 1000;
+	return vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
+}
+
+int vnic_dev_link_status(struct vnic_dev *vdev)
+{
+	if (vdev->linkstatus)
+		return *vdev->linkstatus;
+
+	if (!vnic_dev_notify_ready(vdev))
+		return 0;
+
+	return vdev->notify_copy.link_state;
+}
+
+u32 vnic_dev_port_speed(struct vnic_dev *vdev)
+{
+	if (!vnic_dev_notify_ready(vdev))
+		return 0;
+
+	return vdev->notify_copy.port_speed;
+}
+
+u32 vnic_dev_msg_lvl(struct vnic_dev *vdev)
+{
+	if (!vnic_dev_notify_ready(vdev))
+		return 0;
+
+	return vdev->notify_copy.msglvl;
+}
+
+u32 vnic_dev_mtu(struct vnic_dev *vdev)
+{
+	if (!vnic_dev_notify_ready(vdev))
+		return 0;
+
+	return vdev->notify_copy.mtu;
+}
+
+u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev)
+{
+	if (!vnic_dev_notify_ready(vdev))
+		return 0;
+
+	return vdev->notify_copy.link_down_cnt;
+}
+
+void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
+	enum vnic_dev_intr_mode intr_mode)
+{
+	vdev->intr_mode = intr_mode;
+}
+
+enum vnic_dev_intr_mode vnic_dev_get_intr_mode(
+	struct vnic_dev *vdev)
+{
+	return vdev->intr_mode;
+}
+
+void vnic_dev_unregister(struct vnic_dev *vdev)
+{
+	if (vdev) {
+		if (vdev->notify)
+			pci_free_consistent(vdev->pdev,
+				sizeof(struct vnic_devcmd_notify),
+				vdev->notify,
+				vdev->notify_pa);
+		if (vdev->linkstatus)
+			pci_free_consistent(vdev->pdev,
+				sizeof(u32),
+				vdev->linkstatus,
+				vdev->linkstatus_pa);
+		if (vdev->stats)
+			pci_free_consistent(vdev->pdev,
+				sizeof(struct vnic_stats),
+				vdev->stats, vdev->stats_pa);
+		if (vdev->fw_info)
+			pci_free_consistent(vdev->pdev,
+				sizeof(struct vnic_devcmd_fw_info),
+				vdev->fw_info, vdev->fw_info_pa);
+		kfree(vdev);
+	}
+}
+
+struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
+	void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar)
+{
+	if (!vdev) {
+		vdev = kzalloc(sizeof(struct vnic_dev), GFP_KERNEL);
+		if (!vdev)
+			return NULL;
+	}
+
+	vdev->priv = priv;
+	vdev->pdev = pdev;
+
+	if (vnic_dev_discover_res(vdev, bar))
+		goto err_out;
+
+	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
+	if (!vdev->devcmd)
+		goto err_out;
+
+	return vdev;
+
+err_out:
+	vnic_dev_unregister(vdev);
+	return NULL;
+}
diff --git a/drivers/scsi/fnic/vnic_dev.h b/drivers/scsi/fnic/vnic_dev.h
new file mode 100644
index 0000000..f9935a8
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_dev.h
@@ -0,0 +1,161 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_DEV_H_
+#define _VNIC_DEV_H_
+
+#include "vnic_resource.h"
+#include "vnic_devcmd.h"
+
+/*
+ * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth
+ * Driver) when both are built with CONFIG options =y
+ */
+#define vnic_dev_priv fnic_dev_priv
+#define vnic_dev_get_res_count fnic_dev_get_res_count
+#define vnic_dev_get_res fnic_dev_get_res
+#define vnic_dev_desc_ring_size fnic_dev_desc_ring_size
+#define vnic_dev_clear_desc_ring fnic_dev_clear_desc_ring
+#define vnic_dev_alloc_desc_ring fnic_dev_alloc_desc_ring
+#define vnic_dev_free_desc_ring fnic_dev_free_desc_ring
+#define vnic_dev_cmd fnic_dev_cmd
+#define vnic_dev_fw_info fnic_dev_fw_info
+#define vnic_dev_spec fnic_dev_spec
+#define vnic_dev_stats_clear fnic_dev_stats_clear
+#define vnic_dev_stats_dump fnic_dev_stats_dump
+#define vnic_dev_hang_notify fnic_dev_hang_notify
+#define vnic_dev_packet_filter fnic_dev_packet_filter
+#define vnic_dev_add_addr fnic_dev_add_addr
+#define vnic_dev_del_addr fnic_dev_del_addr
+#define vnic_dev_mac_addr fnic_dev_mac_addr
+#define vnic_dev_notify_set fnic_dev_notify_set
+#define vnic_dev_notify_unset fnic_dev_notify_unset
+#define vnic_dev_link_status fnic_dev_link_status
+#define vnic_dev_port_speed fnic_dev_port_speed
+#define vnic_dev_msg_lvl fnic_dev_msg_lvl
+#define vnic_dev_mtu fnic_dev_mtu
+#define vnic_dev_link_down_cnt fnic_dev_link_down_cnt
+#define vnic_dev_close fnic_dev_close
+#define vnic_dev_enable fnic_dev_enable
+#define vnic_dev_disable fnic_dev_disable
+#define vnic_dev_open fnic_dev_open
+#define vnic_dev_open_done fnic_dev_open_done
+#define vnic_dev_init fnic_dev_init
+#define vnic_dev_soft_reset fnic_dev_soft_reset
+#define vnic_dev_soft_reset_done fnic_dev_soft_reset_done
+#define vnic_dev_set_intr_mode fnic_dev_set_intr_mode
+#define vnic_dev_get_intr_mode fnic_dev_get_intr_mode
+#define vnic_dev_unregister fnic_dev_unregister
+#define vnic_dev_register fnic_dev_register
+
+#ifndef VNIC_PADDR_TARGET
+#define VNIC_PADDR_TARGET	0x0000000000000000ULL
+#endif
+
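+/*
+ * Fallback 64-bit MMIO accessors for platforms without native readq/writeq.
+ * Each access is split into two 32-bit operations and is therefore not
+ * atomic with respect to the device.
+ */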
+#ifndef readq
+static inline u64 readq(void __iomem *reg)
+{
+	return ((u64)readl(reg + 0x4UL) << 32) | (u64)readl(reg);
+}
+
+static inline void writeq(u64 val, void __iomem *reg)
+{
+	writel(val & 0xffffffff, reg);
+	writel(val >> 32, reg + 0x4UL);
+}
+#endif
+
+enum vnic_dev_intr_mode {
+	VNIC_DEV_INTR_MODE_UNKNOWN,
+	VNIC_DEV_INTR_MODE_INTX,
+	VNIC_DEV_INTR_MODE_MSI,
+	VNIC_DEV_INTR_MODE_MSIX,
+};
+
+struct vnic_dev_bar {
+	void __iomem *vaddr;
+	dma_addr_t bus_addr;
+	unsigned long len;
+};
+
+struct vnic_dev_ring {
+	void *descs;
+	size_t size;
+	dma_addr_t base_addr;
+	size_t base_align;
+	void *descs_unaligned;
+	size_t size_unaligned;
+	dma_addr_t base_addr_unaligned;
+	unsigned int desc_size;
+	unsigned int desc_count;
+	unsigned int desc_avail;
+};
+
+struct vnic_dev;
+struct vnic_stats;
+
+void *vnic_dev_priv(struct vnic_dev *vdev);
+unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
+				    enum vnic_res_type type);
+void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
+			       unsigned int index);
+unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
+				     unsigned int desc_count,
+				     unsigned int desc_size);
+void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring);
+int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
+			     unsigned int desc_count, unsigned int desc_size);
+void vnic_dev_free_desc_ring(struct vnic_dev *vdev,
+			     struct vnic_dev_ring *ring);
+int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+		 u64 *a0, u64 *a1, int wait);
+int vnic_dev_fw_info(struct vnic_dev *vdev,
+		     struct vnic_devcmd_fw_info **fw_info);
+int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset,
+		  unsigned int size, void *value);
+int vnic_dev_stats_clear(struct vnic_dev *vdev);
+int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
+int vnic_dev_hang_notify(struct vnic_dev *vdev);
+void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
+			    int broadcast, int promisc, int allmulti);
+void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr);
+void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr);
+int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
+int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr);
+void vnic_dev_notify_unset(struct vnic_dev *vdev);
+int vnic_dev_link_status(struct vnic_dev *vdev);
+u32 vnic_dev_port_speed(struct vnic_dev *vdev);
+u32 vnic_dev_msg_lvl(struct vnic_dev *vdev);
+u32 vnic_dev_mtu(struct vnic_dev *vdev);
+u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev);
+int vnic_dev_close(struct vnic_dev *vdev);
+int vnic_dev_enable(struct vnic_dev *vdev);
+int vnic_dev_disable(struct vnic_dev *vdev);
+int vnic_dev_open(struct vnic_dev *vdev, int arg);
+int vnic_dev_open_done(struct vnic_dev *vdev, int *done);
+int vnic_dev_init(struct vnic_dev *vdev, int arg);
+int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg);
+int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done);
+void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
+			    enum vnic_dev_intr_mode intr_mode);
+enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev);
+void vnic_dev_unregister(struct vnic_dev *vdev);
+struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
+				   void *priv, struct pci_dev *pdev,
+				   struct vnic_dev_bar *bar);
+
+#endif /* _VNIC_DEV_H_ */
diff --git a/drivers/scsi/fnic/vnic_devcmd.h b/drivers/scsi/fnic/vnic_devcmd.h
new file mode 100644
index 0000000..d62b906
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_devcmd.h
@@ -0,0 +1,281 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_DEVCMD_H_
+#define _VNIC_DEVCMD_H_
+
+#define _CMD_NBITS      14
+#define _CMD_VTYPEBITS	10
+#define _CMD_FLAGSBITS  6
+#define _CMD_DIRBITS	2
+
+#define _CMD_NMASK      ((1 << _CMD_NBITS)-1)
+#define _CMD_VTYPEMASK  ((1 << _CMD_VTYPEBITS)-1)
+#define _CMD_FLAGSMASK  ((1 << _CMD_FLAGSBITS)-1)
+#define _CMD_DIRMASK    ((1 << _CMD_DIRBITS)-1)
+
+#define _CMD_NSHIFT     0
+#define _CMD_VTYPESHIFT (_CMD_NSHIFT+_CMD_NBITS)
+#define _CMD_FLAGSSHIFT (_CMD_VTYPESHIFT+_CMD_VTYPEBITS)
+#define _CMD_DIRSHIFT   (_CMD_FLAGSSHIFT+_CMD_FLAGSBITS)
+
+/*
+ * Direction bits (from host perspective).
+ */
+#define _CMD_DIR_NONE   0U
+#define _CMD_DIR_WRITE  1U
+#define _CMD_DIR_READ   2U
+#define _CMD_DIR_RW     (_CMD_DIR_WRITE | _CMD_DIR_READ)
+
+/*
+ * Flag bits.
+ */
+#define _CMD_FLAGS_NONE 0U
+#define _CMD_FLAGS_NOWAIT 1U
+
+/*
+ * vNIC type bits.
+ */
+#define _CMD_VTYPE_NONE  0U
+#define _CMD_VTYPE_ENET  1U
+#define _CMD_VTYPE_FC    2U
+#define _CMD_VTYPE_SCSI  4U
+#define _CMD_VTYPE_ALL   (_CMD_VTYPE_ENET | _CMD_VTYPE_FC | _CMD_VTYPE_SCSI)
+
+/*
+ * Used to create cmds.
+ */
+#define _CMDCF(dir, flags, vtype, nr)  \
+	(((dir)   << _CMD_DIRSHIFT) | \
+	((flags) << _CMD_FLAGSSHIFT) | \
+	((vtype) << _CMD_VTYPESHIFT) | \
+	((nr)    << _CMD_NSHIFT))
+#define _CMDC(dir, vtype, nr)    _CMDCF(dir, 0, vtype, nr)
+#define _CMDCNW(dir, vtype, nr)  _CMDCF(dir, _CMD_FLAGS_NOWAIT, vtype, nr)
+
+/*
+ * Used to decode cmds.
+ */
+#define _CMD_DIR(cmd)            (((cmd) >> _CMD_DIRSHIFT) & _CMD_DIRMASK)
+#define _CMD_FLAGS(cmd)          (((cmd) >> _CMD_FLAGSSHIFT) & _CMD_FLAGSMASK)
+#define _CMD_VTYPE(cmd)          (((cmd) >> _CMD_VTYPESHIFT) & _CMD_VTYPEMASK)
+#define _CMD_N(cmd)              (((cmd) >> _CMD_NSHIFT) & _CMD_NMASK)
+
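+/*
+ * Worked example: CMD_OPEN below is _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL,
+ * 23), i.e. (1 << 30) | (1 << 24) | (7 << 14) | 23.  Decoding it, _CMD_N()
+ * recovers 23, _CMD_DIR() recovers _CMD_DIR_WRITE and _CMD_FLAGS() recovers
+ * _CMD_FLAGS_NOWAIT.
+ */
+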
+enum vnic_devcmd_cmd {
+	CMD_NONE                = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_NONE, 0),
+
+	/* mcpu fw info in mem: (u64)a0=paddr to struct vnic_devcmd_fw_info */
+	CMD_MCPU_FW_INFO        = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1),
+
+	/* dev-specific block member:
+	 *    in: (u16)a0=offset,(u8)a1=size
+	 *    out: a0=value */
+	CMD_DEV_SPEC            = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 2),
+
+	/* stats clear */
+	CMD_STATS_CLEAR         = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 3),
+
+	/* stats dump in mem: (u64)a0=paddr to stats area,
+	 *                    (u16)a1=sizeof stats area */
+	CMD_STATS_DUMP          = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 4),
+
+	/* set Rx packet filter: (u32)a0=filters (see CMD_PFILTER_*) */
+	CMD_PACKET_FILTER	= _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 7),
+
+	/* hang detection notification */
+	CMD_HANG_NOTIFY         = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 8),
+
+	/* MAC address in (u48)a0 */
+	CMD_MAC_ADDR            = _CMDC(_CMD_DIR_READ,
+					_CMD_VTYPE_ENET | _CMD_VTYPE_FC, 9),
+
+	/* disable/enable promisc mode: (u8)a0=0/1 */
+/***** XXX DEPRECATED *****/
+	CMD_PROMISC_MODE        = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 10),
+
+	/* disable/enable all-multi mode: (u8)a0=0/1 */
+/***** XXX DEPRECATED *****/
+	CMD_ALLMULTI_MODE       = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 11),
+
+	/* add addr from (u48)a0 */
+	CMD_ADDR_ADD            = _CMDCNW(_CMD_DIR_WRITE,
+					_CMD_VTYPE_ENET | _CMD_VTYPE_FC, 12),
+
+	/* del addr from (u48)a0 */
+	CMD_ADDR_DEL            = _CMDCNW(_CMD_DIR_WRITE,
+					_CMD_VTYPE_ENET | _CMD_VTYPE_FC, 13),
+
+	/* add VLAN id in (u16)a0 */
+	CMD_VLAN_ADD            = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 14),
+
+	/* del VLAN id in (u16)a0 */
+	CMD_VLAN_DEL            = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 15),
+
+	/* nic_cfg in (u32)a0 */
+	CMD_NIC_CFG             = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16),
+
+	/* union vnic_rss_key in mem: (u64)a0=paddr, (u16)a1=len */
+	CMD_RSS_KEY             = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 17),
+
+	/* union vnic_rss_cpu in mem: (u64)a0=paddr, (u16)a1=len */
+	CMD_RSS_CPU             = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 18),
+
+	/* initiate softreset */
+	CMD_SOFT_RESET          = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 19),
+
+	/* softreset status:
+	 *    out: a0=0 reset complete, a0=1 reset in progress */
+	CMD_SOFT_RESET_STATUS   = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 20),
+
+	/* set struct vnic_devcmd_notify buffer in mem:
+	 * in:
+	 *   (u64)a0=paddr to notify (set paddr=0 to unset)
+	 *   (u32)a1 & 0x00000000ffffffff=sizeof(struct vnic_devcmd_notify)
+	 *   (u16)a1 & 0x0000ffff00000000=intr num (-1 for no intr)
+	 * out:
+	 *   (u32)a1 = effective size
+	 */
+	CMD_NOTIFY              = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 21),
+
+	/* UNDI API: (u64)a0=paddr to s_PXENV_UNDI_ struct,
+	 *           (u8)a1=PXENV_UNDI_xxx */
+	CMD_UNDI                = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 22),
+
+	/* initiate open sequence (u32)a0=flags (see CMD_OPENF_*) */
+	CMD_OPEN		= _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 23),
+
+	/* open status:
+	 *    out: a0=0 open complete, a0=1 open in progress */
+	CMD_OPEN_STATUS		= _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 24),
+
+	/* close vnic */
+	CMD_CLOSE		= _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 25),
+
+	/* initialize virtual link: (u32)a0=flags (see CMD_INITF_*) */
+	CMD_INIT		= _CMDCNW(_CMD_DIR_READ, _CMD_VTYPE_ALL, 26),
+
+	/* variant of CMD_INIT, with provisioning info
+	 *     (u64)a0=paddr of vnic_devcmd_provinfo
+	 *     (u32)a1=sizeof provision info */
+	CMD_INIT_PROV_INFO	= _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 27),
+
+	/* enable virtual link */
+	CMD_ENABLE		= _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28),
+
+	/* disable virtual link */
+	CMD_DISABLE		= _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 29),
+
+	/* stats dump all vnics on uplink in mem: (u64)a0=paddr (u32)a1=uif */
+	CMD_STATS_DUMP_ALL	= _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 30),
+
+	/* init status:
+	 *    out: a0=0 init complete, a0=1 init in progress
+	 *         if a0=0, a1=errno */
+	CMD_INIT_STATUS		= _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 31),
+
+	/* INT13 API: (u64)a0=paddr to vnic_int13_params struct
+	 *            (u8)a1=INT13_CMD_xxx */
+	CMD_INT13               = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_FC, 32),
+
+	/* logical uplink enable/disable: (u64)a0: 0/1=disable/enable */
+	CMD_LOGICAL_UPLINK      = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 33),
+
+	/* undo initialize of virtual link */
+	CMD_DEINIT		= _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34),
+};
+
+/* flags for CMD_OPEN */
+#define CMD_OPENF_OPROM		0x1	/* open coming from option rom */
+
+/* flags for CMD_INIT */
+#define CMD_INITF_DEFAULT_MAC	0x1	/* init with default mac addr */
+
+/* flags for CMD_PACKET_FILTER */
+#define CMD_PFILTER_DIRECTED		0x01
+#define CMD_PFILTER_MULTICAST		0x02
+#define CMD_PFILTER_BROADCAST		0x04
+#define CMD_PFILTER_PROMISCUOUS		0x08
+#define CMD_PFILTER_ALL_MULTICAST	0x10
+
+enum vnic_devcmd_status {
+	STAT_NONE = 0,
+	STAT_BUSY = 1 << 0,	/* cmd in progress */
+	STAT_ERROR = 1 << 1,	/* last cmd caused error (code in a0) */
+};
+
+enum vnic_devcmd_error {
+	ERR_SUCCESS = 0,
+	ERR_EINVAL = 1,
+	ERR_EFAULT = 2,
+	ERR_EPERM = 3,
+	ERR_EBUSY = 4,
+	ERR_ECMDUNKNOWN = 5,
+	ERR_EBADSTATE = 6,
+	ERR_ENOMEM = 7,
+	ERR_ETIMEDOUT = 8,
+	ERR_ELINKDOWN = 9,
+};
+
+struct vnic_devcmd_fw_info {
+	char fw_version[32];
+	char fw_build[32];
+	char hw_version[32];
+	char hw_serial_number[32];
+};
+
+struct vnic_devcmd_notify {
+	u32 csum;		/* checksum over following words */
+
+	u32 link_state;		/* link up == 1 */
+	u32 port_speed;		/* effective port speed (rate limit) */
+	u32 mtu;		/* MTU */
+	u32 msglvl;		/* requested driver msg lvl */
+	u32 uif;		/* uplink interface */
+	u32 status;		/* status bits (see VNIC_STF_*) */
+	u32 error;		/* error code (see ERR_*) for first ERR */
+	u32 link_down_cnt;	/* running count of link down transitions */
+};
+#define VNIC_STF_FATAL_ERR	0x0001	/* fatal fw error */
+
+struct vnic_devcmd_provinfo {
+	u8 oui[3];
+	u8 type;
+	u8 data[0];
+};
+
+/*
+ * Writing cmd register causes STAT_BUSY to get set in status register.
+ * When cmd completes, STAT_BUSY will be cleared.
+ *
+ * If cmd completed successfully STAT_ERROR will be clear
+ * and args registers contain cmd-specific results.
+ *
+ * If cmd error, STAT_ERROR will be set and args[0] contains error code.
+ *
+ * status register is read-only.  While STAT_BUSY is set,
+ * all other register contents are read-only.
+ */
+
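+/*
+ * vnic_dev_cmd() in vnic_dev.c implements this sequence on the host side:
+ * write args[], write cmd, poll status until STAT_BUSY clears, then read
+ * back args[] (or the error code in args[0] if STAT_ERROR is set).
+ */
+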
+/* Make sizeof(vnic_devcmd) a power-of-2 for I/O BAR. */
+#define VNIC_DEVCMD_NARGS 15
+struct vnic_devcmd {
+	u32 status;			/* RO */
+	u32 cmd;			/* RW */
+	u64 args[VNIC_DEVCMD_NARGS];	/* RW cmd args (little-endian) */
+};
+
+#endif /* _VNIC_DEVCMD_H_ */
diff --git a/drivers/scsi/fnic/vnic_intr.c b/drivers/scsi/fnic/vnic_intr.c
new file mode 100644
index 0000000..4f4dc87
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_intr.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include "vnic_dev.h"
+#include "vnic_intr.h"
+
+void vnic_intr_free(struct vnic_intr *intr)
+{
+	intr->ctrl = NULL;
+}
+
+int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
+	unsigned int index)
+{
+	intr->index = index;
+	intr->vdev = vdev;
+
+	intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index);
+	if (!intr->ctrl) {
+		printk(KERN_ERR "Failed to hook INTR[%d].ctrl resource\n",
+			index);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
+	unsigned int coalescing_type, unsigned int mask_on_assertion)
+{
+	iowrite32(coalescing_timer, &intr->ctrl->coalescing_timer);
+	iowrite32(coalescing_type, &intr->ctrl->coalescing_type);
+	iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion);
+	iowrite32(0, &intr->ctrl->int_credits);
+}
+
+void vnic_intr_clean(struct vnic_intr *intr)
+{
+	iowrite32(0, &intr->ctrl->int_credits);
+}
diff --git a/drivers/scsi/fnic/vnic_intr.h b/drivers/scsi/fnic/vnic_intr.h
new file mode 100644
index 0000000..d5fb40e
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_intr.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_INTR_H_
+#define _VNIC_INTR_H_
+
+#include <linux/pci.h>
+#include "vnic_dev.h"
+
+/*
+ * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth
+ * Driver) when both are built with CONFIG options =y
+ */
+#define vnic_intr_unmask fnic_intr_unmask
+#define vnic_intr_mask fnic_intr_mask
+#define vnic_intr_return_credits fnic_intr_return_credits
+#define vnic_intr_credits fnic_intr_credits
+#define vnic_intr_return_all_credits fnic_intr_return_all_credits
+#define vnic_intr_legacy_pba fnic_intr_legacy_pba
+#define vnic_intr_free fnic_intr_free
+#define vnic_intr_alloc fnic_intr_alloc
+#define vnic_intr_init fnic_intr_init
+#define vnic_intr_clean fnic_intr_clean
+
+#define VNIC_INTR_TIMER_MAX		0xffff
+
+#define VNIC_INTR_TIMER_TYPE_ABS	0
+#define VNIC_INTR_TIMER_TYPE_QUIET	1
+
+/* Interrupt control */
+struct vnic_intr_ctrl {
+	u32 coalescing_timer;		/* 0x00 */
+	u32 pad0;
+	u32 coalescing_value;		/* 0x08 */
+	u32 pad1;
+	u32 coalescing_type;		/* 0x10 */
+	u32 pad2;
+	u32 mask_on_assertion;		/* 0x18 */
+	u32 pad3;
+	u32 mask;			/* 0x20 */
+	u32 pad4;
+	u32 int_credits;		/* 0x28 */
+	u32 pad5;
+	u32 int_credit_return;		/* 0x30 */
+	u32 pad6;
+};
+
+struct vnic_intr {
+	unsigned int index;
+	struct vnic_dev *vdev;
+	struct vnic_intr_ctrl __iomem *ctrl;	/* memory-mapped */
+};
+
+static inline void vnic_intr_unmask(struct vnic_intr *intr)
+{
+	iowrite32(0, &intr->ctrl->mask);
+}
+
+static inline void vnic_intr_mask(struct vnic_intr *intr)
+{
+	iowrite32(1, &intr->ctrl->mask);
+}
+
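+/*
+ * Return completed-work credits to the hardware; the same register write can
+ * also unmask the interrupt and restart the coalescing timer.
+ */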
+static inline void vnic_intr_return_credits(struct vnic_intr *intr,
+	unsigned int credits, int unmask, int reset_timer)
+{
+#define VNIC_INTR_UNMASK_SHIFT		16
+#define VNIC_INTR_RESET_TIMER_SHIFT	17
+
+	u32 int_credit_return = (credits & 0xffff) |
+		(unmask ? (1 << VNIC_INTR_UNMASK_SHIFT) : 0) |
+		(reset_timer ? (1 << VNIC_INTR_RESET_TIMER_SHIFT) : 0);
+
+	iowrite32(int_credit_return, &intr->ctrl->int_credit_return);
+}
+
+static inline unsigned int vnic_intr_credits(struct vnic_intr *intr)
+{
+	return ioread32(&intr->ctrl->int_credits);
+}
+
+static inline void vnic_intr_return_all_credits(struct vnic_intr *intr)
+{
+	unsigned int credits = vnic_intr_credits(intr);
+	int unmask = 1;
+	int reset_timer = 1;
+
+	vnic_intr_return_credits(intr, credits, unmask, reset_timer);
+}
+
+static inline u32 vnic_intr_legacy_pba(u32 __iomem *legacy_pba)
+{
+	/* read PBA without clearing */
+	return ioread32(legacy_pba);
+}
+
+void vnic_intr_free(struct vnic_intr *intr);
+int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
+	unsigned int index);
+void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
+	unsigned int coalescing_type, unsigned int mask_on_assertion);
+void vnic_intr_clean(struct vnic_intr *intr);
+
+#endif /* _VNIC_INTR_H_ */
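For reference, a minimal userspace sketch of the int_credit_return encoding used by vnic_intr_return_credits() above: credits go in bits 0-15, the unmask flag in bit 16 and the reset-timer flag in bit 17. The pack_credit_return() helper is illustrative only, not part of the driver.

#include <stdint.h>
#include <stdio.h>

#define UNMASK_SHIFT		16
#define RESET_TIMER_SHIFT	17

/* Pack a credit-return word: credits[15:0], unmask bit, reset-timer bit. */
static uint32_t pack_credit_return(unsigned int credits, int unmask,
				   int reset_timer)
{
	return (credits & 0xffff) |
		(unmask ? (1u << UNMASK_SHIFT) : 0) |
		(reset_timer ? (1u << RESET_TIMER_SHIFT) : 0);
}

int main(void)
{
	/* Return 5 credits, unmask the interrupt and restart the timer. */
	printf("0x%08x\n", pack_credit_return(5, 1, 1));	/* 0x00030005 */
	return 0;
}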
diff --git a/drivers/scsi/fnic/vnic_nic.h b/drivers/scsi/fnic/vnic_nic.h
new file mode 100644
index 0000000..f15b83e
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_nic.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_NIC_H_
+#define _VNIC_NIC_H_
+
+/*
+ * These defines avoid symbol clashes between fnic and enic (the Cisco 10G
+ * Ethernet driver) when both drivers are built into the kernel (=y).
+ */
+#define vnic_set_nic_cfg fnic_set_nic_cfg
+
+#define NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD	0xffUL
+#define NIC_CFG_RSS_DEFAULT_CPU_SHIFT		0
+#define NIC_CFG_RSS_HASH_TYPE			(0xffUL << 8)
+#define NIC_CFG_RSS_HASH_TYPE_MASK_FIELD	0xffUL
+#define NIC_CFG_RSS_HASH_TYPE_SHIFT		8
+#define NIC_CFG_RSS_HASH_BITS			(7UL << 16)
+#define NIC_CFG_RSS_HASH_BITS_MASK_FIELD	7UL
+#define NIC_CFG_RSS_HASH_BITS_SHIFT		16
+#define NIC_CFG_RSS_BASE_CPU			(7UL << 19)
+#define NIC_CFG_RSS_BASE_CPU_MASK_FIELD		7UL
+#define NIC_CFG_RSS_BASE_CPU_SHIFT		19
+#define NIC_CFG_RSS_ENABLE			(1UL << 22)
+#define NIC_CFG_RSS_ENABLE_MASK_FIELD		1UL
+#define NIC_CFG_RSS_ENABLE_SHIFT		22
+#define NIC_CFG_TSO_IPID_SPLIT_EN		(1UL << 23)
+#define NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD	1UL
+#define NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT		23
+#define NIC_CFG_IG_VLAN_STRIP_EN		(1UL << 24)
+#define NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD	1UL
+#define NIC_CFG_IG_VLAN_STRIP_EN_SHIFT		24
+
+static inline void vnic_set_nic_cfg(u32 *nic_cfg,
+	u8 rss_default_cpu, u8 rss_hash_type,
+	u8 rss_hash_bits, u8 rss_base_cpu,
+	u8 rss_enable, u8 tso_ipid_split_en,
+	u8 ig_vlan_strip_en)
+{
+	*nic_cfg = (rss_default_cpu & NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD) |
+		((rss_hash_type & NIC_CFG_RSS_HASH_TYPE_MASK_FIELD)
+			<< NIC_CFG_RSS_HASH_TYPE_SHIFT) |
+		((rss_hash_bits & NIC_CFG_RSS_HASH_BITS_MASK_FIELD)
+			<< NIC_CFG_RSS_HASH_BITS_SHIFT) |
+		((rss_base_cpu & NIC_CFG_RSS_BASE_CPU_MASK_FIELD)
+			<< NIC_CFG_RSS_BASE_CPU_SHIFT) |
+		((rss_enable & NIC_CFG_RSS_ENABLE_MASK_FIELD)
+			<< NIC_CFG_RSS_ENABLE_SHIFT) |
+		((tso_ipid_split_en & NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD)
+			<< NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT) |
+		((ig_vlan_strip_en & NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD)
+			<< NIC_CFG_IG_VLAN_STRIP_EN_SHIFT);
+}
+
+#endif /* _VNIC_NIC_H_ */
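A standalone sketch of the nic_cfg word built by vnic_set_nic_cfg() above, using the same masks and shifts; the pack_nic_cfg() helper is illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Same field layout as vnic_set_nic_cfg(): masks and shifts as defined above. */
static uint32_t pack_nic_cfg(uint8_t rss_default_cpu, uint8_t rss_hash_type,
			     uint8_t rss_hash_bits, uint8_t rss_base_cpu,
			     uint8_t rss_enable, uint8_t tso_ipid_split_en,
			     uint8_t ig_vlan_strip_en)
{
	return (rss_default_cpu & 0xff) |
		((uint32_t)(rss_hash_type & 0xff) << 8) |
		((uint32_t)(rss_hash_bits & 0x7) << 16) |
		((uint32_t)(rss_base_cpu & 0x7) << 19) |
		((uint32_t)(rss_enable & 0x1) << 22) |
		((uint32_t)(tso_ipid_split_en & 0x1) << 23) |
		((uint32_t)(ig_vlan_strip_en & 0x1) << 24);
}

int main(void)
{
	/* RSS and TSO IPID split disabled, ingress VLAN stripping enabled. */
	printf("0x%08x\n", pack_nic_cfg(0, 0, 0, 0, 0, 0, 1)); /* 0x01000000 */
	return 0;
}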
diff --git a/drivers/scsi/fnic/vnic_resource.h b/drivers/scsi/fnic/vnic_resource.h
new file mode 100644
index 0000000..2d842f7
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_resource.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_RESOURCE_H_
+#define _VNIC_RESOURCE_H_
+
+#define VNIC_RES_MAGIC		0x766E6963L	/* 'vnic' */
+#define VNIC_RES_VERSION	0x00000000L
+
+/* vNIC resource types */
+enum vnic_res_type {
+	RES_TYPE_EOL,			/* End-of-list */
+	RES_TYPE_WQ,			/* Work queues */
+	RES_TYPE_RQ,			/* Receive queues */
+	RES_TYPE_CQ,			/* Completion queues */
+	RES_TYPE_RSVD1,
+	RES_TYPE_NIC_CFG,		/* Enet NIC config registers */
+	RES_TYPE_RSVD2,
+	RES_TYPE_RSVD3,
+	RES_TYPE_RSVD4,
+	RES_TYPE_RSVD5,
+	RES_TYPE_INTR_CTRL,		/* Interrupt ctrl table */
+	RES_TYPE_INTR_TABLE,		/* MSI/MSI-X Interrupt table */
+	RES_TYPE_INTR_PBA,		/* MSI/MSI-X PBA table */
+	RES_TYPE_INTR_PBA_LEGACY,	/* Legacy intr status */
+	RES_TYPE_RSVD6,
+	RES_TYPE_RSVD7,
+	RES_TYPE_DEVCMD,		/* Device command region */
+	RES_TYPE_PASS_THRU_PAGE,	/* Pass-thru page */
+
+	RES_TYPE_MAX,			/* Count of resource types */
+};
+
+struct vnic_resource_header {
+	u32 magic;
+	u32 version;
+};
+
+struct vnic_resource {
+	u8 type;
+	u8 bar;
+	u8 pad[2];
+	u32 bar_offset;
+	u32 count;
+};
+
+#endif /* _VNIC_RESOURCE_H_ */
diff --git a/drivers/scsi/fnic/vnic_rq.c b/drivers/scsi/fnic/vnic_rq.c
new file mode 100644
index 0000000..bedd0d2
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_rq.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include "vnic_dev.h"
+#include "vnic_rq.h"
+
+static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
+{
+	struct vnic_rq_buf *buf;
+	struct vnic_dev *vdev;
+	unsigned int i, j, count = rq->ring.desc_count;
+	unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count);
+
+	vdev = rq->vdev;
+
+	for (i = 0; i < blks; i++) {
+		rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC);
+		if (!rq->bufs[i]) {
+			printk(KERN_ERR "Failed to alloc rq_bufs\n");
+			return -ENOMEM;
+		}
+	}
+
+	for (i = 0; i < blks; i++) {
+		buf = rq->bufs[i];
+		for (j = 0; j < VNIC_RQ_BUF_BLK_ENTRIES; j++) {
+			buf->index = i * VNIC_RQ_BUF_BLK_ENTRIES + j;
+			buf->desc = (u8 *)rq->ring.descs +
+				rq->ring.desc_size * buf->index;
+			if (buf->index + 1 == count) {
+				buf->next = rq->bufs[0];
+				break;
+			} else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES) {
+				buf->next = rq->bufs[i + 1];
+			} else {
+				buf->next = buf + 1;
+				buf++;
+			}
+		}
+	}
+
+	rq->to_use = rq->to_clean = rq->bufs[0];
+	rq->buf_index = 0;
+
+	return 0;
+}
+
+void vnic_rq_free(struct vnic_rq *rq)
+{
+	struct vnic_dev *vdev;
+	unsigned int i;
+
+	vdev = rq->vdev;
+
+	vnic_dev_free_desc_ring(vdev, &rq->ring);
+
+	for (i = 0; i < VNIC_RQ_BUF_BLKS_MAX; i++) {
+		kfree(rq->bufs[i]);
+		rq->bufs[i] = NULL;
+	}
+
+	rq->ctrl = NULL;
+}
+
+int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
+	unsigned int desc_count, unsigned int desc_size)
+{
+	int err;
+
+	rq->index = index;
+	rq->vdev = vdev;
+
+	rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index);
+	if (!rq->ctrl) {
+		printk(KERN_ERR "Failed to hook RQ[%d] resource\n", index);
+		return -EINVAL;
+	}
+
+	vnic_rq_disable(rq);
+
+	err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size);
+	if (err)
+		return err;
+
+	err = vnic_rq_alloc_bufs(rq);
+	if (err) {
+		vnic_rq_free(rq);
+		return err;
+	}
+
+	return 0;
+}
+
+void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
+	unsigned int error_interrupt_enable,
+	unsigned int error_interrupt_offset)
+{
+	u64 paddr;
+	u32 fetch_index;
+
+	paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET;
+	writeq(paddr, &rq->ctrl->ring_base);
+	iowrite32(rq->ring.desc_count, &rq->ctrl->ring_size);
+	iowrite32(cq_index, &rq->ctrl->cq_index);
+	iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable);
+	iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset);
+	iowrite32(0, &rq->ctrl->dropped_packet_count);
+	iowrite32(0, &rq->ctrl->error_status);
+
+	/* Use current fetch_index as the ring starting point */
+	fetch_index = ioread32(&rq->ctrl->fetch_index);
+	rq->to_use = rq->to_clean =
+		&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
+			[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
+	iowrite32(fetch_index, &rq->ctrl->posted_index);
+
+	rq->buf_index = 0;
+}
+
+unsigned int vnic_rq_error_status(struct vnic_rq *rq)
+{
+	return ioread32(&rq->ctrl->error_status);
+}
+
+void vnic_rq_enable(struct vnic_rq *rq)
+{
+	iowrite32(1, &rq->ctrl->enable);
+}
+
+int vnic_rq_disable(struct vnic_rq *rq)
+{
+	unsigned int wait;
+
+	iowrite32(0, &rq->ctrl->enable);
+
+	/* Wait for HW to ACK disable request */
+	for (wait = 0; wait < 100; wait++) {
+		if (!(ioread32(&rq->ctrl->running)))
+			return 0;
+		udelay(1);
+	}
+
+	printk(KERN_ERR "Failed to disable RQ[%d]\n", rq->index);
+
+	return -ETIMEDOUT;
+}
+
+void vnic_rq_clean(struct vnic_rq *rq,
+	void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf))
+{
+	struct vnic_rq_buf *buf;
+	u32 fetch_index;
+
+	BUG_ON(ioread32(&rq->ctrl->enable));
+
+	buf = rq->to_clean;
+
+	while (vnic_rq_desc_used(rq) > 0) {
+
+		(*buf_clean)(rq, buf);
+
+		buf = rq->to_clean = buf->next;
+		rq->ring.desc_avail++;
+	}
+
+	/* Use current fetch_index as the ring starting point */
+	fetch_index = ioread32(&rq->ctrl->fetch_index);
+	rq->to_use = rq->to_clean =
+		&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
+			[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
+	iowrite32(fetch_index, &rq->ctrl->posted_index);
+
+	rq->buf_index = 0;
+
+	vnic_dev_clear_desc_ring(&rq->ring);
+}
+
diff --git a/drivers/scsi/fnic/vnic_rq.h b/drivers/scsi/fnic/vnic_rq.h
new file mode 100644
index 0000000..aebdfbd
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_rq.h
@@ -0,0 +1,235 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_RQ_H_
+#define _VNIC_RQ_H_
+
+#include <linux/pci.h>
+#include "vnic_dev.h"
+#include "vnic_cq.h"
+
+/*
+ * These defines avoid symbol clashes between fnic and enic (the Cisco 10G
+ * Ethernet driver) when both drivers are built into the kernel (=y).
+ */
+#define vnic_rq_desc_avail fnic_rq_desc_avail
+#define vnic_rq_desc_used fnic_rq_desc_used
+#define vnic_rq_next_desc fnic_rq_next_desc
+#define vnic_rq_next_index fnic_rq_next_index
+#define vnic_rq_next_buf_index fnic_rq_next_buf_index
+#define vnic_rq_post fnic_rq_post
+#define vnic_rq_posting_soon fnic_rq_posting_soon
+#define vnic_rq_return_descs fnic_rq_return_descs
+#define vnic_rq_service fnic_rq_service
+#define vnic_rq_fill fnic_rq_fill
+#define vnic_rq_free fnic_rq_free
+#define vnic_rq_alloc fnic_rq_alloc
+#define vnic_rq_init fnic_rq_init
+#define vnic_rq_error_status fnic_rq_error_status
+#define vnic_rq_enable fnic_rq_enable
+#define vnic_rq_disable fnic_rq_disable
+#define vnic_rq_clean fnic_rq_clean
+
+/* Receive queue control */
+struct vnic_rq_ctrl {
+	u64 ring_base;			/* 0x00 */
+	u32 ring_size;			/* 0x08 */
+	u32 pad0;
+	u32 posted_index;		/* 0x10 */
+	u32 pad1;
+	u32 cq_index;			/* 0x18 */
+	u32 pad2;
+	u32 enable;			/* 0x20 */
+	u32 pad3;
+	u32 running;			/* 0x28 */
+	u32 pad4;
+	u32 fetch_index;		/* 0x30 */
+	u32 pad5;
+	u32 error_interrupt_enable;	/* 0x38 */
+	u32 pad6;
+	u32 error_interrupt_offset;	/* 0x40 */
+	u32 pad7;
+	u32 error_status;		/* 0x48 */
+	u32 pad8;
+	u32 dropped_packet_count;	/* 0x50 */
+	u32 pad9;
+	u32 dropped_packet_count_rc;	/* 0x58 */
+	u32 pad10;
+};
+
+/* Break the vnic_rq_buf allocations into blocks of 64 entries */
+#define VNIC_RQ_BUF_BLK_ENTRIES 64
+#define VNIC_RQ_BUF_BLK_SZ \
+	(VNIC_RQ_BUF_BLK_ENTRIES * sizeof(struct vnic_rq_buf))
+#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
+	DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES)
+#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)
+
+struct vnic_rq_buf {
+	struct vnic_rq_buf *next;
+	dma_addr_t dma_addr;
+	void *os_buf;
+	unsigned int os_buf_index;
+	unsigned int len;
+	unsigned int index;
+	void *desc;
+};
+
+struct vnic_rq {
+	unsigned int index;
+	struct vnic_dev *vdev;
+	struct vnic_rq_ctrl __iomem *ctrl;	/* memory-mapped */
+	struct vnic_dev_ring ring;
+	struct vnic_rq_buf *bufs[VNIC_RQ_BUF_BLKS_MAX];
+	struct vnic_rq_buf *to_use;
+	struct vnic_rq_buf *to_clean;
+	void *os_buf_head;
+	unsigned int buf_index;
+	unsigned int pkts_outstanding;
+};
+
+static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
+{
+	/* how many does SW own? */
+	return rq->ring.desc_avail;
+}
+
+static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
+{
+	/* how many does HW own? */
+	return rq->ring.desc_count - rq->ring.desc_avail - 1;
+}
+
+static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
+{
+	return rq->to_use->desc;
+}
+
+static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
+{
+	return rq->to_use->index;
+}
+
+static inline unsigned int vnic_rq_next_buf_index(struct vnic_rq *rq)
+{
+	return rq->buf_index++;
+}
+
+static inline void vnic_rq_post(struct vnic_rq *rq,
+	void *os_buf, unsigned int os_buf_index,
+	dma_addr_t dma_addr, unsigned int len)
+{
+	struct vnic_rq_buf *buf = rq->to_use;
+
+	buf->os_buf = os_buf;
+	buf->os_buf_index = os_buf_index;
+	buf->dma_addr = dma_addr;
+	buf->len = len;
+
+	buf = buf->next;
+	rq->to_use = buf;
+	rq->ring.desc_avail--;
+
+	/* Only move the posted_index (the doorbell write below) every
+	 * nth descriptor, so that MMIO writes are batched.
+	 */
+#ifndef VNIC_RQ_RETURN_RATE
+#define VNIC_RQ_RETURN_RATE		0xf	/* keep 2^n - 1 */
+#endif
+
+	if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) {
+		/* Adding write memory barrier prevents compiler and/or CPU
+		 * reordering, thus avoiding descriptor posting before
+		 * descriptor is initialized. Otherwise, hardware can read
+		 * stale descriptor fields.
+		 */
+		wmb();
+		iowrite32(buf->index, &rq->ctrl->posted_index);
+	}
+}
+
+static inline int vnic_rq_posting_soon(struct vnic_rq *rq)
+{
+	return (rq->to_use->index & VNIC_RQ_RETURN_RATE) == 0;
+}
+
+static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
+{
+	rq->ring.desc_avail += count;
+}
+
+enum desc_return_options {
+	VNIC_RQ_RETURN_DESC,
+	VNIC_RQ_DEFER_RETURN_DESC,
+};
+
+static inline void vnic_rq_service(struct vnic_rq *rq,
+	struct cq_desc *cq_desc, u16 completed_index,
+	int desc_return, void (*buf_service)(struct vnic_rq *rq,
+	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
+	int skipped, void *opaque), void *opaque)
+{
+	struct vnic_rq_buf *buf;
+	int skipped;
+
+	buf = rq->to_clean;
+	while (1) {
+
+		skipped = (buf->index != completed_index);
+
+		(*buf_service)(rq, cq_desc, buf, skipped, opaque);
+
+		if (desc_return == VNIC_RQ_RETURN_DESC)
+			rq->ring.desc_avail++;
+
+		rq->to_clean = buf->next;
+
+		if (!skipped)
+			break;
+
+		buf = rq->to_clean;
+	}
+}
+
+static inline int vnic_rq_fill(struct vnic_rq *rq,
+	int (*buf_fill)(struct vnic_rq *rq))
+{
+	int err;
+
+	while (vnic_rq_desc_avail(rq) > 1) {
+
+		err = (*buf_fill)(rq);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+void vnic_rq_free(struct vnic_rq *rq);
+int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
+	unsigned int desc_count, unsigned int desc_size);
+void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
+	unsigned int error_interrupt_enable,
+	unsigned int error_interrupt_offset);
+unsigned int vnic_rq_error_status(struct vnic_rq *rq);
+void vnic_rq_enable(struct vnic_rq *rq);
+int vnic_rq_disable(struct vnic_rq *rq);
+void vnic_rq_clean(struct vnic_rq *rq,
+	void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf));
+
+#endif /* _VNIC_RQ_H_ */
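The posted_index write in vnic_rq_post() is deliberately batched: the doorbell is only rung when the index of the next free descriptor is a multiple of VNIC_RQ_RETURN_RATE + 1. A simplified standalone model of that cadence for a 64-entry ring (illustrative only):

#include <stdio.h>

#define DESC_COUNT	64
#define RETURN_RATE	0xf	/* same default as VNIC_RQ_RETURN_RATE */

int main(void)
{
	unsigned int next_index, doorbells = 0;

	/* Post DESC_COUNT descriptors; the doorbell is rung only when the
	 * index of the *next* free descriptor is a multiple of 16. */
	for (next_index = 1; next_index <= DESC_COUNT; next_index++)
		if (((next_index % DESC_COUNT) & RETURN_RATE) == 0)
			doorbells++;

	printf("doorbells per full ring: %u\n", doorbells);	/* 4 */
	return 0;
}

So a full pass over the ring costs four posted_index writes instead of sixty-four.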
diff --git a/drivers/scsi/fnic/vnic_scsi.h b/drivers/scsi/fnic/vnic_scsi.h
new file mode 100644
index 0000000..46baa52
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_scsi.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_SCSI_H_
+#define _VNIC_SCSI_H_
+
+#define VNIC_FNIC_WQ_COPY_COUNT_MIN         1
+#define VNIC_FNIC_WQ_COPY_COUNT_MAX         1
+
+#define VNIC_FNIC_WQ_DESCS_MIN              64
+#define VNIC_FNIC_WQ_DESCS_MAX              128
+
+#define VNIC_FNIC_WQ_COPY_DESCS_MIN         64
+#define VNIC_FNIC_WQ_COPY_DESCS_MAX         512
+
+#define VNIC_FNIC_RQ_DESCS_MIN              64
+#define VNIC_FNIC_RQ_DESCS_MAX              128
+
+#define VNIC_FNIC_EDTOV_MIN                 1000
+#define VNIC_FNIC_EDTOV_MAX                 255000
+#define VNIC_FNIC_EDTOV_DEF                 2000
+
+#define VNIC_FNIC_RATOV_MIN                 1000
+#define VNIC_FNIC_RATOV_MAX                 255000
+
+#define VNIC_FNIC_MAXDATAFIELDSIZE_MIN      256
+#define VNIC_FNIC_MAXDATAFIELDSIZE_MAX      2112
+
+#define VNIC_FNIC_FLOGI_RETRIES_MIN         0
+#define VNIC_FNIC_FLOGI_RETRIES_MAX         0xffffffff
+#define VNIC_FNIC_FLOGI_RETRIES_DEF         0xffffffff
+
+#define VNIC_FNIC_FLOGI_TIMEOUT_MIN         1000
+#define VNIC_FNIC_FLOGI_TIMEOUT_MAX         255000
+
+#define VNIC_FNIC_PLOGI_RETRIES_MIN         0
+#define VNIC_FNIC_PLOGI_RETRIES_MAX         255
+#define VNIC_FNIC_PLOGI_RETRIES_DEF         8
+
+#define VNIC_FNIC_PLOGI_TIMEOUT_MIN         1000
+#define VNIC_FNIC_PLOGI_TIMEOUT_MAX         255000
+
+#define VNIC_FNIC_IO_THROTTLE_COUNT_MIN     256
+#define VNIC_FNIC_IO_THROTTLE_COUNT_MAX     4096
+
+#define VNIC_FNIC_LINK_DOWN_TIMEOUT_MIN     0
+#define VNIC_FNIC_LINK_DOWN_TIMEOUT_MAX     240000
+
+#define VNIC_FNIC_PORT_DOWN_TIMEOUT_MIN     0
+#define VNIC_FNIC_PORT_DOWN_TIMEOUT_MAX     240000
+
+#define VNIC_FNIC_PORT_DOWN_IO_RETRIES_MIN  0
+#define VNIC_FNIC_PORT_DOWN_IO_RETRIES_MAX  255
+
+#define VNIC_FNIC_LUNS_PER_TARGET_MIN       1
+#define VNIC_FNIC_LUNS_PER_TARGET_MAX       1024
+
+/* Device-specific region: scsi configuration */
+struct vnic_fc_config {
+	u64 node_wwn;
+	u64 port_wwn;
+	u32 flags;
+	u32 wq_enet_desc_count;
+	u32 wq_copy_desc_count;
+	u32 rq_desc_count;
+	u32 flogi_retries;
+	u32 flogi_timeout;
+	u32 plogi_retries;
+	u32 plogi_timeout;
+	u32 io_throttle_count;
+	u32 link_down_timeout;
+	u32 port_down_timeout;
+	u32 port_down_io_retries;
+	u32 luns_per_tgt;
+	u16 maxdatafieldsize;
+	u16 ed_tov;
+	u16 ra_tov;
+	u16 intr_timer;
+	u8 intr_timer_type;
+};
+
+#define VFCF_FCP_SEQ_LVL_ERR	0x1	/* Enable FCP-2 Error Recovery */
+#define VFCF_PERBI		0x2	/* persistent binding info available */
+
+#endif /* _VNIC_SCSI_H_ */
diff --git a/drivers/scsi/fnic/vnic_stats.h b/drivers/scsi/fnic/vnic_stats.h
new file mode 100644
index 0000000..5372e23
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_stats.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_STATS_H_
+#define _VNIC_STATS_H_
+
+/* Tx statistics */
+struct vnic_tx_stats {
+	u64 tx_frames_ok;
+	u64 tx_unicast_frames_ok;
+	u64 tx_multicast_frames_ok;
+	u64 tx_broadcast_frames_ok;
+	u64 tx_bytes_ok;
+	u64 tx_unicast_bytes_ok;
+	u64 tx_multicast_bytes_ok;
+	u64 tx_broadcast_bytes_ok;
+	u64 tx_drops;
+	u64 tx_errors;
+	u64 tx_tso;
+	u64 rsvd[16];
+};
+
+/* Rx statistics */
+struct vnic_rx_stats {
+	u64 rx_frames_ok;
+	u64 rx_frames_total;
+	u64 rx_unicast_frames_ok;
+	u64 rx_multicast_frames_ok;
+	u64 rx_broadcast_frames_ok;
+	u64 rx_bytes_ok;
+	u64 rx_unicast_bytes_ok;
+	u64 rx_multicast_bytes_ok;
+	u64 rx_broadcast_bytes_ok;
+	u64 rx_drop;
+	u64 rx_no_bufs;
+	u64 rx_errors;
+	u64 rx_rss;
+	u64 rx_crc_errors;
+	u64 rx_frames_64;
+	u64 rx_frames_127;
+	u64 rx_frames_255;
+	u64 rx_frames_511;
+	u64 rx_frames_1023;
+	u64 rx_frames_1518;
+	u64 rx_frames_to_max;
+	u64 rsvd[16];
+};
+
+struct vnic_stats {
+	struct vnic_tx_stats tx;
+	struct vnic_rx_stats rx;
+};
+
+#endif /* _VNIC_STATS_H_ */
diff --git a/drivers/scsi/fnic/vnic_wq.c b/drivers/scsi/fnic/vnic_wq.c
new file mode 100644
index 0000000..1f9ea79
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_wq.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include "vnic_dev.h"
+#include "vnic_wq.h"
+
+static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
+{
+	struct vnic_wq_buf *buf;
+	struct vnic_dev *vdev;
+	unsigned int i, j, count = wq->ring.desc_count;
+	unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);
+
+	vdev = wq->vdev;
+
+	for (i = 0; i < blks; i++) {
+		wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC);
+		if (!wq->bufs[i]) {
+			printk(KERN_ERR "Failed to alloc wq_bufs\n");
+			return -ENOMEM;
+		}
+	}
+
+	for (i = 0; i < blks; i++) {
+		buf = wq->bufs[i];
+		for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES; j++) {
+			buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES + j;
+			buf->desc = (u8 *)wq->ring.descs +
+				wq->ring.desc_size * buf->index;
+			if (buf->index + 1 == count) {
+				buf->next = wq->bufs[0];
+				break;
+			} else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES) {
+				buf->next = wq->bufs[i + 1];
+			} else {
+				buf->next = buf + 1;
+				buf++;
+			}
+		}
+	}
+
+	wq->to_use = wq->to_clean = wq->bufs[0];
+
+	return 0;
+}
+
+void vnic_wq_free(struct vnic_wq *wq)
+{
+	struct vnic_dev *vdev;
+	unsigned int i;
+
+	vdev = wq->vdev;
+
+	vnic_dev_free_desc_ring(vdev, &wq->ring);
+
+	for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) {
+		kfree(wq->bufs[i]);
+		wq->bufs[i] = NULL;
+	}
+
+	wq->ctrl = NULL;
+
+}
+
+int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
+	unsigned int desc_count, unsigned int desc_size)
+{
+	int err;
+
+	wq->index = index;
+	wq->vdev = vdev;
+
+	wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
+	if (!wq->ctrl) {
+		printk(KERN_ERR "Failed to hook WQ[%d] resource\n", index);
+		return -EINVAL;
+	}
+
+	vnic_wq_disable(wq);
+
+	err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
+	if (err)
+		return err;
+
+	err = vnic_wq_alloc_bufs(wq);
+	if (err) {
+		vnic_wq_free(wq);
+		return err;
+	}
+
+	return 0;
+}
+
+void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
+	unsigned int error_interrupt_enable,
+	unsigned int error_interrupt_offset)
+{
+	u64 paddr;
+
+	paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
+	writeq(paddr, &wq->ctrl->ring_base);
+	iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size);
+	iowrite32(0, &wq->ctrl->fetch_index);
+	iowrite32(0, &wq->ctrl->posted_index);
+	iowrite32(cq_index, &wq->ctrl->cq_index);
+	iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
+	iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
+	iowrite32(0, &wq->ctrl->error_status);
+}
+
+unsigned int vnic_wq_error_status(struct vnic_wq *wq)
+{
+	return ioread32(&wq->ctrl->error_status);
+}
+
+void vnic_wq_enable(struct vnic_wq *wq)
+{
+	iowrite32(1, &wq->ctrl->enable);
+}
+
+int vnic_wq_disable(struct vnic_wq *wq)
+{
+	unsigned int wait;
+
+	iowrite32(0, &wq->ctrl->enable);
+
+	/* Wait for HW to ACK disable request */
+	for (wait = 0; wait < 100; wait++) {
+		if (!(ioread32(&wq->ctrl->running)))
+			return 0;
+		udelay(1);
+	}
+
+	printk(KERN_ERR "Failed to disable WQ[%d]\n", wq->index);
+
+	return -ETIMEDOUT;
+}
+
+void vnic_wq_clean(struct vnic_wq *wq,
+	void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf))
+{
+	struct vnic_wq_buf *buf;
+
+	BUG_ON(ioread32(&wq->ctrl->enable));
+
+	buf = wq->to_clean;
+
+	while (vnic_wq_desc_used(wq) > 0) {
+
+		(*buf_clean)(wq, buf);
+
+		buf = wq->to_clean = buf->next;
+		wq->ring.desc_avail++;
+	}
+
+	wq->to_use = wq->to_clean = wq->bufs[0];
+
+	iowrite32(0, &wq->ctrl->fetch_index);
+	iowrite32(0, &wq->ctrl->posted_index);
+	iowrite32(0, &wq->ctrl->error_status);
+
+	vnic_dev_clear_desc_ring(&wq->ring);
+}
diff --git a/drivers/scsi/fnic/vnic_wq.h b/drivers/scsi/fnic/vnic_wq.h
new file mode 100644
index 0000000..5cd094f
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_wq.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_WQ_H_
+#define _VNIC_WQ_H_
+
+#include <linux/pci.h>
+#include "vnic_dev.h"
+#include "vnic_cq.h"
+
+/*
+ * These defines avoid symbol clashes between fnic and enic (the Cisco 10G
+ * Ethernet driver) when both drivers are built into the kernel (=y).
+ */
+#define vnic_wq_desc_avail fnic_wq_desc_avail
+#define vnic_wq_desc_used fnic_wq_desc_used
+#define vnic_wq_next_desc fnic_wq_next_desc
+#define vnic_wq_post fnic_wq_post
+#define vnic_wq_service fnic_wq_service
+#define vnic_wq_free fnic_wq_free
+#define vnic_wq_alloc fnic_wq_alloc
+#define vnic_wq_init fnic_wq_init
+#define vnic_wq_error_status fnic_wq_error_status
+#define vnic_wq_enable fnic_wq_enable
+#define vnic_wq_disable fnic_wq_disable
+#define vnic_wq_clean fnic_wq_clean
+
+/* Work queue control */
+struct vnic_wq_ctrl {
+	u64 ring_base;			/* 0x00 */
+	u32 ring_size;			/* 0x08 */
+	u32 pad0;
+	u32 posted_index;		/* 0x10 */
+	u32 pad1;
+	u32 cq_index;			/* 0x18 */
+	u32 pad2;
+	u32 enable;			/* 0x20 */
+	u32 pad3;
+	u32 running;			/* 0x28 */
+	u32 pad4;
+	u32 fetch_index;		/* 0x30 */
+	u32 pad5;
+	u32 dca_value;			/* 0x38 */
+	u32 pad6;
+	u32 error_interrupt_enable;	/* 0x40 */
+	u32 pad7;
+	u32 error_interrupt_offset;	/* 0x48 */
+	u32 pad8;
+	u32 error_status;		/* 0x50 */
+	u32 pad9;
+};
+
+struct vnic_wq_buf {
+	struct vnic_wq_buf *next;
+	dma_addr_t dma_addr;
+	void *os_buf;
+	unsigned int len;
+	unsigned int index;
+	int sop;
+	void *desc;
+};
+
+/* Break the vnic_wq_buf allocations into blocks of 64 entries */
+#define VNIC_WQ_BUF_BLK_ENTRIES 64
+#define VNIC_WQ_BUF_BLK_SZ \
+	(VNIC_WQ_BUF_BLK_ENTRIES * sizeof(struct vnic_wq_buf))
+#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
+	DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES)
+#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)
+
+struct vnic_wq {
+	unsigned int index;
+	struct vnic_dev *vdev;
+	struct vnic_wq_ctrl __iomem *ctrl;	/* memory-mapped */
+	struct vnic_dev_ring ring;
+	struct vnic_wq_buf *bufs[VNIC_WQ_BUF_BLKS_MAX];
+	struct vnic_wq_buf *to_use;
+	struct vnic_wq_buf *to_clean;
+	unsigned int pkts_outstanding;
+};
+
+static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
+{
+	/* how many does SW own? */
+	return wq->ring.desc_avail;
+}
+
+static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq)
+{
+	/* how many does HW own? */
+	return wq->ring.desc_count - wq->ring.desc_avail - 1;
+}
+
+static inline void *vnic_wq_next_desc(struct vnic_wq *wq)
+{
+	return wq->to_use->desc;
+}
+
+static inline void vnic_wq_post(struct vnic_wq *wq,
+	void *os_buf, dma_addr_t dma_addr,
+	unsigned int len, int sop, int eop)
+{
+	struct vnic_wq_buf *buf = wq->to_use;
+
+	buf->sop = sop;
+	buf->os_buf = eop ? os_buf : NULL;
+	buf->dma_addr = dma_addr;
+	buf->len = len;
+
+	buf = buf->next;
+	if (eop) {
+		/* Adding write memory barrier prevents compiler and/or CPU
+		 * reordering, thus avoiding descriptor posting before
+		 * descriptor is initialized. Otherwise, hardware can read
+		 * stale descriptor fields.
+		 */
+		wmb();
+		iowrite32(buf->index, &wq->ctrl->posted_index);
+	}
+	wq->to_use = buf;
+
+	wq->ring.desc_avail--;
+}
+
+static inline void vnic_wq_service(struct vnic_wq *wq,
+	struct cq_desc *cq_desc, u16 completed_index,
+	void (*buf_service)(struct vnic_wq *wq,
+	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque),
+	void *opaque)
+{
+	struct vnic_wq_buf *buf;
+
+	buf = wq->to_clean;
+	while (1) {
+
+		(*buf_service)(wq, cq_desc, buf, opaque);
+
+		wq->ring.desc_avail++;
+
+		wq->to_clean = buf->next;
+
+		if (buf->index == completed_index)
+			break;
+
+		buf = wq->to_clean;
+	}
+}
+
+void vnic_wq_free(struct vnic_wq *wq);
+int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
+	unsigned int desc_count, unsigned int desc_size);
+void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
+	unsigned int error_interrupt_enable,
+	unsigned int error_interrupt_offset);
+unsigned int vnic_wq_error_status(struct vnic_wq *wq);
+void vnic_wq_enable(struct vnic_wq *wq);
+int vnic_wq_disable(struct vnic_wq *wq);
+void vnic_wq_clean(struct vnic_wq *wq,
+	void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));
+
+#endif /* _VNIC_WQ_H_ */
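vnic_wq_service() reclaims descriptors by walking the to_clean chain until it reaches the completion index reported by the CQ entry. A simplified standalone model of that walk, with array indices standing in for the buf->next pointers (illustrative only):

#include <stdio.h>

#define RING_SIZE	8

int main(void)
{
	unsigned int to_clean = 2;	/* first unserviced descriptor */
	unsigned int completed = 5;	/* index reported by the CQ entry */
	unsigned int desc_avail = 0, cur;

	for (;;) {
		desc_avail++;				/* reclaim this descriptor */
		cur = to_clean;
		to_clean = (to_clean + 1) % RING_SIZE;	/* buf = buf->next */
		if (cur == completed)
			break;
	}

	printf("reclaimed %u, to_clean now %u\n", desc_avail, to_clean); /* 4, 6 */
	return 0;
}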
diff --git a/drivers/scsi/fnic/vnic_wq_copy.c b/drivers/scsi/fnic/vnic_wq_copy.c
new file mode 100644
index 0000000..9eab7e7
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_wq_copy.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include "vnic_wq_copy.h"
+
+void vnic_wq_copy_enable(struct vnic_wq_copy *wq)
+{
+	iowrite32(1, &wq->ctrl->enable);
+}
+
+int vnic_wq_copy_disable(struct vnic_wq_copy *wq)
+{
+	unsigned int wait;
+
+	iowrite32(0, &wq->ctrl->enable);
+
+	/* Wait for HW to ACK disable request */
+	for (wait = 0; wait < 100; wait++) {
+		if (!(ioread32(&wq->ctrl->running)))
+			return 0;
+		udelay(1);
+	}
+
+	printk(KERN_ERR "Failed to disable Copy WQ[%d],"
+	       " fetch index=%d, posted_index=%d\n",
+	       wq->index, ioread32(&wq->ctrl->fetch_index),
+	       ioread32(&wq->ctrl->posted_index));
+
+	return -ENODEV;
+}
+
+void vnic_wq_copy_clean(struct vnic_wq_copy *wq,
+	void (*q_clean)(struct vnic_wq_copy *wq,
+	struct fcpio_host_req *wq_desc))
+{
+	BUG_ON(ioread32(&wq->ctrl->enable));
+
+	if (vnic_wq_copy_desc_in_use(wq))
+		vnic_wq_copy_service(wq, -1, q_clean);
+
+	wq->to_use_index = wq->to_clean_index = 0;
+
+	iowrite32(0, &wq->ctrl->fetch_index);
+	iowrite32(0, &wq->ctrl->posted_index);
+	iowrite32(0, &wq->ctrl->error_status);
+
+	vnic_dev_clear_desc_ring(&wq->ring);
+}
+
+void vnic_wq_copy_free(struct vnic_wq_copy *wq)
+{
+	struct vnic_dev *vdev;
+
+	vdev = wq->vdev;
+	vnic_dev_free_desc_ring(vdev, &wq->ring);
+	wq->ctrl = NULL;
+}
+
+int vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq,
+		       unsigned int index, unsigned int desc_count,
+		       unsigned int desc_size)
+{
+	int err;
+
+	wq->index = index;
+	wq->vdev = vdev;
+	wq->to_use_index = wq->to_clean_index = 0;
+	wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
+	if (!wq->ctrl) {
+		printk(KERN_ERR "Failed to hook COPY WQ[%d] resource\n", index);
+		return -EINVAL;
+	}
+
+	vnic_wq_copy_disable(wq);
+
+	err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+void vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index,
+	unsigned int error_interrupt_enable,
+	unsigned int error_interrupt_offset)
+{
+	u64 paddr;
+
+	paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
+	writeq(paddr, &wq->ctrl->ring_base);
+	iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size);
+	iowrite32(0, &wq->ctrl->fetch_index);
+	iowrite32(0, &wq->ctrl->posted_index);
+	iowrite32(cq_index, &wq->ctrl->cq_index);
+	iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
+	iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
+}
+
diff --git a/drivers/scsi/fnic/vnic_wq_copy.h b/drivers/scsi/fnic/vnic_wq_copy.h
new file mode 100644
index 0000000..6aff974
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_wq_copy.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_WQ_COPY_H_
+#define _VNIC_WQ_COPY_H_
+
+#include <linux/pci.h>
+#include "vnic_wq.h"
+#include "fcpio.h"
+
+#define	VNIC_WQ_COPY_MAX 1
+
+struct vnic_wq_copy {
+	unsigned int index;
+	struct vnic_dev *vdev;
+	struct vnic_wq_ctrl __iomem *ctrl;	/* memory-mapped */
+	struct vnic_dev_ring ring;
+	unsigned to_use_index;
+	unsigned to_clean_index;
+};
+
+static inline unsigned int vnic_wq_copy_desc_avail(struct vnic_wq_copy *wq)
+{
+	return wq->ring.desc_avail;
+}
+
+static inline unsigned int vnic_wq_copy_desc_in_use(struct vnic_wq_copy *wq)
+{
+	return wq->ring.desc_count - 1 - wq->ring.desc_avail;
+}
+
+static inline void *vnic_wq_copy_next_desc(struct vnic_wq_copy *wq)
+{
+	struct fcpio_host_req *desc = wq->ring.descs;
+	return &desc[wq->to_use_index];
+}
+
+static inline void vnic_wq_copy_post(struct vnic_wq_copy *wq)
+{
+
+	((wq->to_use_index + 1) == wq->ring.desc_count) ?
+		(wq->to_use_index = 0) : (wq->to_use_index++);
+	wq->ring.desc_avail--;
+
+	/* Adding write memory barrier prevents compiler and/or CPU
+	 * reordering, thus avoiding descriptor posting before
+	 * descriptor is initialized. Otherwise, hardware can read
+	 * stale descriptor fields.
+	 */
+	wmb();
+
+	iowrite32(wq->to_use_index, &wq->ctrl->posted_index);
+}
+
+static inline void vnic_wq_copy_desc_process(struct vnic_wq_copy *wq, u16 index)
+{
+	unsigned int cnt;
+
+	if (wq->to_clean_index <= index)
+		cnt = (index - wq->to_clean_index) + 1;
+	else
+		cnt = wq->ring.desc_count - wq->to_clean_index + index + 1;
+
+	wq->to_clean_index = ((index + 1) % wq->ring.desc_count);
+	wq->ring.desc_avail += cnt;
+
+}
+
+static inline void vnic_wq_copy_service(struct vnic_wq_copy *wq,
+	u16 completed_index,
+	void (*q_service)(struct vnic_wq_copy *wq,
+	struct fcpio_host_req *wq_desc))
+{
+	struct fcpio_host_req *wq_desc = wq->ring.descs;
+	unsigned int curr_index;
+
+	while (1) {
+
+		if (q_service)
+			(*q_service)(wq, &wq_desc[wq->to_clean_index]);
+
+		wq->ring.desc_avail++;
+
+		curr_index = wq->to_clean_index;
+
+		/* increment the to-clean index so that we start
+		 * with an unprocessed index next time we enter the loop
+		 */
+		((wq->to_clean_index + 1) == wq->ring.desc_count) ?
+			(wq->to_clean_index = 0) : (wq->to_clean_index++);
+
+		if (curr_index == completed_index)
+			break;
+
+		/* we have cleaned all the entries */
+		if ((completed_index == (u16)-1) &&
+		    (wq->to_clean_index == wq->to_use_index))
+			break;
+	}
+}
+
+void vnic_wq_copy_enable(struct vnic_wq_copy *wq);
+int vnic_wq_copy_disable(struct vnic_wq_copy *wq);
+void vnic_wq_copy_free(struct vnic_wq_copy *wq);
+int vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq,
+	unsigned int index, unsigned int desc_count, unsigned int desc_size);
+void vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index,
+	unsigned int error_interrupt_enable,
+	unsigned int error_interrupt_offset);
+void vnic_wq_copy_clean(struct vnic_wq_copy *wq,
+	void (*q_clean)(struct vnic_wq_copy *wq,
+	struct fcpio_host_req *wq_desc));
+
+#endif /* _VNIC_WQ_COPY_H_ */
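vnic_wq_copy_desc_process() has to allow for the completion index wrapping past the end of the ring when it counts how many descriptors just finished. A standalone sketch of that calculation (the completed_count() helper is illustrative only):

#include <stdio.h>

/* Count descriptors from to_clean up to and including index, allowing for
 * the index wrapping past the end of a desc_count-entry ring. */
static unsigned int completed_count(unsigned int to_clean, unsigned int index,
				    unsigned int desc_count)
{
	if (to_clean <= index)
		return index - to_clean + 1;
	return desc_count - to_clean + index + 1;
}

int main(void)
{
	printf("%u\n", completed_count(3, 7, 16));	/* no wrap: 5 */
	printf("%u\n", completed_count(14, 1, 16));	/* wraps:   4 */
	return 0;
}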
diff --git a/drivers/scsi/fnic/wq_enet_desc.h b/drivers/scsi/fnic/wq_enet_desc.h
new file mode 100644
index 0000000..b121cba
--- /dev/null
+++ b/drivers/scsi/fnic/wq_enet_desc.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _WQ_ENET_DESC_H_
+#define _WQ_ENET_DESC_H_
+
+/* Ethernet work queue descriptor: 16B */
+struct wq_enet_desc {
+	__le64 address;
+	__le16 length;
+	__le16 mss_loopback;
+	__le16 header_length_flags;
+	__le16 vlan_tag;
+};
+
+#define WQ_ENET_ADDR_BITS		64
+#define WQ_ENET_LEN_BITS		14
+#define WQ_ENET_LEN_MASK		((1 << WQ_ENET_LEN_BITS) - 1)
+#define WQ_ENET_MSS_BITS		14
+#define WQ_ENET_MSS_MASK		((1 << WQ_ENET_MSS_BITS) - 1)
+#define WQ_ENET_MSS_SHIFT		2
+#define WQ_ENET_LOOPBACK_SHIFT		1
+#define WQ_ENET_HDRLEN_BITS		10
+#define WQ_ENET_HDRLEN_MASK		((1 << WQ_ENET_HDRLEN_BITS) - 1)
+#define WQ_ENET_FLAGS_OM_BITS		2
+#define WQ_ENET_FLAGS_OM_MASK		((1 << WQ_ENET_FLAGS_OM_BITS) - 1)
+#define WQ_ENET_FLAGS_EOP_SHIFT		12
+#define WQ_ENET_FLAGS_CQ_ENTRY_SHIFT	13
+#define WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT	14
+#define WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT	15
+
+#define WQ_ENET_OFFLOAD_MODE_CSUM	0
+#define WQ_ENET_OFFLOAD_MODE_RESERVED	1
+#define WQ_ENET_OFFLOAD_MODE_CSUM_L4	2
+#define WQ_ENET_OFFLOAD_MODE_TSO	3
+
+static inline void wq_enet_desc_enc(struct wq_enet_desc *desc,
+	u64 address, u16 length, u16 mss, u16 header_length,
+	u8 offload_mode, u8 eop, u8 cq_entry, u8 fcoe_encap,
+	u8 vlan_tag_insert, u16 vlan_tag, u8 loopback)
+{
+	desc->address = cpu_to_le64(address);
+	desc->length = cpu_to_le16(length & WQ_ENET_LEN_MASK);
+	desc->mss_loopback = cpu_to_le16((mss & WQ_ENET_MSS_MASK) <<
+		WQ_ENET_MSS_SHIFT | (loopback & 1) << WQ_ENET_LOOPBACK_SHIFT);
+	desc->header_length_flags = cpu_to_le16(
+		(header_length & WQ_ENET_HDRLEN_MASK) |
+		(offload_mode & WQ_ENET_FLAGS_OM_MASK) << WQ_ENET_HDRLEN_BITS |
+		(eop & 1) << WQ_ENET_FLAGS_EOP_SHIFT |
+		(cq_entry & 1) << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT |
+		(fcoe_encap & 1) << WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT |
+		(vlan_tag_insert & 1) << WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT);
+	desc->vlan_tag = cpu_to_le16(vlan_tag);
+}
+
+static inline void wq_enet_desc_dec(struct wq_enet_desc *desc,
+	u64 *address, u16 *length, u16 *mss, u16 *header_length,
+	u8 *offload_mode, u8 *eop, u8 *cq_entry, u8 *fcoe_encap,
+	u8 *vlan_tag_insert, u16 *vlan_tag, u8 *loopback)
+{
+	*address = le64_to_cpu(desc->address);
+	*length = le16_to_cpu(desc->length) & WQ_ENET_LEN_MASK;
+	*mss = (le16_to_cpu(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) &
+		WQ_ENET_MSS_MASK;
+	*loopback = (u8)((le16_to_cpu(desc->mss_loopback) >>
+		WQ_ENET_LOOPBACK_SHIFT) & 1);
+	*header_length = le16_to_cpu(desc->header_length_flags) &
+		WQ_ENET_HDRLEN_MASK;
+	*offload_mode = (u8)((le16_to_cpu(desc->header_length_flags) >>
+		WQ_ENET_HDRLEN_BITS) & WQ_ENET_FLAGS_OM_MASK);
+	*eop = (u8)((le16_to_cpu(desc->header_length_flags) >>
+		WQ_ENET_FLAGS_EOP_SHIFT) & 1);
+	*cq_entry = (u8)((le16_to_cpu(desc->header_length_flags) >>
+		WQ_ENET_FLAGS_CQ_ENTRY_SHIFT) & 1);
+	*fcoe_encap = (u8)((le16_to_cpu(desc->header_length_flags) >>
+		WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT) & 1);
+	*vlan_tag_insert = (u8)((le16_to_cpu(desc->header_length_flags) >>
+		WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT) & 1);
+	*vlan_tag = le16_to_cpu(desc->vlan_tag);
+}
+
+#endif /* _WQ_ENET_DESC_H_ */
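The header_length_flags word built by wq_enet_desc_enc() packs the header length into bits 0-9, the offload mode into bits 10-11 and the per-frame flags into bits 12-15. A standalone sketch of that packing, assuming host byte order (the cpu_to_le16() conversion is omitted here):

#include <stdint.h>
#include <stdio.h>

#define HDRLEN_MASK		((1 << 10) - 1)
#define OM_MASK			((1 << 2) - 1)
#define OM_SHIFT		10	/* WQ_ENET_HDRLEN_BITS */
#define EOP_SHIFT		12
#define CQ_ENTRY_SHIFT		13
#define FCOE_ENCAP_SHIFT	14
#define VLAN_INSERT_SHIFT	15

int main(void)
{
	uint16_t header_length = 34;	/* example value */
	uint16_t offload_mode = 3;	/* WQ_ENET_OFFLOAD_MODE_TSO */

	uint16_t w = (header_length & HDRLEN_MASK) |
		((offload_mode & OM_MASK) << OM_SHIFT) |
		(1 << EOP_SHIFT) |		/* end of packet */
		(1 << CQ_ENTRY_SHIFT) |		/* request a completion */
		(1 << FCOE_ENCAP_SHIFT) |	/* FCoE-encapsulated frame */
		(0 << VLAN_INSERT_SHIFT);	/* no VLAN tag insertion */

	printf("0x%04x\n", w);	/* 0x7c22 */
	return 0;
}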
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index babd4cc..36b1d10 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -69,7 +69,7 @@
 #define MPT2SAS_AUTHOR	"LSI Corporation <DL-MPTFusionLinux@lsi.com>"
 #define MPT2SAS_DESCRIPTION	"LSI MPT Fusion SAS 2.0 Device Driver"
 #define MPT2SAS_DRIVER_VERSION		"01.100.02.00"
-#define MPT2SAS_MAJOR_VERSION		00
+#define MPT2SAS_MAJOR_VERSION		01
 #define MPT2SAS_MINOR_VERSION		100
 #define MPT2SAS_BUILD_VERSION		02
 #define MPT2SAS_RELEASE_VERSION		00
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 6f51ca4..e2b50d8 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -425,6 +425,7 @@
 	INIT_LIST_HEAD(&starget->devices);
 	starget->state = STARGET_CREATED;
 	starget->scsi_level = SCSI_2;
+	starget->max_target_blocked = SCSI_DEFAULT_TARGET_BLOCKED;
  retry:
 	spin_lock_irqsave(shost->host_lock, flags);
 
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 09479545..0a2ce7b 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -357,7 +357,7 @@
 		err = 0;
 		break;
 	case ISCSI_SESSION_FAILED:
-		err = DID_TRANSPORT_DISRUPTED << 16;
+		err = DID_IMM_RETRY << 16;
 		break;
 	case ISCSI_SESSION_FREE:
 		err = DID_TRANSPORT_FAILFAST << 16;
diff --git a/drivers/serial/amba-pl010.c b/drivers/serial/amba-pl010.c
index e3a5ad5..cdc049d 100644
--- a/drivers/serial/amba-pl010.c
+++ b/drivers/serial/amba-pl010.c
@@ -665,7 +665,7 @@
 	.cons			= AMBA_CONSOLE,
 };
 
-static int pl010_probe(struct amba_device *dev, void *id)
+static int pl010_probe(struct amba_device *dev, struct amba_id *id)
 {
 	struct uart_amba_port *uap;
 	void __iomem *base;
diff --git a/drivers/serial/amba-pl011.c b/drivers/serial/amba-pl011.c
index 8b2b970..88fdac5 100644
--- a/drivers/serial/amba-pl011.c
+++ b/drivers/serial/amba-pl011.c
@@ -729,7 +729,7 @@
 	.cons			= AMBA_CONSOLE,
 };
 
-static int pl011_probe(struct amba_device *dev, void *id)
+static int pl011_probe(struct amba_device *dev, struct amba_id *id)
 {
 	struct uart_amba_port *uap;
 	void __iomem *base;
diff --git a/drivers/serial/icom.c b/drivers/serial/icom.c
index 6579e2b..a461b3b 100644
--- a/drivers/serial/icom.c
+++ b/drivers/serial/icom.c
@@ -1472,8 +1472,8 @@
 
 	free_irq(icom_adapter->pci_dev->irq, (void *) icom_adapter);
 	iounmap(icom_adapter->base_addr);
-	icom_free_adapter(icom_adapter);
 	pci_release_regions(icom_adapter->pci_dev);
+	icom_free_adapter(icom_adapter);
 }
 
 static void icom_kref_release(struct kref *kref)
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index d0b093b..5e38ba1 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -961,7 +961,7 @@
 
 		switch (trip_type) {
 		case THERMAL_TRIP_CRITICAL:
-			if (temp > trip_temp) {
+			if (temp >= trip_temp) {
 				if (tz->ops->notify)
 					ret = tz->ops->notify(tz, count,
 							      trip_type);
@@ -974,7 +974,7 @@
 			}
 			break;
 		case THERMAL_TRIP_HOT:
-			if (temp > trip_temp)
+			if (temp >= trip_temp)
 				if (tz->ops->notify)
 					tz->ops->notify(tz, count, trip_type);
 			break;
@@ -986,14 +986,14 @@
 
 				cdev = instance->cdev;
 
-				if (temp > trip_temp)
+				if (temp >= trip_temp)
 					cdev->ops->set_cur_state(cdev, 1);
 				else
 					cdev->ops->set_cur_state(cdev, 0);
 			}
 			break;
 		case THERMAL_TRIP_PASSIVE:
-			if (temp > trip_temp || tz->passive)
+			if (temp >= trip_temp || tz->passive)
 				thermal_zone_device_passive(tz, temp,
 							    trip_temp, count);
 			break;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 0ab8474b..d9fcdae 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1487,14 +1487,7 @@
 
 	remove_sysfs_attrs(port);
 
-	/* all open ports are closed at this point
-	 *    (by usbserial.c:__serial_close, which calls ftdi_close)
-	 */
-
-	if (priv) {
-		usb_set_serial_port_data(port, NULL);
-		kref_put(&priv->kref, ftdi_sio_priv_release);
-	}
+	kref_put(&priv->kref, ftdi_sio_priv_release);
 
 	return 0;
 }
diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c
index 61050ab..d1f80ba 100644
--- a/drivers/video/amba-clcd.c
+++ b/drivers/video/amba-clcd.c
@@ -437,7 +437,7 @@
 	return ret;
 }
 
-static int clcdfb_probe(struct amba_device *dev, void *id)
+static int clcdfb_probe(struct amba_device *dev, struct amba_id *id)
 {
 	struct clcd_board *board = dev->dev.platform_data;
 	struct clcd_fb *fb;
diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c
index dfb72f5e..148cbcc 100644
--- a/drivers/video/omap/dispc.c
+++ b/drivers/video/omap/dispc.c
@@ -880,20 +880,22 @@
 
 static int get_dss_clocks(void)
 {
-	if (IS_ERR((dispc.dss_ick = clk_get(dispc.fbdev->dev, "dss_ick")))) {
-		dev_err(dispc.fbdev->dev, "can't get dss_ick\n");
+	dispc.dss_ick = clk_get(dispc.fbdev->dev, "ick");
+	if (IS_ERR(dispc.dss_ick)) {
+		dev_err(dispc.fbdev->dev, "can't get ick\n");
 		return PTR_ERR(dispc.dss_ick);
 	}
 
-	if (IS_ERR((dispc.dss1_fck = clk_get(dispc.fbdev->dev, "dss1_fck")))) {
+	dispc.dss1_fck = clk_get(dispc.fbdev->dev, "dss1_fck");
+	if (IS_ERR(dispc.dss1_fck)) {
 		dev_err(dispc.fbdev->dev, "can't get dss1_fck\n");
 		clk_put(dispc.dss_ick);
 		return PTR_ERR(dispc.dss1_fck);
 	}
 
-	if (IS_ERR((dispc.dss_54m_fck =
-				clk_get(dispc.fbdev->dev, "dss_54m_fck")))) {
-		dev_err(dispc.fbdev->dev, "can't get dss_54m_fck\n");
+	dispc.dss_54m_fck = clk_get(dispc.fbdev->dev, "tv_fck");
+	if (IS_ERR(dispc.dss_54m_fck)) {
+		dev_err(dispc.fbdev->dev, "can't get tv_fck\n");
 		clk_put(dispc.dss_ick);
 		clk_put(dispc.dss1_fck);
 		return PTR_ERR(dispc.dss_54m_fck);
diff --git a/drivers/video/omap/rfbi.c b/drivers/video/omap/rfbi.c
index a13c8dc..9332d6c 100644
--- a/drivers/video/omap/rfbi.c
+++ b/drivers/video/omap/rfbi.c
@@ -83,12 +83,14 @@
 
 static int rfbi_get_clocks(void)
 {
-	if (IS_ERR((rfbi.dss_ick = clk_get(rfbi.fbdev->dev, "dss_ick")))) {
-		dev_err(rfbi.fbdev->dev, "can't get dss_ick\n");
+	rfbi.dss_ick = clk_get(rfbi.fbdev->dev, "ick");
+	if (IS_ERR(rfbi.dss_ick)) {
+		dev_err(rfbi.fbdev->dev, "can't get ick\n");
 		return PTR_ERR(rfbi.dss_ick);
 	}
 
-	if (IS_ERR((rfbi.dss1_fck = clk_get(rfbi.fbdev->dev, "dss1_fck")))) {
+	rfbi.dss1_fck = clk_get(rfbi.fbdev->dev, "dss1_fck");
+	if (IS_ERR(rfbi.dss1_fck)) {
 		dev_err(rfbi.fbdev->dev, "can't get dss1_fck\n");
 		clk_put(rfbi.dss_ick);
 		return PTR_ERR(rfbi.dss1_fck);
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index 92ea0ab..f10d2fb 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -47,6 +47,7 @@
 #endif
 	unsigned long lddckr;
 	struct sh_mobile_lcdc_chan ch[2];
+	int started;
 };
 
 /* shared registers */
@@ -451,6 +452,7 @@
 
 	/* start the lcdc */
 	sh_mobile_lcdc_start_stop(priv, 1);
+	priv->started = 1;
 
 	/* tell the board code to enable the panel */
 	for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
@@ -493,7 +495,10 @@
 	}
 
 	/* stop the lcdc */
-	sh_mobile_lcdc_start_stop(priv, 0);
+	if (priv->started) {
+		sh_mobile_lcdc_start_stop(priv, 0);
+		priv->started = 0;
+	}
 
 	/* stop clocks */
 	for (k = 0; k < ARRAY_SIZE(priv->ch); k++)
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 5759ba5..d062602 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -2475,7 +2475,7 @@
 			/* BB FIXME investigate remapping reserved chars here */
 			*symlinkinfo = cifs_strndup_from_ucs(data_start, count,
 						    is_unicode, nls_codepage);
-			if (!symlinkinfo)
+			if (!*symlinkinfo)
 				rc = -ENOMEM;
 		}
 	}
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 11431ed..3758965d 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -225,6 +225,7 @@
 	if (!(oflags & FMODE_READ))
 		write_only = true;
 
+	mode &= ~current_umask();
 	rc = CIFSPOSIXCreate(xid, cifs_sb->tcon, posix_flags, mode,
 			pnetfid, presp_data, &oplock, full_path,
 			cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
@@ -310,7 +311,6 @@
 		return -ENOMEM;
 	}
 
-	mode &= ~current_umask();
 	if (oplockEnabled)
 		oplock = REQ_OPLOCK;
 
@@ -336,7 +336,7 @@
 			else /* success, no need to query */
 				goto cifs_create_set_dentry;
 		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
-			 (rc != -EOPNOTSUPP)) /* path not found or net err */
+			 (rc != -EOPNOTSUPP) && (rc != -EINVAL))
 			goto cifs_create_out;
 		/* else fallthrough to retry, using older open call, this is
 		   case where server does not support this SMB level, and
@@ -609,7 +609,6 @@
 	int xid;
 	int rc = 0; /* to get around spurious gcc warning, set to zero here */
 	int oplock = 0;
-	int mode;
 	__u16 fileHandle = 0;
 	bool posix_open = false;
 	struct cifs_sb_info *cifs_sb;
@@ -658,30 +657,36 @@
 	}
 	cFYI(1, ("Full path: %s inode = 0x%p", full_path, direntry->d_inode));
 
+	/* Posix open is only called (at lookup time) for file create now.
+	 * For opens (rather than creates), because we do not know if it
+	 * is a file or directory yet, and current Samba no longer allows
+	 * us to do posix open on dirs, we could end up wasting an open call
+	 * on what turns out to be a dir. For file opens, we wait to call posix
+	 * open till cifs_open.  It could be added here (lookup) in the future
+	 * but the performance tradeoff of the extra network request when EISDIR
+	 * or EACCES is returned would have to be weighed against the 50%
+	 * reduction in network traffic in the other paths.
+	 */
 	if (pTcon->unix_ext) {
 		if (!(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY)) &&
-				(nd->flags & LOOKUP_OPEN)) {
-			if (!((nd->intent.open.flags & O_CREAT) &&
-					(nd->intent.open.flags & O_EXCL))) {
-				mode = nd->intent.open.create_mode &
-						~current_umask();
-				rc = cifs_posix_open(full_path, &newInode,
-					parent_dir_inode->i_sb, mode,
+		     (nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open &&
+		     (nd->intent.open.flags & O_CREAT)) {
+			rc = cifs_posix_open(full_path, &newInode,
+					parent_dir_inode->i_sb,
+					nd->intent.open.create_mode,
 					nd->intent.open.flags, &oplock,
 					&fileHandle, xid);
-				/*
-				 * This code works around a bug in
-				 * samba posix open in samba versions 3.3.1
-				 * and earlier where create works
-				 * but open fails with invalid parameter.
-				 * If either of these error codes are
-				 * returned, follow the normal lookup.
-				 * Otherwise, the error during posix open
-				 * is handled.
-				 */
-				if ((rc != -EINVAL) && (rc != -EOPNOTSUPP))
-					posix_open = true;
-			}
+			/*
+			 * The check below works around a bug in POSIX
+			 * open in samba versions 3.3.1 and earlier where
+			 * open could incorrectly fail with invalid parameter.
+			 * If either that or op not supported returned, follow
+			 * the normal lookup.
+			 */
+			if ((rc == 0) || (rc == -ENOENT))
+				posix_open = true;
+			else if ((rc == -EINVAL) || (rc != -EOPNOTSUPP))
+				pTcon->broken_posix_open = true;
 		}
 		if (!posix_open)
 			rc = cifs_get_inode_info_unix(&newInode, full_path,
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 38c06f8..302ea15 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -130,10 +130,6 @@
 			struct cifsFileInfo *pCifsFile, int oplock, u16 netfid)
 {
 
-	file->private_data = kmalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
-	if (file->private_data == NULL)
-		return -ENOMEM;
-	pCifsFile = cifs_init_private(file->private_data, inode, file, netfid);
 	write_lock(&GlobalSMBSeslock);
 
 	pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
@@ -184,6 +180,38 @@
 	return 0;
 }
 
+static struct cifsFileInfo *
+cifs_fill_filedata(struct file *file)
+{
+	struct list_head *tmp;
+	struct cifsFileInfo *pCifsFile = NULL;
+	struct cifsInodeInfo *pCifsInode = NULL;
+
+	/* search inode for this file and fill in file->private_data */
+	pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
+	read_lock(&GlobalSMBSeslock);
+	list_for_each(tmp, &pCifsInode->openFileList) {
+		pCifsFile = list_entry(tmp, struct cifsFileInfo, flist);
+		if ((pCifsFile->pfile == NULL) &&
+		    (pCifsFile->pid == current->tgid)) {
+			/* mode set in cifs_create */
+
+			/* needed for writepage */
+			pCifsFile->pfile = file;
+			file->private_data = pCifsFile;
+			break;
+		}
+	}
+	read_unlock(&GlobalSMBSeslock);
+
+	if (file->private_data != NULL) {
+		return pCifsFile;
+	} else if ((file->f_flags & O_CREAT) && (file->f_flags & O_EXCL))
+			cERROR(1, ("could not find file instance for "
+				   "new file %p", file));
+	return NULL;
+}
+
 /* all arguments to this function must be checked for validity in caller */
 static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
 	struct cifsInodeInfo *pCifsInode, struct cifsFileInfo *pCifsFile,
@@ -258,7 +286,6 @@
 	struct cifsTconInfo *tcon;
 	struct cifsFileInfo *pCifsFile;
 	struct cifsInodeInfo *pCifsInode;
-	struct list_head *tmp;
 	char *full_path = NULL;
 	int desiredAccess;
 	int disposition;
@@ -270,32 +297,12 @@
 	cifs_sb = CIFS_SB(inode->i_sb);
 	tcon = cifs_sb->tcon;
 
-	/* search inode for this file and fill in file->private_data */
 	pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
-	read_lock(&GlobalSMBSeslock);
-	list_for_each(tmp, &pCifsInode->openFileList) {
-		pCifsFile = list_entry(tmp, struct cifsFileInfo,
-				       flist);
-		if ((pCifsFile->pfile == NULL) &&
-		    (pCifsFile->pid == current->tgid)) {
-			/* mode set in cifs_create */
-
-			/* needed for writepage */
-			pCifsFile->pfile = file;
-
-			file->private_data = pCifsFile;
-			break;
-		}
-	}
-	read_unlock(&GlobalSMBSeslock);
-
-	if (file->private_data != NULL) {
-		rc = 0;
+	pCifsFile = cifs_fill_filedata(file);
+	if (pCifsFile) {
 		FreeXid(xid);
-		return rc;
-	} else if ((file->f_flags & O_CREAT) && (file->f_flags & O_EXCL))
-			cERROR(1, ("could not find file instance for "
-				   "new file %p", file));
+		return 0;
+	}
 
 	full_path = build_path_from_dentry(file->f_path.dentry);
 	if (full_path == NULL) {
@@ -325,6 +332,7 @@
 			/* no need for special case handling of setting mode
 			   on read only files needed here */
 
+			pCifsFile = cifs_fill_filedata(file);
 			cifs_posix_open_inode_helper(inode, file, pCifsInode,
 						     pCifsFile, oplock, netfid);
 			goto out;
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index ea9d11e..cd83c53 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -107,48 +107,48 @@
 cifs_follow_link(struct dentry *direntry, struct nameidata *nd)
 {
 	struct inode *inode = direntry->d_inode;
-	int rc = -EACCES;
+	int rc = -ENOMEM;
 	int xid;
 	char *full_path = NULL;
-	char *target_path = ERR_PTR(-ENOMEM);
-	struct cifs_sb_info *cifs_sb;
-	struct cifsTconInfo *pTcon;
+	char *target_path = NULL;
+	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+	struct cifsTconInfo *tcon = cifs_sb->tcon;
 
 	xid = GetXid();
 
-	full_path = build_path_from_dentry(direntry);
+	/*
+	 * For now, we just handle symlinks with unix extensions enabled.
+	 * Eventually we should handle NTFS reparse points, and MacOS
+	 * symlink support. For instance...
+	 *
+	 * rc = CIFSSMBQueryReparseLinkInfo(...)
+	 *
+	 * For now, just return -EACCES when the server doesn't support posix
+	 * extensions. Note that we still allow querying symlinks when posix
+	 * extensions are manually disabled. We could disable these as well
+	 * but there doesn't seem to be any harm in allowing the client to
+	 * read them.
+	 */
+	if (!(tcon->ses->capabilities & CAP_UNIX)) {
+		rc = -EACCES;
+		goto out;
+	}
 
+	full_path = build_path_from_dentry(direntry);
 	if (!full_path)
 		goto out;
 
 	cFYI(1, ("Full path: %s inode = 0x%p", full_path, inode));
-	cifs_sb = CIFS_SB(inode->i_sb);
-	pTcon = cifs_sb->tcon;
 
-	/* We could change this to:
-		if (pTcon->unix_ext)
-	   but there does not seem any point in refusing to
-	   get symlink info if we can, even if unix extensions
-	   turned off for this mount */
-
-	if (pTcon->ses->capabilities & CAP_UNIX)
-		rc = CIFSSMBUnixQuerySymLink(xid, pTcon, full_path,
-					     &target_path,
-					     cifs_sb->local_nls);
-	else {
-		/* BB add read reparse point symlink code here */
-		/* rc = CIFSSMBQueryReparseLinkInfo */
-		/* BB Add code to Query ReparsePoint info */
-		/* BB Add MAC style xsymlink check here if enabled */
-	}
-
+	rc = CIFSSMBUnixQuerySymLink(xid, tcon, full_path, &target_path,
+				     cifs_sb->local_nls);
+	kfree(full_path);
+out:
 	if (rc != 0) {
 		kfree(target_path);
 		target_path = ERR_PTR(rc);
 	}
 
-	kfree(full_path);
-out:
 	FreeXid(xid);
 	nd_set_link(nd, target_path);
 	return NULL;
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 370b190..89f98e9 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1943,7 +1943,8 @@
 		case S_IFREG:
 			/* NFSv4 has atomic_open... */
 			if (nfs_server_capable(inode, NFS_CAP_ATOMIC_OPEN)
-					&& (mask & MAY_OPEN))
+					&& (mask & MAY_OPEN)
+					&& !(mask & MAY_EXEC))
 				goto out;
 			break;
 		case S_IFDIR:
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index 50ff3f2..d6759b9 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -576,7 +576,7 @@
 	ret = nilfs_clean_segments(inode->i_sb, argv, kbufs);
 
  out_free:
-	while (--n > 0)
+	while (--n >= 0)
 		vfree(kbufs[n]);
 	kfree(kbufs[4]);
 	return ret;
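
The one-character nilfs2 change above fixes a classic cleanup-loop off-by-one: with "while (--n > 0)" the body never runs for index 0 and the first buffer leaks, while "while (--n >= 0)" frees indices n-1 down to 0. A standalone illustration, with plain malloc/free standing in for vmalloc/vfree:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	void *bufs[4];
	int n = 4;
	int i;

	for (i = 0; i < n; i++)
		bufs[i] = malloc(16);

	/*
	 * With "while (--n > 0)" the body never runs for n == 0 and
	 * bufs[0] leaks; "--n >= 0" visits 3, 2, 1, 0 and frees them all.
	 */
	while (--n >= 0) {
		printf("freeing bufs[%d]\n", n);
		free(bufs[n]);
	}
	return 0;
}
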
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 1215a4f..3567fb9 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -448,13 +448,11 @@
 static void reiserfs_kill_sb(struct super_block *s)
 {
 	if (REISERFS_SB(s)) {
-#ifdef CONFIG_REISERFS_FS_XATTR
 		if (REISERFS_SB(s)->xattr_root) {
 			d_invalidate(REISERFS_SB(s)->xattr_root);
 			dput(REISERFS_SB(s)->xattr_root);
 			REISERFS_SB(s)->xattr_root = NULL;
 		}
-#endif
 		if (REISERFS_SB(s)->priv_root) {
 			d_invalidate(REISERFS_SB(s)->priv_root);
 			dput(REISERFS_SB(s)->priv_root);
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 2237e10..8e7deb0 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -123,7 +123,9 @@
 	mutex_lock_nested(&privroot->d_inode->i_mutex, I_MUTEX_XATTR);
 
 	xaroot = dget(REISERFS_SB(sb)->xattr_root);
-	if (!xaroot->d_inode) {
+	if (!xaroot)
+		xaroot = ERR_PTR(-ENODATA);
+	else if (!xaroot->d_inode) {
 		int err = -ENODATA;
 		if (xattr_may_create(flags))
 			err = xattr_mkdir(privroot->d_inode, xaroot, 0700);
@@ -685,20 +687,6 @@
 	return err;
 }
 
-/* Actual operations that are exported to VFS-land */
-struct xattr_handler *reiserfs_xattr_handlers[] = {
-	&reiserfs_xattr_user_handler,
-	&reiserfs_xattr_trusted_handler,
-#ifdef CONFIG_REISERFS_FS_SECURITY
-	&reiserfs_xattr_security_handler,
-#endif
-#ifdef CONFIG_REISERFS_FS_POSIX_ACL
-	&reiserfs_posix_acl_access_handler,
-	&reiserfs_posix_acl_default_handler,
-#endif
-	NULL
-};
-
 /*
  * In order to implement different sets of xattr operations for each xattr
  * prefix with the generic xattr API, a filesystem should create a
@@ -883,23 +871,6 @@
 	return error;
 }
 
-int reiserfs_permission(struct inode *inode, int mask)
-{
-	/*
-	 * We don't do permission checks on the internal objects.
-	 * Permissions are determined by the "owning" object.
-	 */
-	if (IS_PRIVATE(inode))
-		return 0;
-	/*
-	 * Stat data v1 doesn't support ACLs.
-	 */
-	if (get_inode_sd_version(inode) == STAT_DATA_V1)
-		return generic_permission(inode, mask, NULL);
-	else
-		return generic_permission(inode, mask, reiserfs_check_acl);
-}
-
 static int create_privroot(struct dentry *dentry)
 {
 	int err;
@@ -922,6 +893,28 @@
 	return 0;
 }
 
+#else
+int __init reiserfs_xattr_register_handlers(void) { return 0; }
+void reiserfs_xattr_unregister_handlers(void) {}
+static int create_privroot(struct dentry *dentry) { return 0; }
+#endif
+
+/* Actual operations that are exported to VFS-land */
+struct xattr_handler *reiserfs_xattr_handlers[] = {
+#ifdef CONFIG_REISERFS_FS_XATTR
+	&reiserfs_xattr_user_handler,
+	&reiserfs_xattr_trusted_handler,
+#endif
+#ifdef CONFIG_REISERFS_FS_SECURITY
+	&reiserfs_xattr_security_handler,
+#endif
+#ifdef CONFIG_REISERFS_FS_POSIX_ACL
+	&reiserfs_posix_acl_access_handler,
+	&reiserfs_posix_acl_default_handler,
+#endif
+	NULL
+};
+
 static int xattr_mount_check(struct super_block *s)
 {
 	/* We need generation numbers to ensure that the oid mapping is correct
@@ -941,10 +934,24 @@
 	return 0;
 }
 
-#else
-int __init reiserfs_xattr_register_handlers(void) { return 0; }
-void reiserfs_xattr_unregister_handlers(void) {}
+int reiserfs_permission(struct inode *inode, int mask)
+{
+	/*
+	 * We don't do permission checks on the internal objects.
+	 * Permissions are determined by the "owning" object.
+	 */
+	if (IS_PRIVATE(inode))
+		return 0;
+
+#ifdef CONFIG_REISERFS_FS_XATTR
+	/*
+	 * Stat data v1 doesn't support ACLs.
+	 */
+	if (get_inode_sd_version(inode) != STAT_DATA_V1)
+		return generic_permission(inode, mask, reiserfs_check_acl);
 #endif
+	return generic_permission(inode, mask, NULL);
+}
 
 /* This will catch lookups from the fs root to .reiserfs_priv */
 static int
@@ -992,7 +999,6 @@
 	int err = 0;
 	struct dentry *privroot = REISERFS_SB(s)->priv_root;
 
-#ifdef CONFIG_REISERFS_FS_XATTR
 	err = xattr_mount_check(s);
 	if (err)
 		goto error;
@@ -1023,14 +1029,11 @@
 		clear_bit(REISERFS_XATTRS_USER, &(REISERFS_SB(s)->s_mount_opt));
 		clear_bit(REISERFS_POSIXACL, &(REISERFS_SB(s)->s_mount_opt));
 	}
-#endif
 
 	/* The super_block MS_POSIXACL must mirror the (no)acl mount option. */
-#ifdef CONFIG_REISERFS_FS_POSIX_ACL
 	if (reiserfs_posixacl(s))
 		s->s_flags |= MS_POSIXACL;
 	else
-#endif
 		s->s_flags &= ~MS_POSIXACL;
 
 	return err;
diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
index dbd6150..fc21844 100644
--- a/include/asm-generic/local.h
+++ b/include/asm-generic/local.h
@@ -42,7 +42,7 @@
 
 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
-#define local_add_unless(l, a, u) atomic_long_add_unless((&(l)->a), (a), (u))
+#define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
 
 /* Non-atomic variants, ie. preemption disabled and won't be touched
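
The local_add_unless() rename from a to _a matters because macro parameters are substituted everywhere in the replacement text, including after "->". With the parameter called a, the member access (&(l)->a) would have its a replaced by the caller's argument, turning local_add_unless(&v, 5, 10) into nonsense such as atomic_long_add_unless((&(&v)->5), (5), (10)). A toy reproduction of the hazard; struct counter and counter_add() are illustrative, not kernel code:

#include <stdio.h>

struct counter {
	long a;				/* member deliberately named 'a' */
};

/*
 * Broken variant (do not use): the parameter 'a' would also replace the
 * member name inside (c)->a:
 *
 *	#define counter_add(c, a, u)	((c)->a += (a))
 */

/* Fixed: an underscored parameter cannot collide with the member name. */
#define counter_add(c, _a, u)	((c)->a += (_a))

int main(void)
{
	struct counter c = { .a = 0 };

	counter_add(&c, 5, 10);		/* expands to (&c)->a += (5) */
	printf("%ld\n", c.a);		/* prints 5 */
	return 0;
}
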
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index 51e6e54..9b93caf 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -28,7 +28,7 @@
 
 struct amba_driver {
 	struct device_driver	drv;
-	int			(*probe)(struct amba_device *, void *);
+	int			(*probe)(struct amba_device *, struct amba_id *);
 	int			(*remove)(struct amba_device *);
 	void			(*shutdown)(struct amba_device *);
 	int			(*suspend)(struct amba_device *, pm_message_t);
diff --git a/include/linux/ide.h b/include/linux/ide.h
index ff65fff..9fed365 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -1109,7 +1109,7 @@
 
 extern void ide_fixstring(u8 *, const int, const int);
 
-int ide_busy_sleep(ide_hwif_t *, unsigned long, int);
+int ide_busy_sleep(ide_drive_t *, unsigned long, int);
 
 int ide_wait_stat(ide_startstop_t *, ide_drive_t *, u8, u8, unsigned long);
 
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 186ec6a..a47c879 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1097,6 +1097,32 @@
 #define pfn_valid_within(pfn) (1)
 #endif
 
+#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
+/*
+ * pfn_valid() is meant to be able to tell if a given PFN has valid memmap
+ * associated with it or not. In FLATMEM, it is expected that holes always
+ * have valid memmap as long as there is valid PFNs either side of the hole.
+ * In SPARSEMEM, it is assumed that a valid section has a memmap for the
+ * entire section.
+ *
+ * However, ARM, and maybe other embedded architectures in the future,
+ * free memmap backing holes to save memory on the assumption the memmap is
+ * never used. The page_zone linkages are then broken even though pfn_valid()
+ * returns true. A walker of the full memmap must then do this additional
+ * check to ensure the memmap it is looking at is sane by making sure the
+ * zone and PFN linkages are still valid. This is expensive, but walkers
+ * of the full memmap are extremely rare.
+ */
+int memmap_valid_within(unsigned long pfn,
+					struct page *page, struct zone *zone);
+#else
+static inline int memmap_valid_within(unsigned long pfn,
+					struct page *page, struct zone *zone)
+{
+	return 1;
+}
+#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
+
 #endif /* !__GENERATING_BOUNDS.H */
 #endif /* !__ASSEMBLY__ */
 #endif /* _LINUX_MMZONE_H */
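
The new comment describes the intended calling pattern: a walker of the full memmap checks pfn_valid() first and then, on configurations that may free the memmap backing holes, verifies the page/zone linkage with memmap_valid_within() before trusting the struct page. A kernel-context sketch of such a walker (not a standalone build; count_valid_pages() is hypothetical, and the vmstat.c hunk later in this diff shows the real in-tree user):

/* Sketch only: how a full-memmap walker is expected to use the helper. */
#include <linux/mm.h>
#include <linux/mmzone.h>

static unsigned long count_valid_pages(struct zone *zone)
{
	unsigned long pfn, end, n = 0;

	end = zone->zone_start_pfn + zone->spanned_pages;
	for (pfn = zone->zone_start_pfn; pfn < end; pfn++) {
		struct page *page;

		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		/* Holes punched in the memmap make page_zone() unreliable. */
		if (!memmap_valid_within(pfn, page, zone))
			continue;
		n++;
	}
	return n;
}
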
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 06ba90c..0f71812 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1406,7 +1406,7 @@
 #define PCI_DEVICE_ID_VIA_82C598_1	0x8598
 #define PCI_DEVICE_ID_VIA_838X_1	0xB188
 #define PCI_DEVICE_ID_VIA_83_87XX_1	0xB198
-#define PCI_DEVICE_ID_VIA_C409_IDE	0XC409
+#define PCI_DEVICE_ID_VIA_VX855_IDE	0xC409
 #define PCI_DEVICE_ID_VIA_ANON		0xFFFF
 
 #define PCI_VENDOR_ID_SIEMENS           0x110A
diff --git a/include/linux/reiserfs_xattr.h b/include/linux/reiserfs_xattr.h
index cdedc01..99928dc 100644
--- a/include/linux/reiserfs_xattr.h
+++ b/include/linux/reiserfs_xattr.h
@@ -41,6 +41,7 @@
 int reiserfs_lookup_privroot(struct super_block *sb);
 int reiserfs_delete_xattrs(struct inode *inode);
 int reiserfs_chown_xattrs(struct inode *inode, struct iattr *attrs);
+int reiserfs_permission(struct inode *inode, int mask);
 
 #ifdef CONFIG_REISERFS_FS_XATTR
 #define has_xattr_dir(inode) (REISERFS_I(inode)->i_flags & i_has_xattr_dir)
@@ -50,7 +51,6 @@
 		      const void *value, size_t size, int flags);
 ssize_t reiserfs_listxattr(struct dentry *dentry, char *buffer, size_t size);
 int reiserfs_removexattr(struct dentry *dentry, const char *name);
-int reiserfs_permission(struct inode *inode, int mask);
 
 int reiserfs_xattr_get(struct inode *, const char *, void *, size_t);
 int reiserfs_xattr_set(struct inode *, const char *, const void *, size_t, int);
@@ -117,8 +117,6 @@
 #define reiserfs_listxattr NULL
 #define reiserfs_removexattr NULL
 
-#define reiserfs_permission NULL
-
 static inline void reiserfs_init_xattr_rwsem(struct inode *inode)
 {
 }
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index c9184f7..68a8d87 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -680,7 +680,7 @@
 		if (rport->roles & FC_PORT_ROLE_FCP_TARGET)
 			result = 0;
 		else if (rport->flags & FC_RPORT_DEVLOSS_PENDING)
-			result = DID_TRANSPORT_DISRUPTED << 16;
+			result = DID_IMM_RETRY << 16;
 		else
 			result = DID_NO_CONNECT << 16;
 		break;
@@ -688,7 +688,7 @@
 		if (rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)
 			result = DID_TRANSPORT_FAILFAST << 16;
 		else
-			result = DID_TRANSPORT_DISRUPTED << 16;
+			result = DID_IMM_RETRY << 16;
 		break;
 	default:
 		result = DID_NO_CONNECT << 16;
diff --git a/kernel/futex.c b/kernel/futex.c
index eef8cd2..d546b2d 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -193,6 +193,7 @@
  * @uaddr: virtual address of the futex
  * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
  * @key: address where result is stored.
+ * @rw: mapping needs to be read/write (values: VERIFY_READ, VERIFY_WRITE)
  *
  * Returns a negative error code or 0
  * The key words are stored in *key on success.
@@ -203,7 +204,8 @@
  *
  * lock_page() might sleep, the caller should not hold a spinlock.
  */
-static int get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
+static int
+get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
 {
 	unsigned long address = (unsigned long)uaddr;
 	struct mm_struct *mm = current->mm;
@@ -226,7 +228,7 @@
 	 *        but access_ok() should be faster than find_vma()
 	 */
 	if (!fshared) {
-		if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
+		if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
 			return -EFAULT;
 		key->private.mm = mm;
 		key->private.address = address;
@@ -235,7 +237,7 @@
 	}
 
 again:
-	err = get_user_pages_fast(address, 1, 0, &page);
+	err = get_user_pages_fast(address, 1, rw == VERIFY_WRITE, &page);
 	if (err < 0)
 		return err;
 
@@ -677,7 +679,7 @@
 	if (!bitset)
 		return -EINVAL;
 
-	ret = get_futex_key(uaddr, fshared, &key);
+	ret = get_futex_key(uaddr, fshared, &key, VERIFY_READ);
 	if (unlikely(ret != 0))
 		goto out;
 
@@ -723,10 +725,10 @@
 	int ret, op_ret;
 
 retry:
-	ret = get_futex_key(uaddr1, fshared, &key1);
+	ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ);
 	if (unlikely(ret != 0))
 		goto out;
-	ret = get_futex_key(uaddr2, fshared, &key2);
+	ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
 	if (unlikely(ret != 0))
 		goto out_put_key1;
 
@@ -814,10 +816,10 @@
 	int ret, drop_count = 0;
 
 retry:
-	ret = get_futex_key(uaddr1, fshared, &key1);
+	ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ);
 	if (unlikely(ret != 0))
 		goto out;
-	ret = get_futex_key(uaddr2, fshared, &key2);
+	ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_READ);
 	if (unlikely(ret != 0))
 		goto out_put_key1;
 
@@ -1140,7 +1142,7 @@
 	q.bitset = bitset;
 retry:
 	q.key = FUTEX_KEY_INIT;
-	ret = get_futex_key(uaddr, fshared, &q.key);
+	ret = get_futex_key(uaddr, fshared, &q.key, VERIFY_READ);
 	if (unlikely(ret != 0))
 		goto out;
 
@@ -1330,7 +1332,7 @@
 	q.pi_state = NULL;
 retry:
 	q.key = FUTEX_KEY_INIT;
-	ret = get_futex_key(uaddr, fshared, &q.key);
+	ret = get_futex_key(uaddr, fshared, &q.key, VERIFY_WRITE);
 	if (unlikely(ret != 0))
 		goto out;
 
@@ -1594,7 +1596,7 @@
 	if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
 		return -EPERM;
 
-	ret = get_futex_key(uaddr, fshared, &key);
+	ret = get_futex_key(uaddr, fshared, &key, VERIFY_WRITE);
 	if (unlikely(ret != 0))
 		goto out;
 
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index a2cc7e9..699a2ac 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -54,9 +54,9 @@
  * table (if it's not there yet), and we check it for lock order
  * conflicts and deadlocks.
  */
-#define MAX_LOCKDEP_ENTRIES	8192UL
+#define MAX_LOCKDEP_ENTRIES	16384UL
 
-#define MAX_LOCKDEP_CHAINS_BITS	14
+#define MAX_LOCKDEP_CHAINS_BITS	15
 #define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)
 
 #define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
diff --git a/kernel/panic.c b/kernel/panic.c
index 874ecf1..984b3ec 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -340,39 +340,44 @@
 }
 
 #ifdef WANT_WARN_ON_SLOWPATH
-void warn_slowpath_fmt(const char *file, int line, const char *fmt, ...)
-{
+struct slowpath_args {
+	const char *fmt;
 	va_list args;
-	char function[KSYM_SYMBOL_LEN];
-	unsigned long caller = (unsigned long)__builtin_return_address(0);
+};
+
+static void warn_slowpath_common(const char *file, int line, void *caller, struct slowpath_args *args)
+{
 	const char *board;
 
-	sprint_symbol(function, caller);
-
 	printk(KERN_WARNING "------------[ cut here ]------------\n");
-	printk(KERN_WARNING "WARNING: at %s:%d %s()\n", file,
-		line, function);
+	printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
 	board = dmi_get_system_info(DMI_PRODUCT_NAME);
 	if (board)
 		printk(KERN_WARNING "Hardware name: %s\n", board);
 
-	if (*fmt) {
-		va_start(args, fmt);
-		vprintk(fmt, args);
-		va_end(args);
-	}
+	if (args)
+		vprintk(args->fmt, args->args);
 
 	print_modules();
 	dump_stack();
 	print_oops_end_marker();
 	add_taint(TAINT_WARN);
 }
+
+void warn_slowpath_fmt(const char *file, int line, const char *fmt, ...)
+{
+	struct slowpath_args args;
+
+	args.fmt = fmt;
+	va_start(args.args, fmt);
+	warn_slowpath_common(file, line, __builtin_return_address(0), &args);
+	va_end(args.args);
+}
 EXPORT_SYMBOL(warn_slowpath_fmt);
 
 void warn_slowpath_null(const char *file, int line)
 {
-	static const char *empty = "";
-	warn_slowpath_fmt(file, line, empty);
+	warn_slowpath_common(file, line, __builtin_return_address(0), NULL);
 }
 EXPORT_SYMBOL(warn_slowpath_null);
 #endif
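
The panic.c rework wraps the format string and va_list in a small struct so one common helper can serve both WARN flavours, and so warn_slowpath_null() can pass NULL and skip vprintk() entirely. The same pattern in plain userspace C (warn_common(), warn_fmt() and warn_null() are illustrative names):

#include <stdarg.h>
#include <stdio.h>

struct slowpath_args {
	const char *fmt;
	va_list args;
};

/* Shared tail: emits the banner and, only if present, the message. */
static void warn_common(const char *file, int line, struct slowpath_args *args)
{
	printf("WARNING: at %s:%d\n", file, line);
	if (args)
		vprintf(args->fmt, args->args);
}

static void warn_fmt(const char *file, int line, const char *fmt, ...)
{
	struct slowpath_args args;

	args.fmt = fmt;
	va_start(args.args, fmt);
	warn_common(file, line, &args);
	va_end(args.args);
}

static void warn_null(const char *file, int line)
{
	warn_common(file, line, NULL);	/* no format, no va_list needed */
}

int main(void)
{
	warn_fmt(__FILE__, __LINE__, "value was %d\n", 42);
	warn_null(__FILE__, __LINE__);
	return 0;
}
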
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index e71ca9c..b0dc9e7 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -241,9 +241,9 @@
 
 	local_irq_disable();
 
-	sysdev_suspend(PMSG_FREEZE);
+	error = sysdev_suspend(PMSG_FREEZE);
 	if (error) {
-		printk(KERN_ERR "PM: Some devices failed to power down, "
+		printk(KERN_ERR "PM: Some system devices failed to power down, "
 			"aborting hibernation\n");
 		goto Enable_irqs;
 	}
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index 819f17a..e1d16c9 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -38,7 +38,8 @@
  */
 unsigned long long __attribute__((weak)) sched_clock(void)
 {
-	return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
+	return (unsigned long long)(jiffies - INITIAL_JIFFIES)
+					* (NSEC_PER_SEC / HZ);
 }
 
 static __read_mostly int sched_clock_running;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index a884c09..cda81ec 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2380,7 +2380,7 @@
 	"# echo print-parent > /debug/tracing/trace_options\n"
 	"# echo 1 > /debug/tracing/tracing_enabled\n"
 	"# cat /debug/tracing/trace > /tmp/trace.txt\n"
-	"echo 0 > /debug/tracing/tracing_enabled\n"
+	"# echo 0 > /debug/tracing/tracing_enabled\n"
 ;
 
 static ssize_t
diff --git a/mm/mmzone.c b/mm/mmzone.c
index 16ce8b9..f5b7d17 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -6,6 +6,7 @@
 
 
 #include <linux/stddef.h>
+#include <linux/mm.h>
 #include <linux/mmzone.h>
 #include <linux/module.h>
 
@@ -72,3 +73,17 @@
 	*zone = zonelist_zone(z);
 	return z;
 }
+
+#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
+int memmap_valid_within(unsigned long pfn,
+					struct page *page, struct zone *zone)
+{
+	if (page_to_pfn(page) != pfn)
+		return 0;
+
+	if (page_zone(page) != zone)
+		return 0;
+
+	return 1;
+}
+#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 30351f0..bb553c3 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -94,12 +94,12 @@
 /*
  * The interval between `kupdate'-style writebacks
  */
-unsigned int dirty_writeback_interval = 5 * 100; /* sentiseconds */
+unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */
 
 /*
  * The longest time for which data is allowed to remain dirty
  */
-unsigned int dirty_expire_interval = 30 * 100; /* sentiseconds */
+unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */
 
 /*
  * Flag that makes the machine dump writes/reads and block dirtyings.
@@ -770,7 +770,7 @@
 
 	sync_supers();
 
-	oldest_jif = jiffies - msecs_to_jiffies(dirty_expire_interval);
+	oldest_jif = jiffies - msecs_to_jiffies(dirty_expire_interval * 10);
 	start_jif = jiffies;
 	next_jif = start_jif + msecs_to_jiffies(dirty_writeback_interval * 10);
 	nr_to_write = global_page_state(NR_FILE_DIRTY) +
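
dirty_expire_interval and dirty_writeback_interval are stored in centiseconds (they back the centisecs sysctls under /proc/sys/vm), so converting to jiffies through msecs_to_jiffies() needs the "* 10" that the expire path above was missing: 30 * 100 cs = 30000 ms = 30 s. A trivial check of the arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int dirty_expire_interval = 30 * 100;		/* centiseconds */
	unsigned int msecs = dirty_expire_interval * 10;	/* 1 cs = 10 ms */

	/* prints: 3000 cs = 30000 ms = 30 s */
	printf("%u cs = %u ms = %u s\n",
	       dirty_expire_interval, msecs, msecs / 1000);
	return 0;
}
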
diff --git a/mm/rmap.c b/mm/rmap.c
index 1652166..23122af 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -14,7 +14,7 @@
  * Original design by Rik van Riel <riel@conectiva.com.br> 2001
  * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
  * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
- * Contributions by Hugh Dickins <hugh@veritas.com> 2003, 2004
+ * Contributions by Hugh Dickins 2003, 2004
  */
 
 /*
diff --git a/mm/slob.c b/mm/slob.c
index a2d4ab3..f92e66d 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -60,6 +60,7 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
+#include <linux/swap.h> /* struct reclaim_state */
 #include <linux/cache.h>
 #include <linux/init.h>
 #include <linux/module.h>
@@ -255,6 +256,8 @@
 
 static void slob_free_pages(void *b, int order)
 {
+	if (current->reclaim_state)
+		current->reclaim_state->reclaimed_slab += 1 << order;
 	free_pages((unsigned long)b, order);
 }
 
@@ -407,7 +410,7 @@
 		spin_unlock_irqrestore(&slob_lock, flags);
 		clear_slob_page(sp);
 		free_slob_page(sp);
-		free_page((unsigned long)b);
+		slob_free_pages(b, 0);
 		return;
 	}
 
diff --git a/mm/slub.c b/mm/slub.c
index 7ab54ec..65ffda5 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -9,6 +9,7 @@
  */
 
 #include <linux/mm.h>
+#include <linux/swap.h> /* struct reclaim_state */
 #include <linux/module.h>
 #include <linux/bit_spinlock.h>
 #include <linux/interrupt.h>
@@ -1170,6 +1171,8 @@
 
 	__ClearPageSlab(page);
 	reset_page_mapcount(page);
+	if (current->reclaim_state)
+		current->reclaim_state->reclaimed_slab += pages;
 	__free_pages(page, order);
 }
 
@@ -1909,7 +1912,7 @@
 	 * Doh this slab cannot be placed using slub_max_order.
 	 */
 	order = slab_order(size, 1, MAX_ORDER, 1);
-	if (order <= MAX_ORDER)
+	if (order < MAX_ORDER)
 		return order;
 	return -ENOSYS;
 }
@@ -2522,6 +2525,7 @@
 static int __init setup_slub_max_order(char *str)
 {
 	get_option(&str, &slub_max_order);
+	slub_max_order = min(slub_max_order, MAX_ORDER - 1);
 
 	return 1;
 }
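
Both slub.c hunks above rely on MAX_ORDER being an exclusive bound: the largest order the page allocator serves is MAX_ORDER - 1, so calculate_order() must treat order == MAX_ORDER as failure and the slub_max_order= boot parameter gets clamped to MAX_ORDER - 1. A tiny illustration of the clamp; MAX_ORDER is hard-coded here to the then-common default of 11 purely as an assumption:

#include <stdio.h>

#define MAX_ORDER	11	/* assumption: typical configured value */
#define min(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	int slub_max_order = 13;	/* e.g. from slub_max_order=13 */

	/* Valid orders are 0 .. MAX_ORDER - 1; clamp anything larger. */
	slub_max_order = min(slub_max_order, MAX_ORDER - 1);
	printf("slub_max_order clamped to %d\n", slub_max_order);	/* 10 */
	return 0;
}
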
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 66f6130..74d66db 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -509,22 +509,11 @@
 			continue;
 
 		page = pfn_to_page(pfn);
-#ifdef CONFIG_ARCH_FLATMEM_HAS_HOLES
-		/*
-		 * Ordinarily, memory holes in flatmem still have a valid
-		 * memmap for the PFN range. However, an architecture for
-		 * embedded systems (e.g. ARM) can free up the memmap backing
-		 * holes to save memory on the assumption the memmap is
-		 * never used. The page_zone linkages are then broken even
-		 * though pfn_valid() returns true. Skip the page if the
-		 * linkages are broken. Even if this test passed, the impact
-		 * is that the counters for the movable type are off but
-		 * fragmentation monitoring is likely meaningless on small
-		 * systems.
-		 */
-		if (page_zone(page) != zone)
+
+		/* Watch for unexpected holes punched in the memmap */
+		if (!memmap_valid_within(pfn, page, zone))
 			continue;
-#endif
+
 		mtype = get_pageblock_migratetype(page);
 
 		if (mtype < MIGRATE_TYPES)
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 30b8877..5ee1a36 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -134,6 +134,10 @@
 		if (skb->protocol == htons(ETH_P_PAUSE))
 			goto drop;
 
+		/* If STP is turned off, then forward */
+		if (p->br->stp_enabled == BR_NO_STP && dest[5] == 0)
+			goto forward;
+
 		if (NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev,
 			    NULL, br_handle_local_finish))
 			return NULL;	/* frame consumed by filter */
@@ -141,6 +145,7 @@
 			return skb;	/* continue processing */
 	}
 
+forward:
 	switch (p->state) {
 	case BR_STATE_FORWARDING:
 		rhook = rcu_dereference(br_should_route_hook);
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index 6e63ec3f..0660515 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -297,6 +297,9 @@
 {
 	int isroot = br_is_root_bridge(br);
 
+	if (br->stp_enabled != BR_KERNEL_STP)
+		return;
+
 	pr_info("%s: topology change detected, %s\n", br->dev->name,
 		isroot ? "propagating" : "sending tcn bpdu");
 
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 9cc9f95..6d62d46 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -66,9 +66,9 @@
 
    NOTES.
 
-   * The stored value for avbps is scaled by 2^5, so that maximal
-     rate is ~1Gbit, avpps is scaled by 2^10.
-
+   * avbps is scaled by 2^5, avpps is scaled by 2^10.
+   * both values are reported as 32 bit unsigned values. bps can
+     overflow for fast links: max speed is about 34360 Mbit/sec.
    * Minimal interval is HZ/4=250msec (it is the greatest common divisor
      for HZ=100 and HZ=1024 8)), maximal interval
      is (HZ*2^EST_MAX_INTERVAL)/4 = 8sec. Shorter intervals
@@ -86,9 +86,9 @@
 	spinlock_t		*stats_lock;
 	int			ewma_log;
 	u64			last_bytes;
+	u64			avbps;
 	u32			last_packets;
 	u32			avpps;
-	u32			avbps;
 	struct rcu_head		e_rcu;
 	struct rb_node		node;
 };
@@ -115,6 +115,7 @@
 	rcu_read_lock();
 	list_for_each_entry_rcu(e, &elist[idx].list, list) {
 		u64 nbytes;
+		u64 brate;
 		u32 npackets;
 		u32 rate;
 
@@ -125,9 +126,9 @@
 
 		nbytes = e->bstats->bytes;
 		npackets = e->bstats->packets;
-		rate = (nbytes - e->last_bytes)<<(7 - idx);
+		brate = (nbytes - e->last_bytes)<<(7 - idx);
 		e->last_bytes = nbytes;
-		e->avbps += ((long)rate - (long)e->avbps) >> e->ewma_log;
+		e->avbps += ((s64)(brate - e->avbps)) >> e->ewma_log;
 		e->rate_est->bps = (e->avbps+0xF)>>5;
 
 		rate = (npackets - e->last_packets)<<(12 - idx);
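
The 34360 Mbit/sec figure in the updated comment falls out of the width of the reported field: the estimator publishes bytes per second as a 32-bit value, so it saturates at 2^32 B/s, roughly 34.36 Gbit/s. Widening the internal avbps accumulator to u64 (still scaled by 2^5) only protects the intermediate EWMA arithmetic, not that reported limit. Working the bound out:

#include <stdio.h>

int main(void)
{
	unsigned long long max_bytes_per_sec = 1ULL << 32;	/* u32 limit */
	unsigned long long max_bits_per_sec = max_bytes_per_sec * 8;

	/* 4294967296 B/s * 8 = 34359738368 bit/s, i.e. ~34360 Mbit/s */
	printf("%llu bit/s ~= %llu Mbit/s\n",
	       max_bits_per_sec, (max_bits_per_sec + 500000) / 1000000);
	return 0;
}
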
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index b5873bd..64f51ee 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -175,9 +175,13 @@
 void netpoll_poll(struct netpoll *np)
 {
 	struct net_device *dev = np->dev;
-	const struct net_device_ops *ops = dev->netdev_ops;
+	const struct net_device_ops *ops;
 
-	if (!dev || !netif_running(dev) || !ops->ndo_poll_controller)
+	if (!dev || !netif_running(dev))
+		return;
+
+	ops = dev->netdev_ops;
+	if (!ops->ndo_poll_controller)
 		return;
 
 	/* Process pending work on NIC */
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index d152394..e505b53 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2288,7 +2288,7 @@
 next_skb:
 	block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
 
-	if (abs_offset < block_limit) {
+	if (abs_offset < block_limit && !st->frag_data) {
 		*data = st->cur_skb->data + (abs_offset - st->stepped_offset);
 		return block_limit - abs_offset;
 	}
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 9d26a3d..5b919f7 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -408,7 +408,7 @@
 
 config INET_LRO
 	bool "Large Receive Offload (ipv4/tcp)"
-
+	default y
 	---help---
 	  Support for Large Receive Offload (ipv4/tcp).
 
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 90d22ae..88bf051 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -139,6 +139,8 @@
 __be32 root_server_addr = NONE;	/* Address of NFS server */
 u8 root_server_path[256] = { 0, };	/* Path to mount as root */
 
+u32 ic_dev_xid;		/* Device under configuration */
+
 /* vendor class identifier */
 static char vendor_class_identifier[253] __initdata;
 
@@ -932,6 +934,13 @@
 		goto drop_unlock;
 	}
 
+	/* Is it a reply for the device we are configuring? */
+	if (b->xid != ic_dev_xid) {
+		if (net_ratelimit())
+			printk(KERN_ERR "DHCP/BOOTP: Ignoring delayed packet\n");
+		goto drop_unlock;
+	}
+
 	/* Parse extensions */
 	if (ext_len >= 4 &&
 	    !memcmp(b->exten, ic_bootp_cookie, 4)) { /* Check magic cookie */
@@ -1115,6 +1124,9 @@
 	get_random_bytes(&timeout, sizeof(timeout));
 	timeout = CONF_BASE_TIMEOUT + (timeout % (unsigned) CONF_TIMEOUT_RANDOM);
 	for (;;) {
+		/* Track the device we are configuring */
+		ic_dev_xid = d->xid;
+
 #ifdef IPCONFIG_BOOTP
 		if (do_bootp && (d->able & IC_BOOTP))
 			ic_bootp_send_if(d, jiffies - start_jiffies);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 1d7f49c..7a0f0b2 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1321,6 +1321,7 @@
 	struct task_struct *user_recv = NULL;
 	int copied_early = 0;
 	struct sk_buff *skb;
+	u32 urg_hole = 0;
 
 	lock_sock(sk);
 
@@ -1532,7 +1533,8 @@
 				}
 			}
 		}
-		if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) {
+		if ((flags & MSG_PEEK) &&
+		    (peek_seq - copied - urg_hole != tp->copied_seq)) {
 			if (net_ratelimit())
 				printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
 				       current->comm, task_pid_nr(current));
@@ -1553,6 +1555,7 @@
 				if (!urg_offset) {
 					if (!sock_flag(sk, SOCK_URGINLINE)) {
 						++*seq;
+						urg_hole++;
 						offset++;
 						used--;
 						if (!used)
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index ec697ce..3b64182 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -303,6 +303,8 @@
 		switch (teql_resolve(skb, skb_res, slave)) {
 		case 0:
 			if (__netif_tx_trylock(slave_txq)) {
+				unsigned int length = qdisc_pkt_len(skb);
+
 				if (!netif_tx_queue_stopped(slave_txq) &&
 				    !netif_tx_queue_frozen(slave_txq) &&
 				    slave_ops->ndo_start_xmit(skb, slave) == 0) {
@@ -310,8 +312,7 @@
 					master->slaves = NEXT_SLAVE(q);
 					netif_wake_queue(dev);
 					master->stats.tx_packets++;
-					master->stats.tx_bytes +=
-						qdisc_pkt_len(skb);
+					master->stats.tx_bytes += length;
 					return 0;
 				}
 				__netif_tx_unlock(slave_txq);
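
The sch_teql change avoids a use-after-free: once ndo_start_xmit() accepts the skb the driver owns it and may free it at once, so qdisc_pkt_len(skb) must be read before the handoff and the cached value used for accounting afterwards. The general shape of the fix as a userspace sketch; struct pkt and send_one() are stand-ins for the skb and the driver hook:

#include <stdio.h>
#include <stdlib.h>

struct pkt {
	unsigned int len;
	char *data;
};

/* Stand-in driver hook: takes ownership of pkt and frees it on success. */
static int send_one(struct pkt *p)
{
	free(p->data);
	free(p);
	return 0;
}

int main(void)
{
	struct pkt *p = malloc(sizeof(*p));
	unsigned long long tx_bytes = 0;
	unsigned int len;

	p->len = 1500;
	p->data = malloc(p->len);

	len = p->len;		/* capture before ownership is transferred */
	if (send_one(p) == 0)
		tx_bytes += len;	/* NOT p->len: p may already be freed */

	printf("tx_bytes=%llu\n", tx_bytes);
	return 0;
}
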
diff --git a/scripts/setlocalversion b/scripts/setlocalversion
index 32c8554..0079047 100755
--- a/scripts/setlocalversion
+++ b/scripts/setlocalversion
@@ -1,5 +1,13 @@
 #!/bin/sh
-# Print additional version information for non-release trees.
+#
+# This script adds local version information from the version
+# control systems git, mercurial (hg) and subversion (svn).
+#
+# If something goes wrong, send a mail to the kernel build mailing list
+# (see MAINTAINERS) and CC Nico Schottelius
+# <nico-linuxsetlocalversion -at- schottelius.org>.
+#
+#
 
 usage() {
 	echo "Usage: $0 [srctree]" >&2
@@ -10,12 +18,20 @@
 
 # Check for git and a git repo.
 if head=`git rev-parse --verify --short HEAD 2>/dev/null`; then
-	# Do we have an untagged tag?
-	if atag=`git describe 2>/dev/null`; then
-		echo "$atag" | awk -F- '{printf("-%05d-%s", $(NF-1),$(NF))}'
-	# add -g${head}, if there is no usable tag
-	else
-		printf '%s%s' -g $head
+
+	# If we are at a tagged commit (like "v2.6.30-rc6"), we ignore it,
+	# because this version is defined in the top level Makefile.
+	if [ -z "`git describe --exact-match 2>/dev/null`" ]; then
+
+		# If we are past a tagged commit (like "v2.6.30-rc5-302-g72357d5"),
+		# we pretty print it.
+		if atag="`git describe 2>/dev/null`"; then
+			echo "$atag" | awk -F- '{printf("-%05d-%s", $(NF-1),$(NF))}'
+
+		# If we don't have a tag at all we print -g{commitish}.
+		else
+			printf '%s%s' -g $head
+		fi
 	fi
 
 	# Is this git on svn?
diff --git a/sound/arm/aaci.c b/sound/arm/aaci.c
index 7fbd68f..5c48e36 100644
--- a/sound/arm/aaci.c
+++ b/sound/arm/aaci.c
@@ -1074,7 +1074,7 @@
 	return i;
 }
 
-static int __devinit aaci_probe(struct amba_device *dev, void *id)
+static int __devinit aaci_probe(struct amba_device *dev, struct amba_id *id)
 {
 	struct aaci *aaci;
 	int ret, i;
diff --git a/sound/drivers/pcsp/pcsp_mixer.c b/sound/drivers/pcsp/pcsp_mixer.c
index 771955a..199b033 100644
--- a/sound/drivers/pcsp/pcsp_mixer.c
+++ b/sound/drivers/pcsp/pcsp_mixer.c
@@ -51,7 +51,7 @@
 	if (uinfo->value.enumerated.item > chip->max_treble)
 		uinfo->value.enumerated.item = chip->max_treble;
 	sprintf(uinfo->value.enumerated.name, "%lu",
-			PCSP_CALC_RATE(uinfo->value.enumerated.item));
+		(unsigned long)PCSP_CALC_RATE(uinfo->value.enumerated.item));
 	return 0;
 }
 
diff --git a/sound/pci/ac97/ac97_patch.c b/sound/pci/ac97/ac97_patch.c
index 81bc93e..7337abd 100644
--- a/sound/pci/ac97/ac97_patch.c
+++ b/sound/pci/ac97/ac97_patch.c
@@ -958,10 +958,13 @@
 }
 
 static const struct snd_kcontrol_new snd_ac97_sigmatel_4speaker =
-AC97_SINGLE("Sigmatel 4-Speaker Stereo Playback Switch", AC97_SIGMATEL_DAC2INVERT, 2, 1, 0);
+AC97_SINGLE("Sigmatel 4-Speaker Stereo Playback Switch",
+		AC97_SIGMATEL_DAC2INVERT, 2, 1, 0);
 
+/* "Sigmatel " removed due to excessive name length: */
 static const struct snd_kcontrol_new snd_ac97_sigmatel_phaseinvert =
-AC97_SINGLE("Sigmatel Surround Phase Inversion Playback Switch", AC97_SIGMATEL_DAC2INVERT, 3, 1, 0);
+AC97_SINGLE("Surround Phase Inversion Playback Switch",
+		AC97_SIGMATEL_DAC2INVERT, 3, 1, 0);
 
 static const struct snd_kcontrol_new snd_ac97_sigmatel_controls[] = {
 AC97_SINGLE("Sigmatel DAC 6dB Attenuate", AC97_SIGMATEL_ANALOG, 1, 1, 0),
diff --git a/sound/pci/ca0106/ca0106_mixer.c b/sound/pci/ca0106/ca0106_mixer.c
index ad28887..c111efe 100644
--- a/sound/pci/ca0106/ca0106_mixer.c
+++ b/sound/pci/ca0106/ca0106_mixer.c
@@ -800,7 +800,7 @@
 		"Capture Volume",
 		"External Amplifier",
 		"Sigmatel 4-Speaker Stereo Playback Switch",
-		"Sigmatel Surround Phase Inversion Playback ",
+		"Surround Phase Inversion Playback Switch",
 		NULL
 	};
 	static char *ca0106_rename_ctls[] = {
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index b8a0d3e..bcbb736 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -12058,6 +12058,7 @@
 	SND_PCI_QUIRK(0x1028, 0x0253, "Dell OEM", ALC268_DELL),
 	SND_PCI_QUIRK(0x1028, 0x02b0, "Dell Inspiron Mini9", ALC268_DELL),
 	SND_PCI_QUIRK(0x103c, 0x30cc, "TOSHIBA", ALC268_TOSHIBA),
+	SND_PCI_QUIRK(0x103c, 0x30f1, "HP TX25xx series", ALC268_TOSHIBA),
 	SND_PCI_QUIRK(0x1043, 0x1205, "ASUS W7J", ALC268_3ST),
 	SND_PCI_QUIRK(0x1179, 0xff10, "TOSHIBA A205", ALC268_TOSHIBA),
 	SND_PCI_QUIRK(0x1179, 0xff50, "TOSHIBA A305", ALC268_TOSHIBA),