Merge branch 'x86/amd-iommu' into x86/urgent
diff --git a/Documentation/devices.txt b/Documentation/devices.txt
index e6244cd..05c8064 100644
--- a/Documentation/devices.txt
+++ b/Documentation/devices.txt
@@ -2560,9 +2560,6 @@
 		 96 = /dev/usb/hiddev0	1st USB HID device
 		    ...
 		111 = /dev/usb/hiddev15	16th USB HID device
-		112 = /dev/usb/auer0	1st auerswald ISDN device
-		    ...
-		127 = /dev/usb/auer15	16th auerswald ISDN device
 		128 = /dev/usb/brlvgr0	First Braille Voyager device
 		    ...
 		131 = /dev/usb/brlvgr3	Fourth Braille Voyager device
diff --git a/Documentation/ioctl-number.txt b/Documentation/ioctl-number.txt
index 3bb5f46..1c6b545 100644
--- a/Documentation/ioctl-number.txt
+++ b/Documentation/ioctl-number.txt
@@ -105,7 +105,6 @@
 'T'	all	linux/soundcard.h	conflict!
 'T'	all	asm-i386/ioctls.h	conflict!
 'U'	00-EF	linux/drivers/usb/usb.h
-'U'	F0-FF	drivers/usb/auerswald.c
 'V'	all	linux/vt.h
 'W'	00-1F	linux/watchdog.h	conflict!
 'W'	00-1F	linux/wanrouter.h	conflict!
diff --git a/Documentation/usb/auerswald.txt b/Documentation/usb/auerswald.txt
deleted file mode 100644
index 7ee4d8f..0000000
--- a/Documentation/usb/auerswald.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-		Auerswald USB kernel driver
-		===========================
-
-What is it? What can I do with it?
-==================================
-The auerswald USB kernel driver connects your linux 2.4.x
-system to the auerswald usb-enabled devices.
-
-There are two types of auerswald usb devices:
-a) small PBX systems (ISDN)
-b) COMfort system telephones (ISDN)
-
-The driver installation creates the devices
-/dev/usb/auer0..15. These devices carry a vendor-
-specific protocol. You may run all auerswald java
-software on it. The java software needs a native
-library "libAuerUsbJNINative.so" installed on
-your system. This library is available from
-auerswald and shipped as part of the java software.
-
-You may create the devices with:
-	mknod -m 666 /dev/usb/auer0 c 180 112
-	...
-	mknod -m 666 /dev/usb/auer15 c 180 127
-
-Future plans
-============
-- Connection to ISDN4LINUX (the hisax interface)
-
-The maintainer of this driver is wolfgang@iksw-muees.de
diff --git a/Documentation/usb/power-management.txt b/Documentation/usb/power-management.txt
index b2fc4d4..9d31140 100644
--- a/Documentation/usb/power-management.txt
+++ b/Documentation/usb/power-management.txt
@@ -436,7 +436,12 @@
 suspend/resume events as well.
 
 If a driver wants to block all suspend/resume calls during some
-critical section, it can simply acquire udev->pm_mutex.
+critical section, it can simply acquire udev->pm_mutex. Note that
+resume calls may be triggered indirectly: a memory allocation can
+cause block I/O, which may in turn make the VM subsystem resume a
+device. Therefore, while holding this lock you must not allocate
+memory with GFP_KERNEL or GFP_NOFS.
+
 Alternatively, if the critical section might call some of the
 usb_autopm_* routines, the driver can avoid deadlock by doing:
 
diff --git a/MAINTAINERS b/MAINTAINERS
index af6aa4e..4c5e9fe 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2928,6 +2928,12 @@
 L:	linux-kernel@vger.kernel.org
 S:	Maintained
 
+MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER
+P:     Felipe Balbi
+M:     felipe.balbi@nokia.com
+L:     linux-usb@vger.kernel.org
+S:     Maintained
+
 MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE)
 P:	Andrew Gallatin
 M:	gallatin@myri.com
@@ -3076,6 +3082,7 @@
 P:	Julian Anastasov
 M:	ja@ssi.bg
 L:	netdev@vger.kernel.org
+L:	lvs-devel@vger.kernel.org
 S:	Maintained
 
 NFS, SUNRPC, AND LOCKD CLIENTS
@@ -4195,12 +4202,6 @@
 L:	linux-usb@vger.kernel.org
 S:	Maintained
 
-USB AUERSWALD DRIVER
-P:	Wolfgang Muees
-M:	wolfgang@iksw-muees.de
-L:      linux-usb@vger.kernel.org
-S:	Maintained
-
 USB BLOCK DRIVER (UB ub)
 P:	Pete Zaitcev
 M:	zaitcev@redhat.com
diff --git a/arch/arm/mach-omap2/usb-tusb6010.c b/arch/arm/mach-omap2/usb-tusb6010.c
index 1607c94..10ef464 100644
--- a/arch/arm/mach-omap2/usb-tusb6010.c
+++ b/arch/arm/mach-omap2/usb-tusb6010.c
@@ -317,7 +317,6 @@
 		printk(error, 6, status);
 		return -ENODEV;
 	}
-	data->multipoint = 1;
 	tusb_device.dev.platform_data = data;
 
 	/* REVISIT let the driver know what DMA channels work */
diff --git a/include/asm-h8300/Kbuild b/arch/h8300/include/asm/Kbuild
similarity index 100%
rename from include/asm-h8300/Kbuild
rename to arch/h8300/include/asm/Kbuild
diff --git a/include/asm-h8300/a.out.h b/arch/h8300/include/asm/a.out.h
similarity index 100%
rename from include/asm-h8300/a.out.h
rename to arch/h8300/include/asm/a.out.h
diff --git a/include/asm-h8300/atomic.h b/arch/h8300/include/asm/atomic.h
similarity index 100%
rename from include/asm-h8300/atomic.h
rename to arch/h8300/include/asm/atomic.h
diff --git a/include/asm-h8300/auxvec.h b/arch/h8300/include/asm/auxvec.h
similarity index 100%
rename from include/asm-h8300/auxvec.h
rename to arch/h8300/include/asm/auxvec.h
diff --git a/include/asm-h8300/bitops.h b/arch/h8300/include/asm/bitops.h
similarity index 100%
rename from include/asm-h8300/bitops.h
rename to arch/h8300/include/asm/bitops.h
diff --git a/include/asm-h8300/bootinfo.h b/arch/h8300/include/asm/bootinfo.h
similarity index 100%
rename from include/asm-h8300/bootinfo.h
rename to arch/h8300/include/asm/bootinfo.h
diff --git a/include/asm-h8300/bug.h b/arch/h8300/include/asm/bug.h
similarity index 100%
rename from include/asm-h8300/bug.h
rename to arch/h8300/include/asm/bug.h
diff --git a/include/asm-h8300/bugs.h b/arch/h8300/include/asm/bugs.h
similarity index 100%
rename from include/asm-h8300/bugs.h
rename to arch/h8300/include/asm/bugs.h
diff --git a/include/asm-h8300/byteorder.h b/arch/h8300/include/asm/byteorder.h
similarity index 100%
rename from include/asm-h8300/byteorder.h
rename to arch/h8300/include/asm/byteorder.h
diff --git a/include/asm-h8300/cache.h b/arch/h8300/include/asm/cache.h
similarity index 100%
rename from include/asm-h8300/cache.h
rename to arch/h8300/include/asm/cache.h
diff --git a/include/asm-h8300/cachectl.h b/arch/h8300/include/asm/cachectl.h
similarity index 100%
rename from include/asm-h8300/cachectl.h
rename to arch/h8300/include/asm/cachectl.h
diff --git a/include/asm-h8300/cacheflush.h b/arch/h8300/include/asm/cacheflush.h
similarity index 100%
rename from include/asm-h8300/cacheflush.h
rename to arch/h8300/include/asm/cacheflush.h
diff --git a/include/asm-h8300/checksum.h b/arch/h8300/include/asm/checksum.h
similarity index 100%
rename from include/asm-h8300/checksum.h
rename to arch/h8300/include/asm/checksum.h
diff --git a/include/asm-h8300/cputime.h b/arch/h8300/include/asm/cputime.h
similarity index 100%
rename from include/asm-h8300/cputime.h
rename to arch/h8300/include/asm/cputime.h
diff --git a/include/asm-h8300/current.h b/arch/h8300/include/asm/current.h
similarity index 100%
rename from include/asm-h8300/current.h
rename to arch/h8300/include/asm/current.h
diff --git a/include/asm-h8300/dbg.h b/arch/h8300/include/asm/dbg.h
similarity index 100%
rename from include/asm-h8300/dbg.h
rename to arch/h8300/include/asm/dbg.h
diff --git a/include/asm-h8300/delay.h b/arch/h8300/include/asm/delay.h
similarity index 100%
rename from include/asm-h8300/delay.h
rename to arch/h8300/include/asm/delay.h
diff --git a/include/asm-h8300/device.h b/arch/h8300/include/asm/device.h
similarity index 100%
rename from include/asm-h8300/device.h
rename to arch/h8300/include/asm/device.h
diff --git a/include/asm-h8300/div64.h b/arch/h8300/include/asm/div64.h
similarity index 100%
rename from include/asm-h8300/div64.h
rename to arch/h8300/include/asm/div64.h
diff --git a/include/asm-h8300/dma.h b/arch/h8300/include/asm/dma.h
similarity index 100%
rename from include/asm-h8300/dma.h
rename to arch/h8300/include/asm/dma.h
diff --git a/include/asm-h8300/elf.h b/arch/h8300/include/asm/elf.h
similarity index 100%
rename from include/asm-h8300/elf.h
rename to arch/h8300/include/asm/elf.h
diff --git a/include/asm-h8300/emergency-restart.h b/arch/h8300/include/asm/emergency-restart.h
similarity index 100%
rename from include/asm-h8300/emergency-restart.h
rename to arch/h8300/include/asm/emergency-restart.h
diff --git a/include/asm-h8300/errno.h b/arch/h8300/include/asm/errno.h
similarity index 100%
rename from include/asm-h8300/errno.h
rename to arch/h8300/include/asm/errno.h
diff --git a/include/asm-h8300/fb.h b/arch/h8300/include/asm/fb.h
similarity index 100%
rename from include/asm-h8300/fb.h
rename to arch/h8300/include/asm/fb.h
diff --git a/include/asm-h8300/fcntl.h b/arch/h8300/include/asm/fcntl.h
similarity index 100%
rename from include/asm-h8300/fcntl.h
rename to arch/h8300/include/asm/fcntl.h
diff --git a/include/asm-h8300/flat.h b/arch/h8300/include/asm/flat.h
similarity index 100%
rename from include/asm-h8300/flat.h
rename to arch/h8300/include/asm/flat.h
diff --git a/include/asm-h8300/fpu.h b/arch/h8300/include/asm/fpu.h
similarity index 100%
rename from include/asm-h8300/fpu.h
rename to arch/h8300/include/asm/fpu.h
diff --git a/include/asm-h8300/futex.h b/arch/h8300/include/asm/futex.h
similarity index 100%
rename from include/asm-h8300/futex.h
rename to arch/h8300/include/asm/futex.h
diff --git a/include/asm-h8300/gpio.h b/arch/h8300/include/asm/gpio.h
similarity index 100%
rename from include/asm-h8300/gpio.h
rename to arch/h8300/include/asm/gpio.h
diff --git a/include/asm-h8300/hardirq.h b/arch/h8300/include/asm/hardirq.h
similarity index 100%
rename from include/asm-h8300/hardirq.h
rename to arch/h8300/include/asm/hardirq.h
diff --git a/include/asm-h8300/hw_irq.h b/arch/h8300/include/asm/hw_irq.h
similarity index 100%
rename from include/asm-h8300/hw_irq.h
rename to arch/h8300/include/asm/hw_irq.h
diff --git a/include/asm-h8300/io.h b/arch/h8300/include/asm/io.h
similarity index 100%
rename from include/asm-h8300/io.h
rename to arch/h8300/include/asm/io.h
diff --git a/include/asm-h8300/ioctl.h b/arch/h8300/include/asm/ioctl.h
similarity index 100%
rename from include/asm-h8300/ioctl.h
rename to arch/h8300/include/asm/ioctl.h
diff --git a/include/asm-h8300/ioctls.h b/arch/h8300/include/asm/ioctls.h
similarity index 100%
rename from include/asm-h8300/ioctls.h
rename to arch/h8300/include/asm/ioctls.h
diff --git a/include/asm-h8300/ipcbuf.h b/arch/h8300/include/asm/ipcbuf.h
similarity index 100%
rename from include/asm-h8300/ipcbuf.h
rename to arch/h8300/include/asm/ipcbuf.h
diff --git a/include/asm-h8300/irq.h b/arch/h8300/include/asm/irq.h
similarity index 100%
rename from include/asm-h8300/irq.h
rename to arch/h8300/include/asm/irq.h
diff --git a/include/asm-h8300/irq_regs.h b/arch/h8300/include/asm/irq_regs.h
similarity index 100%
rename from include/asm-h8300/irq_regs.h
rename to arch/h8300/include/asm/irq_regs.h
diff --git a/include/asm-h8300/kdebug.h b/arch/h8300/include/asm/kdebug.h
similarity index 100%
rename from include/asm-h8300/kdebug.h
rename to arch/h8300/include/asm/kdebug.h
diff --git a/include/asm-h8300/kmap_types.h b/arch/h8300/include/asm/kmap_types.h
similarity index 100%
rename from include/asm-h8300/kmap_types.h
rename to arch/h8300/include/asm/kmap_types.h
diff --git a/include/asm-h8300/linkage.h b/arch/h8300/include/asm/linkage.h
similarity index 100%
rename from include/asm-h8300/linkage.h
rename to arch/h8300/include/asm/linkage.h
diff --git a/include/asm-h8300/local.h b/arch/h8300/include/asm/local.h
similarity index 100%
rename from include/asm-h8300/local.h
rename to arch/h8300/include/asm/local.h
diff --git a/include/asm-h8300/mc146818rtc.h b/arch/h8300/include/asm/mc146818rtc.h
similarity index 100%
rename from include/asm-h8300/mc146818rtc.h
rename to arch/h8300/include/asm/mc146818rtc.h
diff --git a/include/asm-h8300/md.h b/arch/h8300/include/asm/md.h
similarity index 100%
rename from include/asm-h8300/md.h
rename to arch/h8300/include/asm/md.h
diff --git a/include/asm-h8300/mman.h b/arch/h8300/include/asm/mman.h
similarity index 100%
rename from include/asm-h8300/mman.h
rename to arch/h8300/include/asm/mman.h
diff --git a/include/asm-h8300/mmu.h b/arch/h8300/include/asm/mmu.h
similarity index 100%
rename from include/asm-h8300/mmu.h
rename to arch/h8300/include/asm/mmu.h
diff --git a/include/asm-h8300/mmu_context.h b/arch/h8300/include/asm/mmu_context.h
similarity index 100%
rename from include/asm-h8300/mmu_context.h
rename to arch/h8300/include/asm/mmu_context.h
diff --git a/include/asm-h8300/module.h b/arch/h8300/include/asm/module.h
similarity index 100%
rename from include/asm-h8300/module.h
rename to arch/h8300/include/asm/module.h
diff --git a/include/asm-h8300/msgbuf.h b/arch/h8300/include/asm/msgbuf.h
similarity index 100%
rename from include/asm-h8300/msgbuf.h
rename to arch/h8300/include/asm/msgbuf.h
diff --git a/include/asm-h8300/mutex.h b/arch/h8300/include/asm/mutex.h
similarity index 100%
rename from include/asm-h8300/mutex.h
rename to arch/h8300/include/asm/mutex.h
diff --git a/include/asm-h8300/page.h b/arch/h8300/include/asm/page.h
similarity index 100%
rename from include/asm-h8300/page.h
rename to arch/h8300/include/asm/page.h
diff --git a/include/asm-h8300/page_offset.h b/arch/h8300/include/asm/page_offset.h
similarity index 100%
rename from include/asm-h8300/page_offset.h
rename to arch/h8300/include/asm/page_offset.h
diff --git a/include/asm-h8300/param.h b/arch/h8300/include/asm/param.h
similarity index 100%
rename from include/asm-h8300/param.h
rename to arch/h8300/include/asm/param.h
diff --git a/include/asm-h8300/pci.h b/arch/h8300/include/asm/pci.h
similarity index 100%
rename from include/asm-h8300/pci.h
rename to arch/h8300/include/asm/pci.h
diff --git a/include/asm-h8300/percpu.h b/arch/h8300/include/asm/percpu.h
similarity index 100%
rename from include/asm-h8300/percpu.h
rename to arch/h8300/include/asm/percpu.h
diff --git a/include/asm-h8300/pgalloc.h b/arch/h8300/include/asm/pgalloc.h
similarity index 100%
rename from include/asm-h8300/pgalloc.h
rename to arch/h8300/include/asm/pgalloc.h
diff --git a/include/asm-h8300/pgtable.h b/arch/h8300/include/asm/pgtable.h
similarity index 100%
rename from include/asm-h8300/pgtable.h
rename to arch/h8300/include/asm/pgtable.h
diff --git a/include/asm-h8300/poll.h b/arch/h8300/include/asm/poll.h
similarity index 100%
rename from include/asm-h8300/poll.h
rename to arch/h8300/include/asm/poll.h
diff --git a/include/asm-h8300/posix_types.h b/arch/h8300/include/asm/posix_types.h
similarity index 100%
rename from include/asm-h8300/posix_types.h
rename to arch/h8300/include/asm/posix_types.h
diff --git a/include/asm-h8300/processor.h b/arch/h8300/include/asm/processor.h
similarity index 100%
rename from include/asm-h8300/processor.h
rename to arch/h8300/include/asm/processor.h
diff --git a/include/asm-h8300/ptrace.h b/arch/h8300/include/asm/ptrace.h
similarity index 100%
rename from include/asm-h8300/ptrace.h
rename to arch/h8300/include/asm/ptrace.h
diff --git a/include/asm-h8300/regs267x.h b/arch/h8300/include/asm/regs267x.h
similarity index 100%
rename from include/asm-h8300/regs267x.h
rename to arch/h8300/include/asm/regs267x.h
diff --git a/include/asm-h8300/regs306x.h b/arch/h8300/include/asm/regs306x.h
similarity index 100%
rename from include/asm-h8300/regs306x.h
rename to arch/h8300/include/asm/regs306x.h
diff --git a/include/asm-h8300/resource.h b/arch/h8300/include/asm/resource.h
similarity index 100%
rename from include/asm-h8300/resource.h
rename to arch/h8300/include/asm/resource.h
diff --git a/include/asm-h8300/scatterlist.h b/arch/h8300/include/asm/scatterlist.h
similarity index 100%
rename from include/asm-h8300/scatterlist.h
rename to arch/h8300/include/asm/scatterlist.h
diff --git a/include/asm-h8300/sections.h b/arch/h8300/include/asm/sections.h
similarity index 100%
rename from include/asm-h8300/sections.h
rename to arch/h8300/include/asm/sections.h
diff --git a/include/asm-h8300/segment.h b/arch/h8300/include/asm/segment.h
similarity index 100%
rename from include/asm-h8300/segment.h
rename to arch/h8300/include/asm/segment.h
diff --git a/include/asm-h8300/sembuf.h b/arch/h8300/include/asm/sembuf.h
similarity index 100%
rename from include/asm-h8300/sembuf.h
rename to arch/h8300/include/asm/sembuf.h
diff --git a/include/asm-h8300/setup.h b/arch/h8300/include/asm/setup.h
similarity index 100%
rename from include/asm-h8300/setup.h
rename to arch/h8300/include/asm/setup.h
diff --git a/include/asm-h8300/sh_bios.h b/arch/h8300/include/asm/sh_bios.h
similarity index 100%
rename from include/asm-h8300/sh_bios.h
rename to arch/h8300/include/asm/sh_bios.h
diff --git a/include/asm-h8300/shm.h b/arch/h8300/include/asm/shm.h
similarity index 100%
rename from include/asm-h8300/shm.h
rename to arch/h8300/include/asm/shm.h
diff --git a/include/asm-h8300/shmbuf.h b/arch/h8300/include/asm/shmbuf.h
similarity index 100%
rename from include/asm-h8300/shmbuf.h
rename to arch/h8300/include/asm/shmbuf.h
diff --git a/include/asm-h8300/shmparam.h b/arch/h8300/include/asm/shmparam.h
similarity index 100%
rename from include/asm-h8300/shmparam.h
rename to arch/h8300/include/asm/shmparam.h
diff --git a/include/asm-h8300/sigcontext.h b/arch/h8300/include/asm/sigcontext.h
similarity index 100%
rename from include/asm-h8300/sigcontext.h
rename to arch/h8300/include/asm/sigcontext.h
diff --git a/include/asm-h8300/siginfo.h b/arch/h8300/include/asm/siginfo.h
similarity index 100%
rename from include/asm-h8300/siginfo.h
rename to arch/h8300/include/asm/siginfo.h
diff --git a/include/asm-h8300/signal.h b/arch/h8300/include/asm/signal.h
similarity index 100%
rename from include/asm-h8300/signal.h
rename to arch/h8300/include/asm/signal.h
diff --git a/include/asm-h8300/smp.h b/arch/h8300/include/asm/smp.h
similarity index 100%
rename from include/asm-h8300/smp.h
rename to arch/h8300/include/asm/smp.h
diff --git a/include/asm-h8300/socket.h b/arch/h8300/include/asm/socket.h
similarity index 100%
rename from include/asm-h8300/socket.h
rename to arch/h8300/include/asm/socket.h
diff --git a/include/asm-h8300/sockios.h b/arch/h8300/include/asm/sockios.h
similarity index 100%
rename from include/asm-h8300/sockios.h
rename to arch/h8300/include/asm/sockios.h
diff --git a/include/asm-h8300/spinlock.h b/arch/h8300/include/asm/spinlock.h
similarity index 100%
rename from include/asm-h8300/spinlock.h
rename to arch/h8300/include/asm/spinlock.h
diff --git a/include/asm-h8300/stat.h b/arch/h8300/include/asm/stat.h
similarity index 100%
rename from include/asm-h8300/stat.h
rename to arch/h8300/include/asm/stat.h
diff --git a/include/asm-h8300/statfs.h b/arch/h8300/include/asm/statfs.h
similarity index 100%
rename from include/asm-h8300/statfs.h
rename to arch/h8300/include/asm/statfs.h
diff --git a/include/asm-h8300/string.h b/arch/h8300/include/asm/string.h
similarity index 100%
rename from include/asm-h8300/string.h
rename to arch/h8300/include/asm/string.h
diff --git a/include/asm-h8300/system.h b/arch/h8300/include/asm/system.h
similarity index 100%
rename from include/asm-h8300/system.h
rename to arch/h8300/include/asm/system.h
diff --git a/include/asm-h8300/target_time.h b/arch/h8300/include/asm/target_time.h
similarity index 100%
rename from include/asm-h8300/target_time.h
rename to arch/h8300/include/asm/target_time.h
diff --git a/include/asm-h8300/termbits.h b/arch/h8300/include/asm/termbits.h
similarity index 100%
rename from include/asm-h8300/termbits.h
rename to arch/h8300/include/asm/termbits.h
diff --git a/include/asm-h8300/termios.h b/arch/h8300/include/asm/termios.h
similarity index 100%
rename from include/asm-h8300/termios.h
rename to arch/h8300/include/asm/termios.h
diff --git a/include/asm-h8300/thread_info.h b/arch/h8300/include/asm/thread_info.h
similarity index 100%
rename from include/asm-h8300/thread_info.h
rename to arch/h8300/include/asm/thread_info.h
diff --git a/include/asm-h8300/timex.h b/arch/h8300/include/asm/timex.h
similarity index 100%
rename from include/asm-h8300/timex.h
rename to arch/h8300/include/asm/timex.h
diff --git a/include/asm-h8300/tlb.h b/arch/h8300/include/asm/tlb.h
similarity index 100%
rename from include/asm-h8300/tlb.h
rename to arch/h8300/include/asm/tlb.h
diff --git a/include/asm-h8300/tlbflush.h b/arch/h8300/include/asm/tlbflush.h
similarity index 100%
rename from include/asm-h8300/tlbflush.h
rename to arch/h8300/include/asm/tlbflush.h
diff --git a/include/asm-h8300/topology.h b/arch/h8300/include/asm/topology.h
similarity index 100%
rename from include/asm-h8300/topology.h
rename to arch/h8300/include/asm/topology.h
diff --git a/include/asm-h8300/traps.h b/arch/h8300/include/asm/traps.h
similarity index 100%
rename from include/asm-h8300/traps.h
rename to arch/h8300/include/asm/traps.h
diff --git a/include/asm-h8300/types.h b/arch/h8300/include/asm/types.h
similarity index 100%
rename from include/asm-h8300/types.h
rename to arch/h8300/include/asm/types.h
diff --git a/include/asm-h8300/uaccess.h b/arch/h8300/include/asm/uaccess.h
similarity index 100%
rename from include/asm-h8300/uaccess.h
rename to arch/h8300/include/asm/uaccess.h
diff --git a/include/asm-h8300/ucontext.h b/arch/h8300/include/asm/ucontext.h
similarity index 100%
rename from include/asm-h8300/ucontext.h
rename to arch/h8300/include/asm/ucontext.h
diff --git a/include/asm-h8300/unaligned.h b/arch/h8300/include/asm/unaligned.h
similarity index 100%
rename from include/asm-h8300/unaligned.h
rename to arch/h8300/include/asm/unaligned.h
diff --git a/include/asm-h8300/unistd.h b/arch/h8300/include/asm/unistd.h
similarity index 100%
rename from include/asm-h8300/unistd.h
rename to arch/h8300/include/asm/unistd.h
diff --git a/include/asm-h8300/user.h b/arch/h8300/include/asm/user.h
similarity index 100%
rename from include/asm-h8300/user.h
rename to arch/h8300/include/asm/user.h
diff --git a/include/asm-h8300/virtconvert.h b/arch/h8300/include/asm/virtconvert.h
similarity index 100%
rename from include/asm-h8300/virtconvert.h
rename to arch/h8300/include/asm/virtconvert.h
diff --git a/arch/sparc/include/asm/irq_64.h b/arch/sparc/include/asm/irq_64.h
index 3473e25..e3dd930 100644
--- a/arch/sparc/include/asm/irq_64.h
+++ b/arch/sparc/include/asm/irq_64.h
@@ -93,4 +93,8 @@
 void __trigger_all_cpu_backtrace(void);
 #define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
 
+extern void *hardirq_stack[NR_CPUS];
+extern void *softirq_stack[NR_CPUS];
+#define __ARCH_HAS_DO_SOFTIRQ
+
 #endif
diff --git a/arch/sparc/include/asm/of_device.h b/arch/sparc/include/asm/of_device.h
index e5f5aed..bba777a 100644
--- a/arch/sparc/include/asm/of_device.h
+++ b/arch/sparc/include/asm/of_device.h
@@ -30,8 +30,7 @@
 extern void __iomem *of_ioremap(struct resource *res, unsigned long offset, unsigned long size, char *name);
 extern void of_iounmap(struct resource *res, void __iomem *base, unsigned long size);
 
-/* These are just here during the transition */
-#include <linux/of_device.h>
+/* This is just here during the transition */
 #include <linux/of_platform.h>
 
 #endif /* __KERNEL__ */
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index ba43d85..9b6689d 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -682,10 +682,32 @@
 	       ino, virt_irq);
 }
 
+void *hardirq_stack[NR_CPUS];
+void *softirq_stack[NR_CPUS];
+
+static __attribute__((always_inline)) void *set_hardirq_stack(void)
+{
+	void *orig_sp, *sp = hardirq_stack[smp_processor_id()];
+
+	__asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
+	if (orig_sp < sp ||
+	    orig_sp > (sp + THREAD_SIZE)) {
+		sp += THREAD_SIZE - 192 - STACK_BIAS;
+		__asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
+	}
+
+	return orig_sp;
+}
+static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
+{
+	__asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
+}
+
 void handler_irq(int irq, struct pt_regs *regs)
 {
 	unsigned long pstate, bucket_pa;
 	struct pt_regs *old_regs;
+	void *orig_sp;
 
 	clear_softint(1 << irq);
 
@@ -703,6 +725,8 @@
 			       "i" (PSTATE_IE)
 			     : "memory");
 
+	orig_sp = set_hardirq_stack();
+
 	while (bucket_pa) {
 		struct irq_desc *desc;
 		unsigned long next_pa;
@@ -719,10 +743,38 @@
 		bucket_pa = next_pa;
 	}
 
+	restore_hardirq_stack(orig_sp);
+
 	irq_exit();
 	set_irq_regs(old_regs);
 }
 
+void do_softirq(void)
+{
+	unsigned long flags;
+
+	if (in_interrupt())
+		return;
+
+	local_irq_save(flags);
+
+	if (local_softirq_pending()) {
+		void *orig_sp, *sp = softirq_stack[smp_processor_id()];
+
+		sp += THREAD_SIZE - 192 - STACK_BIAS;
+
+		__asm__ __volatile__("mov %%sp, %0\n\t"
+				     "mov %1, %%sp"
+				     : "=&r" (orig_sp)
+				     : "r" (sp));
+		__do_softirq();
+		__asm__ __volatile__("mov %0, %%sp"
+				     : : "r" (orig_sp));
+	}
+
+	local_irq_restore(flags);
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 void fixup_irqs(void)
 {
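
For reference, defining __ARCH_HAS_DO_SOFTIRQ (added to irq_64.h above)
tells kernel/softirq.c to call this arch-specific do_softirq() instead
of its generic one, which looks roughly like the following (a sketch
from memory, not part of this patch):

	asmlinkage void do_softirq(void)
	{
		__u32 pending;
		unsigned long flags;

		if (in_interrupt())
			return;

		local_irq_save(flags);
		pending = local_softirq_pending();
		if (pending)
			__do_softirq();
		local_irq_restore(flags);
	}

The sparc64 version does the same work but first pivots %sp onto the
per-cpu softirq stack, so deep softirq chains cannot overflow the
interrupted task's kernel stack.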
diff --git a/arch/sparc64/kernel/kstack.h b/arch/sparc64/kernel/kstack.h
new file mode 100644
index 0000000..4248d96
--- /dev/null
+++ b/arch/sparc64/kernel/kstack.h
@@ -0,0 +1,60 @@
+#ifndef _KSTACK_H
+#define _KSTACK_H
+
+#include <linux/thread_info.h>
+#include <linux/sched.h>
+#include <asm/ptrace.h>
+#include <asm/irq.h>
+
+/* SP must be STACK_BIAS adjusted already.  */
+static inline bool kstack_valid(struct thread_info *tp, unsigned long sp)
+{
+	unsigned long base = (unsigned long) tp;
+
+	if (sp >= (base + sizeof(struct thread_info)) &&
+	    sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf)))
+		return true;
+
+	if (hardirq_stack[tp->cpu]) {
+		base = (unsigned long) hardirq_stack[tp->cpu];
+		if (sp >= base &&
+		    sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf)))
+			return true;
+		base = (unsigned long) softirq_stack[tp->cpu];
+		if (sp >= base &&
+		    sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf)))
+			return true;
+	}
+	return false;
+}
+
+/* Does "regs" point to a valid pt_regs trap frame?  */
+static inline bool kstack_is_trap_frame(struct thread_info *tp, struct pt_regs *regs)
+{
+	unsigned long base = (unsigned long) tp;
+	unsigned long addr = (unsigned long) regs;
+
+	if (addr >= base &&
+	    addr <= (base + THREAD_SIZE - sizeof(*regs)))
+		goto check_magic;
+
+	if (hardirq_stack[tp->cpu]) {
+		base = (unsigned long) hardirq_stack[tp->cpu];
+		if (addr >= base &&
+		    addr <= (base + THREAD_SIZE - sizeof(*regs)))
+			goto check_magic;
+		base = (unsigned long) softirq_stack[tp->cpu];
+		if (addr >= base &&
+		    addr <= (base + THREAD_SIZE - sizeof(*regs)))
+			goto check_magic;
+	}
+	return false;
+
+check_magic:
+	if ((regs->magic & ~0x1ff) == PT_REGS_MAGIC)
+		return true;
+	return false;
+
+}
+
+#endif /* _KSTACK_H */
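
Typical use of these helpers, as a hedged sketch of the frame walk that
the process.c, stacktrace.c and traps.c hunks below converge on (field
names follow the sparc64 sources; the loop itself is illustrative):

	unsigned long fp = tp->ksp + STACK_BIAS;

	while (kstack_valid(tp, fp)) {
		struct sparc_stackf *sf = (struct sparc_stackf *) fp;
		struct pt_regs *regs = (struct pt_regs *) (sf + 1);

		if (kstack_is_trap_frame(tp, regs))
			/* cross the trap frame into the interrupted frame */
			fp = regs->u_regs[UREG_FP] + STACK_BIAS;
		else
			fp = (unsigned long) sf->fp + STACK_BIAS;
	}

Because both helpers also accept the new per-cpu hardirq and softirq
stacks, a backtrace begun on an IRQ stack is no longer cut off at the
first bounds check.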
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
index 7f5debd..15f4178 100644
--- a/arch/sparc64/kernel/process.c
+++ b/arch/sparc64/kernel/process.c
@@ -52,6 +52,8 @@
 #include <asm/irq_regs.h>
 #include <asm/smp.h>
 
+#include "kstack.h"
+
 static void sparc64_yield(int cpu)
 {
 	if (tlb_type != hypervisor)
@@ -235,19 +237,6 @@
 struct global_reg_snapshot global_reg_snapshot[NR_CPUS];
 static DEFINE_SPINLOCK(global_reg_snapshot_lock);
 
-static bool kstack_valid(struct thread_info *tp, struct reg_window *rw)
-{
-	unsigned long thread_base, fp;
-
-	thread_base = (unsigned long) tp;
-	fp = (unsigned long) rw;
-
-	if (fp < (thread_base + sizeof(struct thread_info)) ||
-	    fp >= (thread_base + THREAD_SIZE))
-		return false;
-	return true;
-}
-
 static void __global_reg_self(struct thread_info *tp, struct pt_regs *regs,
 			      int this_cpu)
 {
@@ -264,11 +253,11 @@
 
 		rw = (struct reg_window *)
 			(regs->u_regs[UREG_FP] + STACK_BIAS);
-		if (kstack_valid(tp, rw)) {
+		if (kstack_valid(tp, (unsigned long) rw)) {
 			global_reg_snapshot[this_cpu].i7 = rw->ins[7];
 			rw = (struct reg_window *)
 				(rw->ins[6] + STACK_BIAS);
-			if (kstack_valid(tp, rw))
+			if (kstack_valid(tp, (unsigned long) rw))
 				global_reg_snapshot[this_cpu].rpc = rw->ins[7];
 		}
 	} else {
@@ -828,7 +817,7 @@
 unsigned long get_wchan(struct task_struct *task)
 {
 	unsigned long pc, fp, bias = 0;
-	unsigned long thread_info_base;
+	struct thread_info *tp;
 	struct reg_window *rw;
         unsigned long ret = 0;
 	int count = 0; 
@@ -837,14 +826,12 @@
             task->state == TASK_RUNNING)
 		goto out;
 
-	thread_info_base = (unsigned long) task_stack_page(task);
+	tp = task_thread_info(task);
 	bias = STACK_BIAS;
 	fp = task_thread_info(task)->ksp + bias;
 
 	do {
-		/* Bogus frame pointer? */
-		if (fp < (thread_info_base + sizeof(struct thread_info)) ||
-		    fp >= (thread_info_base + THREAD_SIZE))
+		if (!kstack_valid(tp, fp))
 			break;
 		rw = (struct reg_window *) fp;
 		pc = rw->ins[7];
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 27b8177..743ccad 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -858,9 +858,7 @@
 extern unsigned long xcall_flush_tlb_mm;
 extern unsigned long xcall_flush_tlb_pending;
 extern unsigned long xcall_flush_tlb_kernel_range;
-#ifdef CONFIG_MAGIC_SYSRQ
 extern unsigned long xcall_fetch_glob_regs;
-#endif
 extern unsigned long xcall_receive_signal;
 extern unsigned long xcall_new_mmu_context_version;
 #ifdef CONFIG_KGDB
@@ -1005,12 +1003,10 @@
 }
 #endif
 
-#ifdef CONFIG_MAGIC_SYSRQ
 void smp_fetch_global_regs(void)
 {
 	smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0);
 }
-#endif
 
 /* We know that the window frames of the user have been flushed
  * to the stack before we get here because all callers of us
diff --git a/arch/sparc64/kernel/stacktrace.c b/arch/sparc64/kernel/stacktrace.c
index e9d7f06..4e21d4a 100644
--- a/arch/sparc64/kernel/stacktrace.c
+++ b/arch/sparc64/kernel/stacktrace.c
@@ -5,10 +5,12 @@
 #include <asm/ptrace.h>
 #include <asm/stacktrace.h>
 
+#include "kstack.h"
+
 void save_stack_trace(struct stack_trace *trace)
 {
-	unsigned long ksp, fp, thread_base;
 	struct thread_info *tp = task_thread_info(current);
+	unsigned long ksp, fp;
 
 	stack_trace_flush();
 
@@ -18,23 +20,18 @@
 	);
 
 	fp = ksp + STACK_BIAS;
-	thread_base = (unsigned long) tp;
 	do {
 		struct sparc_stackf *sf;
 		struct pt_regs *regs;
 		unsigned long pc;
 
-		/* Bogus frame pointer? */
-		if (fp < (thread_base + sizeof(struct thread_info)) ||
-		    fp > (thread_base + THREAD_SIZE - sizeof(struct sparc_stackf)))
+		if (!kstack_valid(tp, fp))
 			break;
 
 		sf = (struct sparc_stackf *) fp;
 		regs = (struct pt_regs *) (sf + 1);
 
-		if (((unsigned long)regs <=
-		     (thread_base + THREAD_SIZE - sizeof(*regs))) &&
-		    (regs->magic & ~0x1ff) == PT_REGS_MAGIC) {
+		if (kstack_is_trap_frame(tp, regs)) {
 			if (!(regs->tstate & TSTATE_PRIV))
 				break;
 			pc = regs->tpc;
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 404e856..3d92412 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -39,6 +39,7 @@
 #include <asm/prom.h>
 
 #include "entry.h"
+#include "kstack.h"
 
 /* When an irrecoverable trap occurs at tl > 0, the trap entry
  * code logs the trap state registers at every level in the trap
@@ -2115,14 +2116,12 @@
 		struct pt_regs *regs;
 		unsigned long pc;
 
-		/* Bogus frame pointer? */
-		if (fp < (thread_base + sizeof(struct thread_info)) ||
-		    fp >= (thread_base + THREAD_SIZE))
+		if (!kstack_valid(tp, fp))
 			break;
 		sf = (struct sparc_stackf *) fp;
 		regs = (struct pt_regs *) (sf + 1);
 
-		if ((regs->magic & ~0x1ff) == PT_REGS_MAGIC) {
+		if (kstack_is_trap_frame(tp, regs)) {
 			if (!(regs->tstate & TSTATE_PRIV))
 				break;
 			pc = regs->tpc;
diff --git a/arch/sparc64/lib/mcount.S b/arch/sparc64/lib/mcount.S
index 7735a7a..fad90dd 100644
--- a/arch/sparc64/lib/mcount.S
+++ b/arch/sparc64/lib/mcount.S
@@ -48,12 +48,45 @@
 	sub		%g3, STACK_BIAS, %g3
 	cmp		%sp, %g3
 	bg,pt		%xcc, 1f
-	 sethi		%hi(panicstring), %g3
+	 nop
+	lduh		[%g6 + TI_CPU], %g1
+	sethi		%hi(hardirq_stack), %g3
+	or		%g3, %lo(hardirq_stack), %g3
+	sllx		%g1, 3, %g1
+	ldx		[%g3 + %g1], %g7
+	sub		%g7, STACK_BIAS, %g7
+	cmp		%sp, %g7
+	bleu,pt		%xcc, 2f
+	 sethi		%hi(THREAD_SIZE), %g3
+	add		%g7, %g3, %g7
+	cmp		%sp, %g7
+	blu,pn		%xcc, 1f
+2:	 sethi		%hi(softirq_stack), %g3
+	or		%g3, %lo(softirq_stack), %g3
+	ldx		[%g3 + %g1], %g7
+	cmp		%sp, %g7
+	bleu,pt		%xcc, 2f
+	 sethi		%hi(THREAD_SIZE), %g3
+	add		%g7, %g3, %g7
+	cmp		%sp, %g7
+	blu,pn		%xcc, 1f
+	 nop
+	/* If we are already on ovstack, don't hop onto it
+	 * again; we are already trying to output the stack overflow
+	 * message.
+	 */
 	sethi		%hi(ovstack), %g7		! cant move to panic stack fast enough
 	 or		%g7, %lo(ovstack), %g7
-	add		%g7, OVSTACKSIZE, %g7
+	add		%g7, OVSTACKSIZE, %g3
+	sub		%g3, STACK_BIAS + 192, %g3
 	sub		%g7, STACK_BIAS, %g7
-	mov		%g7, %sp
+	cmp		%sp, %g7
+	blu,pn		%xcc, 2f
+	 cmp		%sp, %g3
+	bleu,pn		%xcc, 1f
+	 nop
+2:	mov		%g3, %sp
+	sethi		%hi(panicstring), %g3
 	call		prom_printf
 	 or		%g3, %lo(panicstring), %o0
 	call		prom_halt
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 4e821b3..217de3e 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -49,6 +49,7 @@
 #include <asm/sstate.h>
 #include <asm/mdesc.h>
 #include <asm/cpudata.h>
+#include <asm/irq.h>
 
 #define MAX_PHYS_ADDRESS	(1UL << 42UL)
 #define KPTE_BITMAP_CHUNK_SZ	(256UL * 1024UL * 1024UL)
@@ -1771,6 +1772,16 @@
 	if (tlb_type == hypervisor)
 		sun4v_mdesc_init();
 
+	/* Once the OF device tree and MDESC have been set up, we know
+	 * the list of possible cpus.  Therefore we can allocate the
+	 * IRQ stacks.
+	 */
+	for_each_possible_cpu(i) {
+		/* XXX Use node local allocations... XXX */
+		softirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+		hardirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+	}
+
 	/* Setup bootmem... */
 	last_valid_pfn = end_pfn = bootmem_init(phys_base);
 
diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S
index ff1dc44..86773e8 100644
--- a/arch/sparc64/mm/ultra.S
+++ b/arch/sparc64/mm/ultra.S
@@ -480,7 +480,6 @@
 	b		rtrap_xcall
 	 ldx		[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
 
-#ifdef CONFIG_MAGIC_SYSRQ
 	.globl		xcall_fetch_glob_regs
 xcall_fetch_glob_regs:
 	sethi		%hi(global_reg_snapshot), %g1
@@ -511,7 +510,6 @@
 	membar		#StoreStore
 	stx		%g3, [%g1 + GR_SNAP_THREAD]
 	retry
-#endif /* CONFIG_MAGIC_SYSRQ */
 
 #ifdef DCACHE_ALIASING_POSSIBLE
 	.align		32
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 22d7d05..de39e1f 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -101,16 +101,13 @@
  */
 static int iommu_completion_wait(struct amd_iommu *iommu)
 {
-	int ret;
+	int ret, ready = 0;
+	unsigned status = 0;
 	struct iommu_cmd cmd;
-	volatile u64 ready = 0;
-	unsigned long ready_phys = virt_to_phys(&ready);
 	unsigned long i = 0;
 
 	memset(&cmd, 0, sizeof(cmd));
-	cmd.data[0] = LOW_U32(ready_phys) | CMD_COMPL_WAIT_STORE_MASK;
-	cmd.data[1] = upper_32_bits(ready_phys);
-	cmd.data[2] = 1; /* value written to 'ready' */
+	cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
 	CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);
 
 	iommu->need_sync = 0;
@@ -122,9 +119,15 @@
 
 	while (!ready && (i < EXIT_LOOP_COUNT)) {
 		++i;
-		cpu_relax();
+		/* wait for the bit to become one */
+		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+		ready = status & MMIO_STATUS_COM_WAIT_INT_MASK;
 	}
 
+	/* set bit back to zero */
+	status &= ~MMIO_STATUS_COM_WAIT_INT_MASK;
+	writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET);
+
 	if (unlikely((i == EXIT_LOOP_COUNT) && printk_ratelimit()))
 		printk(KERN_WARNING "AMD IOMMU: Completion wait loop failed\n");
 
@@ -161,7 +164,7 @@
 	address &= PAGE_MASK;
 	CMD_SET_TYPE(&cmd, CMD_INV_IOMMU_PAGES);
 	cmd.data[1] |= domid;
-	cmd.data[2] = LOW_U32(address);
+	cmd.data[2] = lower_32_bits(address);
 	cmd.data[3] = upper_32_bits(address);
 	if (s) /* size bit - we flush more than one 4kb page */
 		cmd.data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index d9a9da5..a69cc0f 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -801,6 +801,21 @@
 }
 
 /*
+ * Init the device table to not allow DMA access for devices and
+ * suppress all page faults
+ */
+static void init_device_table(void)
+{
+	u16 devid;
+
+	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
+		set_dev_entry_bit(devid, DEV_ENTRY_VALID);
+		set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
+		set_dev_entry_bit(devid, DEV_ENTRY_NO_PAGE_FAULT);
+	}
+}
+
+/*
  * This function finally enables all IOMMUs found in the system after
  * they have been initialized
  */
@@ -931,6 +946,9 @@
 	if (amd_iommu_pd_alloc_bitmap == NULL)
 		goto free;
 
+	/* init the device table */
+	init_device_table();
+
 	/*
 	 * let all alias entries point to itself
 	 */
@@ -954,10 +972,6 @@
 	if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
 		goto free;
 
-	ret = amd_iommu_init_dma_ops();
-	if (ret)
-		goto free;
-
 	ret = sysdev_class_register(&amd_iommu_sysdev_class);
 	if (ret)
 		goto free;
@@ -966,6 +980,10 @@
 	if (ret)
 		goto free;
 
+	ret = amd_iommu_init_dma_ops();
+	if (ret)
+		goto free;
+
 	enable_iommus();
 
 	printk(KERN_INFO "AMD IOMMU: aperture size is %d MB\n",
diff --git a/crypto/digest.c b/crypto/digest.c
index ac09194..5d3f130 100644
--- a/crypto/digest.c
+++ b/crypto/digest.c
@@ -225,7 +225,7 @@
 	struct ahash_tfm  *crt  = &tfm->crt_ahash;
 	struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
 
-	if (dalg->dia_digestsize > crypto_tfm_alg_blocksize(tfm))
+	if (dalg->dia_digestsize > PAGE_SIZE / 8)
 		return -EINVAL;
 
 	crt->init       = digest_async_init;
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 59821a2..6636802 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -481,21 +481,31 @@
 
 			for (k = 0, temp = 0; k < template[i].np; k++) {
 				printk(KERN_INFO "page %u\n", k);
-				q = &axbuf[IDX[k]];
-				hexdump(q, template[i].tap[k]);
+				q = &xbuf[IDX[k]];
+
+				n = template[i].tap[k];
+				if (k == template[i].np - 1)
+					n += enc ? authsize : -authsize;
+				hexdump(q, n);
 				printk(KERN_INFO "%s\n",
-				       memcmp(q, template[i].result + temp,
-					      template[i].tap[k] -
-					      (k < template[i].np - 1 || enc ?
-					       0 : authsize)) ?
+				       memcmp(q, template[i].result + temp, n) ?
 				       "fail" : "pass");
 
-				for (n = 0; q[template[i].tap[k] + n]; n++)
-					;
+				q += n;
+				if (k == template[i].np - 1 && !enc) {
+					if (memcmp(q, template[i].input +
+						      temp + n, authsize))
+						n = authsize;
+					else
+						n = 0;
+				} else {
+					for (n = 0; q[n]; n++)
+						;
+				}
 				if (n) {
 					printk("Result buffer corruption %u "
 					       "bytes:\n", n);
-					hexdump(&q[template[i].tap[k]], n);
+					hexdump(q, n);
 				}
 
 				temp += template[i].tap[k];
diff --git a/drivers/Makefile b/drivers/Makefile
index a280ab3..2735bde 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -57,6 +57,7 @@
 obj-$(CONFIG_PARIDE) 		+= block/paride/
 obj-$(CONFIG_TC)		+= tc/
 obj-$(CONFIG_USB)		+= usb/
+obj-$(CONFIG_USB_MUSB_HDRC)	+= usb/musb/
 obj-$(CONFIG_PCI)		+= usb/
 obj-$(CONFIG_USB_GADGET)	+= usb/gadget/
 obj-$(CONFIG_SERIO)		+= input/serio/
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index f7feae4..128202e 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -31,6 +31,7 @@
 #include <asm/io.h>
 #include <asm/msr.h>
 #include <asm/cpufeature.h>
+#include <asm/i387.h>
 
 
 #define PFX	KBUILD_MODNAME ": "
@@ -67,16 +68,23 @@
  * Another possible performance boost may come from simply buffering
  * until we have 4 bytes, thus returning a u32 at a time,
  * instead of the current u8-at-a-time.
+ *
+ * Padlock instructions can generate a spurious DNA fault, so
+ * we have to call them in the context of irq_ts_save/restore()
  */
 
 static inline u32 xstore(u32 *addr, u32 edx_in)
 {
 	u32 eax_out;
+	int ts_state;
+
+	ts_state = irq_ts_save();
 
 	asm(".byte 0x0F,0xA7,0xC0 /* xstore %%edi (addr=%0) */"
 		:"=m"(*addr), "=a"(eax_out)
 		:"D"(addr), "d"(edx_in));
 
+	irq_ts_restore(ts_state);
 	return eax_out;
 }
 
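
irq_ts_save()/irq_ts_restore() come from the newly included asm/i387.h.
Roughly, they clear CR0.TS around the PadLock instruction when running
in interrupt context and put it back afterwards; a sketch of the idea
(not the verbatim implementation):

	static inline int irq_ts_save(void)
	{
		if (!in_interrupt())	/* process context handles DNA fine */
			return 0;
		if (read_cr0() & X86_CR0_TS) {
			clts();		/* TS clear: no spurious DNA fault */
			return 1;
		}
		return 0;
	}

	static inline void irq_ts_restore(int ts_state)
	{
		if (ts_state)
			stts();		/* restore TS */
	}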
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 54a2a16..bf2917d 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -16,6 +16,7 @@
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <asm/byteorder.h>
+#include <asm/i387.h>
 #include "padlock.h"
 
 /* Control word. */
@@ -141,6 +142,12 @@
 	asm volatile ("pushfl; popfl");
 }
 
+/*
+ * While the padlock instructions don't use FP/SSE registers, they
+ * generate a spurious DNA fault when cr0.ts is '1'. These instructions
+ * should be used only inside the irq_ts_save/restore() context
+ */
+
 static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key,
 				  void *control_word)
 {
@@ -205,15 +212,23 @@
 static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
 	struct aes_ctx *ctx = aes_ctx(tfm);
+	int ts_state;
 	padlock_reset_key();
+
+	ts_state = irq_ts_save();
 	aes_crypt(in, out, ctx->E, &ctx->cword.encrypt);
+	irq_ts_restore(ts_state);
 }
 
 static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
 	struct aes_ctx *ctx = aes_ctx(tfm);
+	int ts_state;
 	padlock_reset_key();
+
+	ts_state = irq_ts_save();
 	aes_crypt(in, out, ctx->D, &ctx->cword.decrypt);
+	irq_ts_restore(ts_state);
 }
 
 static struct crypto_alg aes_alg = {
@@ -244,12 +259,14 @@
 	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 	int err;
+	int ts_state;
 
 	padlock_reset_key();
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
 
+	ts_state = irq_ts_save();
 	while ((nbytes = walk.nbytes)) {
 		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
 				   ctx->E, &ctx->cword.encrypt,
@@ -257,6 +274,7 @@
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
+	irq_ts_restore(ts_state);
 
 	return err;
 }
@@ -268,12 +286,14 @@
 	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 	int err;
+	int ts_state;
 
 	padlock_reset_key();
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
 
+	ts_state = irq_ts_save();
 	while ((nbytes = walk.nbytes)) {
 		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
 				   ctx->D, &ctx->cword.decrypt,
@@ -281,7 +301,7 @@
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
-
+	irq_ts_restore(ts_state);
 	return err;
 }
 
@@ -314,12 +334,14 @@
 	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 	int err;
+	int ts_state;
 
 	padlock_reset_key();
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
 
+	ts_state = irq_ts_save();
 	while ((nbytes = walk.nbytes)) {
 		u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
 					    walk.dst.virt.addr, ctx->E,
@@ -329,6 +351,7 @@
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
+	irq_ts_restore(ts_state);
 
 	return err;
 }
@@ -340,12 +363,14 @@
 	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 	int err;
+	int ts_state;
 
 	padlock_reset_key();
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
 
+	ts_state = irq_ts_save();
 	while ((nbytes = walk.nbytes)) {
 		padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
 				   ctx->D, walk.iv, &ctx->cword.decrypt,
@@ -354,6 +379,7 @@
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
 
+	irq_ts_restore(ts_state);
 	return err;
 }
 
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index 40d5680..a7fbade 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -22,6 +22,7 @@
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/scatterlist.h>
+#include <asm/i387.h>
 #include "padlock.h"
 
 #define SHA1_DEFAULT_FALLBACK	"sha1-generic"
@@ -102,6 +103,7 @@
 	 *     PadLock microcode needs it that big. */
 	char buf[128+16];
 	char *result = NEAREST_ALIGNED(buf);
+	int ts_state;
 
 	((uint32_t *)result)[0] = SHA1_H0;
 	((uint32_t *)result)[1] = SHA1_H1;
@@ -109,9 +111,12 @@
 	((uint32_t *)result)[3] = SHA1_H3;
 	((uint32_t *)result)[4] = SHA1_H4;
  
+	/* prevent taking the spurious DNA fault with padlock. */
+	ts_state = irq_ts_save();
 	asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
 		      : "+S"(in), "+D"(result)
 		      : "c"(count), "a"(0));
+	irq_ts_restore(ts_state);
 
 	padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);
 }
@@ -123,6 +128,7 @@
 	 *     PadLock microcode needs it that big. */
 	char buf[128+16];
 	char *result = NEAREST_ALIGNED(buf);
+	int ts_state;
 
 	((uint32_t *)result)[0] = SHA256_H0;
 	((uint32_t *)result)[1] = SHA256_H1;
@@ -133,9 +139,12 @@
 	((uint32_t *)result)[6] = SHA256_H6;
 	((uint32_t *)result)[7] = SHA256_H7;
 
+	/* prevent taking the spurious DNA fault with padlock. */
+	ts_state = irq_ts_save();
 	asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
 		      : "+S"(in), "+D"(result)
 		      : "c"(count), "a"(0));
+	irq_ts_restore(ts_state);
 
 	padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);
 }
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 681c15f..ee827a7 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -96,6 +96,9 @@
 	unsigned int exec_units;
 	unsigned int desc_types;
 
+	/* SEC Compatibility info */
+	unsigned long features;
+
 	/* next channel to be assigned next incoming descriptor */
 	atomic_t last_chan;
 
@@ -133,6 +136,9 @@
 	struct hwrng rng;
 };
 
+/* .features flag */
+#define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
+
 /*
  * map virtual single (contiguous) pointer to h/w descriptor pointer
  */
@@ -785,7 +791,7 @@
 	/* copy the generated ICV to dst */
 	if (edesc->dma_len) {
 		icvdata = &edesc->link_tbl[edesc->src_nents +
-					   edesc->dst_nents + 1];
+					   edesc->dst_nents + 2];
 		sg = sg_last(areq->dst, edesc->dst_nents);
 		memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
 		       icvdata, ctx->authsize);
@@ -814,7 +820,7 @@
 		/* auth check */
 		if (edesc->dma_len)
 			icvdata = &edesc->link_tbl[edesc->src_nents +
-						   edesc->dst_nents + 1];
+						   edesc->dst_nents + 2];
 		else
 			icvdata = &edesc->link_tbl[0];
 
@@ -921,10 +927,30 @@
 		sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen,
 					  &edesc->link_tbl[0]);
 		if (sg_count > 1) {
+			struct talitos_ptr *link_tbl_ptr =
+				&edesc->link_tbl[sg_count-1];
+			struct scatterlist *sg;
+			struct talitos_private *priv = dev_get_drvdata(dev);
+
 			desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
 			desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl);
 			dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
 						   edesc->dma_len, DMA_BIDIRECTIONAL);
+			/* If necessary for this SEC revision,
+			 * add a link table entry for ICV.
+			 */
+			if ((priv->features &
+			     TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT) &&
+			    (edesc->desc.hdr & DESC_HDR_MODE0_ENCRYPT) == 0) {
+				link_tbl_ptr->j_extent = 0;
+				link_tbl_ptr++;
+				link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
+				link_tbl_ptr->len = cpu_to_be16(authsize);
+				sg = sg_last(areq->src, edesc->src_nents ? : 1);
+				link_tbl_ptr->ptr = cpu_to_be32(
+						(char *)sg_dma_address(sg)
+						+ sg->length - authsize);
+			}
 		} else {
 			/* Only one segment now, so no link tbl needed */
 			desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src));
@@ -944,12 +970,11 @@
 		desc->ptr[5].ptr = cpu_to_be32(sg_dma_address(areq->dst));
 	} else {
 		struct talitos_ptr *link_tbl_ptr =
-			&edesc->link_tbl[edesc->src_nents];
-		struct scatterlist *sg;
+			&edesc->link_tbl[edesc->src_nents + 1];
 
 		desc->ptr[5].ptr = cpu_to_be32((struct talitos_ptr *)
 					       edesc->dma_link_tbl +
-					       edesc->src_nents);
+					       edesc->src_nents + 1);
 		if (areq->src == areq->dst) {
 			memcpy(link_tbl_ptr, &edesc->link_tbl[0],
 			       edesc->src_nents * sizeof(struct talitos_ptr));
@@ -957,14 +982,10 @@
 			sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
 						  link_tbl_ptr);
 		}
+		/* Add an entry to the link table for ICV data */
 		link_tbl_ptr += sg_count - 1;
-
-		/* handle case where sg_last contains the ICV exclusively */
-		sg = sg_last(areq->dst, edesc->dst_nents);
-		if (sg->length == ctx->authsize)
-			link_tbl_ptr--;
-
 		link_tbl_ptr->j_extent = 0;
+		sg_count++;
 		link_tbl_ptr++;
 		link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
 		link_tbl_ptr->len = cpu_to_be16(authsize);
@@ -973,7 +994,7 @@
 		link_tbl_ptr->ptr = cpu_to_be32((struct talitos_ptr *)
 						edesc->dma_link_tbl +
 					        edesc->src_nents +
-						edesc->dst_nents + 1);
+						edesc->dst_nents + 2);
 
 		desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
 		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
@@ -1040,12 +1061,12 @@
 
 	/*
 	 * allocate space for base edesc plus the link tables,
-	 * allowing for a separate entry for the generated ICV (+ 1),
+	 * allowing for two separate entries for ICV and generated ICV (+ 2),
 	 * and the ICV data itself
 	 */
 	alloc_len = sizeof(struct ipsec_esp_edesc);
 	if (src_nents || dst_nents) {
-		dma_len = (src_nents + dst_nents + 1) *
+		dma_len = (src_nents + dst_nents + 2) *
 				 sizeof(struct talitos_ptr) + ctx->authsize;
 		alloc_len += dma_len;
 	} else {
@@ -1104,7 +1125,7 @@
 	/* stash incoming ICV for later cmp with ICV generated by the h/w */
 	if (edesc->dma_len)
 		icvdata = &edesc->link_tbl[edesc->src_nents +
-					   edesc->dst_nents + 1];
+					   edesc->dst_nents + 2];
 	else
 		icvdata = &edesc->link_tbl[0];
 
@@ -1480,6 +1501,9 @@
 		goto err_out;
 	}
 
+	if (of_device_is_compatible(np, "fsl,sec3.0"))
+		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
+
 	priv->head_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
 				  GFP_KERNEL);
 	priv->tail_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
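
The +1/+2 adjustments scattered through this file follow from a new
link-table layout; a hedged sketch of it as a comment (slot roles are
inferred from the changed indices; S = src_nents, D = dst_nents):

	/*
	 * edesc->link_tbl[] layout after this patch:
	 *
	 *   [0 .. S]         source entries, plus one spare slot for the
	 *                    ICV extent entry needed on SEC-3.0 decrypt
	 *   [S+1 .. S+D+1]   destination entries plus the ICV entry
	 *   [S+D+2 .. ]      the ICV scratch data itself
	 *
	 * hence dma_len now reserves (S + D + 2) talitos_ptr slots
	 * before the authsize bytes of ICV data.
	 */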
diff --git a/drivers/i2c/chips/isp1301_omap.c b/drivers/i2c/chips/isp1301_omap.c
index 18355ae..4655b79 100644
--- a/drivers/i2c/chips/isp1301_omap.c
+++ b/drivers/i2c/chips/isp1301_omap.c
@@ -1593,7 +1593,7 @@
 	if (machine_is_omap_h2()) {
 		/* full speed signaling by default */
 		isp1301_set_bits(isp, ISP1301_MODE_CONTROL_1,
-			MC1_SPEED_REG);
+			MC1_SPEED);
 		isp1301_set_bits(isp, ISP1301_MODE_CONTROL_2,
 			MC2_SPD_SUSP_CTRL);
 
diff --git a/drivers/input/serio/i8042-sparcio.h b/drivers/input/serio/i8042-sparcio.h
index 66bafe3..692a79e 100644
--- a/drivers/input/serio/i8042-sparcio.h
+++ b/drivers/input/serio/i8042-sparcio.h
@@ -1,10 +1,11 @@
 #ifndef _I8042_SPARCIO_H
 #define _I8042_SPARCIO_H
 
+#include <linux/of_device.h>
+
 #include <asm/io.h>
 #include <asm/oplib.h>
 #include <asm/prom.h>
-#include <asm/of_device.h>
 
 static int i8042_kbd_irq = -1;
 static int i8042_aux_irq = -1;
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index 4bf4f7b..b468f90 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -40,20 +40,20 @@
 #define DP(__mask, __fmt, __args...) do { \
 	if (bp->msglevel & (__mask)) \
 		printk(DP_LEVEL "[%s:%d(%s)]" __fmt, __func__, __LINE__, \
-			bp->dev?(bp->dev->name):"?", ##__args); \
+			bp->dev ? (bp->dev->name) : "?", ##__args); \
 	} while (0)
 
 /* errors debug print */
 #define BNX2X_DBG_ERR(__fmt, __args...) do { \
 	if (bp->msglevel & NETIF_MSG_PROBE) \
 		printk(KERN_ERR "[%s:%d(%s)]" __fmt, __func__, __LINE__, \
-			bp->dev?(bp->dev->name):"?", ##__args); \
+			bp->dev ? (bp->dev->name) : "?", ##__args); \
 	} while (0)
 
 /* for errors (never masked) */
 #define BNX2X_ERR(__fmt, __args...) do { \
 	printk(KERN_ERR "[%s:%d(%s)]" __fmt, __func__, __LINE__, \
-		bp->dev?(bp->dev->name):"?", ##__args); \
+		bp->dev ? (bp->dev->name) : "?", ##__args); \
 	} while (0)
 
 /* before we have a dev->name use dev_info() */
@@ -120,16 +120,8 @@
 #define SHMEM_RD(bp, field)		REG_RD(bp, SHMEM_ADDR(bp, field))
 #define SHMEM_WR(bp, field, val)	REG_WR(bp, SHMEM_ADDR(bp, field), val)
 
-#define NIG_WR(reg, val)	REG_WR(bp, reg, val)
-#define EMAC_WR(reg, val)	REG_WR(bp, emac_base + reg, val)
-#define BMAC_WR(reg, val)	REG_WR(bp, GRCBASE_NIG + bmac_addr + reg, val)
-
-
-#define for_each_queue(bp, var)	for (var = 0; var < bp->num_queues; var++)
-
-#define for_each_nondefault_queue(bp, var) \
-				for (var = 1; var < bp->num_queues; var++)
-#define is_multi(bp)		(bp->num_queues > 1)
+#define EMAC_RD(bp, reg)		REG_RD(bp, emac_base + reg)
+#define EMAC_WR(bp, reg, val)		REG_WR(bp, emac_base + reg, val)
 
 
 /* fast path */
@@ -163,7 +155,7 @@
 #define NUM_RX_SGE_PAGES		2
 #define RX_SGE_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_rx_sge))
 #define MAX_RX_SGE_CNT			(RX_SGE_CNT - 2)
-/* RX_SGE_CNT is promissed to be a power of 2 */
+/* RX_SGE_CNT is promised to be a power of 2 */
 #define RX_SGE_MASK			(RX_SGE_CNT - 1)
 #define NUM_RX_SGE			(RX_SGE_CNT * NUM_RX_SGE_PAGES)
 #define MAX_RX_SGE			(NUM_RX_SGE - 1)
@@ -258,8 +250,7 @@
 
 	unsigned long		tx_pkt,
 				rx_pkt,
-				rx_calls,
-				rx_alloc_failed;
+				rx_calls;
 	/* TPA related */
 	struct sw_rx_bd		tpa_pool[ETH_MAX_AGGREGATION_QUEUES_E1H];
 	u8			tpa_state[ETH_MAX_AGGREGATION_QUEUES_E1H];
@@ -275,6 +266,15 @@
 
 #define bnx2x_fp(bp, nr, var)		(bp->fp[nr].var)
 
+#define BNX2X_HAS_TX_WORK(fp) \
+			((fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) || \
+			 (fp->tx_pkt_prod != fp->tx_pkt_cons))
+
+#define BNX2X_HAS_RX_WORK(fp) \
+			(fp->rx_comp_cons != le16_to_cpu(*fp->rx_cons_sb))
+
+#define BNX2X_HAS_WORK(fp)	(BNX2X_HAS_RX_WORK(fp) || BNX2X_HAS_TX_WORK(fp))
+
 
 /* MC hsi */
 #define MAX_FETCH_BD			13	/* HW max BDs per packet */
@@ -317,7 +317,7 @@
 #define RCQ_BD(x)			((x) & MAX_RCQ_BD)
 
 
-/* This is needed for determening of last_max */
+/* This is needed for determining last_max */
 #define SUB_S16(a, b)			(s16)((s16)(a) - (s16)(b))
 
 #define __SGE_MASK_SET_BIT(el, bit) \
@@ -386,20 +386,28 @@
 #define TPA_TYPE(cqe_fp_flags)		((cqe_fp_flags) & \
 					 (TPA_TYPE_START | TPA_TYPE_END))
 
-#define BNX2X_RX_SUM_OK(cqe) \
-			(!(cqe->fast_path_cqe.status_flags & \
-			 (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG | \
-			  ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)))
+#define ETH_RX_ERROR_FALGS		ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG
+
+#define BNX2X_IP_CSUM_ERR(cqe) \
+			(!((cqe)->fast_path_cqe.status_flags & \
+			   ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \
+			 ((cqe)->fast_path_cqe.type_error_flags & \
+			  ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG))
+
+#define BNX2X_L4_CSUM_ERR(cqe) \
+			(!((cqe)->fast_path_cqe.status_flags & \
+			   ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \
+			 ((cqe)->fast_path_cqe.type_error_flags & \
+			  ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
+
+#define BNX2X_RX_CSUM_OK(cqe) \
+			(!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe)))
 
 #define BNX2X_RX_SUM_FIX(cqe) \
 			((le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) & \
 			  PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) == \
 			 (1 << PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT))
 
-#define ETH_RX_ERROR_FALGS	(ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG | \
-				 ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG | \
-				 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)
-
 
 #define FP_USB_FUNC_OFF			(2 + 2*HC_USTORM_SB_NUM_INDICES)
 #define FP_CSB_FUNC_OFF			(2 + 2*HC_CSTORM_SB_NUM_INDICES)
@@ -647,6 +655,8 @@
 
 	u32 brb_drop_hi;
 	u32 brb_drop_lo;
+	u32 brb_truncate_hi;
+	u32 brb_truncate_lo;
 
 	u32 jabber_packets_received;
 
@@ -663,6 +673,9 @@
 	u32 mac_discard;
 
 	u32 driver_xoff;
+	u32 rx_err_discard_pkt;
+	u32 rx_skb_alloc_failed;
+	u32 hw_csum_err;
 };
 
 #define STATS_OFFSET32(stat_name) \
@@ -753,7 +766,6 @@
 	u16			def_att_idx;
 	u32			attn_state;
 	struct attn_route	attn_group[MAX_DYNAMIC_ATTN_GRPS];
-	u32			aeu_mask;
 	u32			nig_mask;
 
 	/* slow path ring */
@@ -772,7 +784,7 @@
 	u8			stats_pending;
 	u8			set_mac_pending;
 
-	/* End of fileds used in the performance code paths */
+	/* End of fields used in the performance code paths */
 
 	int			panic;
 	int			msglevel;
@@ -794,9 +806,6 @@
 #define BP_FUNC(bp)			(bp->func)
 #define BP_E1HVN(bp)			(bp->func >> 1)
 #define BP_L_ID(bp)			(BP_E1HVN(bp) << 2)
-/* assorted E1HVN */
-#define IS_E1HMF(bp)			(bp->e1hmf != 0)
-#define BP_MAX_QUEUES(bp)		(IS_E1HMF(bp) ? 4 : 16)
 
 	int			pm_cap;
 	int			pcie_cap;
@@ -821,6 +830,7 @@
 	u32			mf_config;
 	u16			e1hov;
 	u8			e1hmf;
+#define IS_E1HMF(bp)			(bp->e1hmf != 0)
 
 	u8			wol;
 
@@ -836,7 +846,6 @@
 	u16			rx_ticks_int;
 	u16			rx_ticks;
 
-	u32			stats_ticks;
 	u32			lin_cnt;
 
 	int			state;
@@ -852,6 +861,7 @@
 #define BNX2X_STATE_ERROR		0xf000
 
 	int			num_queues;
+#define BP_MAX_QUEUES(bp)		(IS_E1HMF(bp) ? 4 : 16)
 
 	u32			rx_mode;
 #define BNX2X_RX_MODE_NONE		0
@@ -902,10 +912,17 @@
 };
 
 
+#define for_each_queue(bp, var)	for (var = 0; var < bp->num_queues; var++)
+
+#define for_each_nondefault_queue(bp, var) \
+				for (var = 1; var < bp->num_queues; var++)
+#define is_multi(bp)		(bp->num_queues > 1)
+
+
 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
 		      u32 len32);
-int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode);
+int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
 
 static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 			   int wait)
@@ -976,7 +993,7 @@
 #define PCICFG_LINK_SPEED_SHIFT		16
 
 
-#define BNX2X_NUM_STATS			39
+#define BNX2X_NUM_STATS			42
 #define BNX2X_NUM_TESTS			8
 
 #define BNX2X_MAC_LOOPBACK		0
@@ -1007,10 +1024,10 @@
 /* resolution of the rate shaping timer - 100 usec */
 #define RS_PERIODIC_TIMEOUT_USEC	100
 /* resolution of fairness algorithm in usecs -
-   coefficient for clauclating the actuall t fair */
+   coefficient for calculating the actual t fair */
 #define T_FAIR_COEF			10000000
 /* number of bytes in single QM arbitration cycle -
-   coeffiecnt for calculating the fairness timer */
+   coefficient for calculating the fairness timer */
 #define QM_ARB_BYTES			40000
 #define FAIR_MEM			2
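
The BNX2X_IP_CSUM_ERR/BNX2X_L4_CSUM_ERR hunk above splits the old single
BNX2X_RX_SUM_OK test in two: a checksum now counts as bad only when the
hardware actually validated it (the matching *_XSUM_NO_VALIDATION_FLG bit
is clear) and the *_BAD_XSUM_FLG bit is set, so frames whose checksum the
hardware skipped are no longer reported as errors. A minimal stand-alone
sketch of the same predicate; the bit values are hypothetical placeholders
for the ETH_FAST_PATH_RX_CQE_* constants from bnx2x_hsi.h:

/*
 * Demo of the split checksum test; flag bits are hypothetical, the
 * real values live in the ETH_FAST_PATH_RX_CQE_* defines.
 */
#include <stdio.h>

#define IP_XSUM_NO_VALIDATION_FLG	(1 << 0)	/* hypothetical */
#define L4_XSUM_NO_VALIDATION_FLG	(1 << 1)	/* hypothetical */
#define IP_BAD_XSUM_FLG			(1 << 0)	/* hypothetical */
#define L4_BAD_XSUM_FLG			(1 << 1)	/* hypothetical */

struct demo_cqe {
	unsigned char status_flags;	/* "validation skipped" bits */
	unsigned char type_error_flags;	/* "checksum bad" bits */
};

/* error only if validation ran (flag clear) and the result was bad */
static int ip_csum_err(const struct demo_cqe *c)
{
	return !(c->status_flags & IP_XSUM_NO_VALIDATION_FLG) &&
	       (c->type_error_flags & IP_BAD_XSUM_FLG);
}

static int l4_csum_err(const struct demo_cqe *c)
{
	return !(c->status_flags & L4_XSUM_NO_VALIDATION_FLG) &&
	       (c->type_error_flags & L4_BAD_XSUM_FLG);
}

int main(void)
{
	/* L4 validation skipped but bad-xsum bit set: not an error now */
	struct demo_cqe c = {
		.status_flags = L4_XSUM_NO_VALIDATION_FLG,
		.type_error_flags = L4_BAD_XSUM_FLG,
	};

	printf("csum ok: %d\n", !(ip_csum_err(&c) || l4_csum_err(&c)));
	return 0;
}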
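The SUB_S16() macro kept in the same header ("needed for determining
last_max") handles the related wraparound problem for 16-bit ring
indices: an unsigned compare misorders two indices once the producer
wraps past zero, while a signed 16-bit difference still gives the
correct distance. A small demo with names local to the sketch:

#include <stdio.h>
#include <stdint.h>

#define SUB_S16(a, b)	((int16_t)((int16_t)(a) - (int16_t)(b)))

int main(void)
{
	uint16_t last_max = 0xfffb;	/* old high-water mark */
	uint16_t idx = 0x0005;		/* new index, wrapped past 0 */

	/* unsigned compare gets the ordering wrong after the wrap */
	printf("unsigned: last_max %s idx\n", last_max > idx ? ">" : "<=");
	/* signed 16-bit distance is still correct: +10, so idx is ahead */
	printf("SUB_S16(idx, last_max) = %d\n", SUB_S16(idx, last_max));
	return 0;
}
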
 
diff --git a/drivers/net/bnx2x_fw_defs.h b/drivers/net/bnx2x_fw_defs.h
index e3da7f6..192fa98 100644
--- a/drivers/net/bnx2x_fw_defs.h
+++ b/drivers/net/bnx2x_fw_defs.h
@@ -9,165 +9,171 @@
 
 
 #define CSTORM_ASSERT_LIST_INDEX_OFFSET \
-	(IS_E1H_OFFSET? 0x7000 : 0x1000)
+	(IS_E1H_OFFSET ? 0x7000 : 0x1000)
 #define CSTORM_ASSERT_LIST_OFFSET(idx) \
-	(IS_E1H_OFFSET? (0x7020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
+	(IS_E1H_OFFSET ? (0x7020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
 #define CSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \
-	(IS_E1H_OFFSET? (0x8522 + ((function>>1) * 0x40) + ((function&1) \
-	* 0x100) + (index * 0x4)) : (0x1922 + (function * 0x40) + (index \
-	* 0x4)))
+	(IS_E1H_OFFSET ? (0x8522 + ((function>>1) * 0x40) + \
+	((function&1) * 0x100) + (index * 0x4)) : (0x1922 + (function * \
+	0x40) + (index * 0x4)))
 #define CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \
-	(IS_E1H_OFFSET? (0x8500 + ((function>>1) * 0x40) + ((function&1) \
-	* 0x100)) : (0x1900 + (function * 0x40)))
+	(IS_E1H_OFFSET ? (0x8500 + ((function>>1) * 0x40) + \
+	((function&1) * 0x100)) : (0x1900 + (function * 0x40)))
 #define CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \
-	(IS_E1H_OFFSET? (0x8508 + ((function>>1) * 0x40) + ((function&1) \
-	* 0x100)) : (0x1908 + (function * 0x40)))
+	(IS_E1H_OFFSET ? (0x8508 + ((function>>1) * 0x40) + \
+	((function&1) * 0x100)) : (0x1908 + (function * 0x40)))
 #define CSTORM_FUNCTION_MODE_OFFSET \
-	(IS_E1H_OFFSET? 0x11e8 : 0xffffffff)
+	(IS_E1H_OFFSET ? 0x11e8 : 0xffffffff)
 #define CSTORM_HC_BTR_OFFSET(port) \
-	(IS_E1H_OFFSET? (0x8704 + (port * 0xf0)) : (0x1984 + (port * 0xc0)))
+	(IS_E1H_OFFSET ? (0x8704 + (port * 0xf0)) : (0x1984 + (port * 0xc0)))
 #define CSTORM_SB_HC_DISABLE_OFFSET(port, cpu_id, index) \
-	(IS_E1H_OFFSET? (0x801a + (port * 0x280) + (cpu_id * 0x28) + \
+	(IS_E1H_OFFSET ? (0x801a + (port * 0x280) + (cpu_id * 0x28) + \
 	(index * 0x4)) : (0x141a + (port * 0x280) + (cpu_id * 0x28) + \
 	(index * 0x4)))
 #define CSTORM_SB_HC_TIMEOUT_OFFSET(port, cpu_id, index) \
-	(IS_E1H_OFFSET? (0x8018 + (port * 0x280) + (cpu_id * 0x28) + \
+	(IS_E1H_OFFSET ? (0x8018 + (port * 0x280) + (cpu_id * 0x28) + \
 	(index * 0x4)) : (0x1418 + (port * 0x280) + (cpu_id * 0x28) + \
 	(index * 0x4)))
 #define CSTORM_SB_HOST_SB_ADDR_OFFSET(port, cpu_id) \
-	(IS_E1H_OFFSET? (0x8000 + (port * 0x280) + (cpu_id * 0x28)) : \
+	(IS_E1H_OFFSET ? (0x8000 + (port * 0x280) + (cpu_id * 0x28)) : \
 	(0x1400 + (port * 0x280) + (cpu_id * 0x28)))
 #define CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, cpu_id) \
-	(IS_E1H_OFFSET? (0x8008 + (port * 0x280) + (cpu_id * 0x28)) : \
+	(IS_E1H_OFFSET ? (0x8008 + (port * 0x280) + (cpu_id * 0x28)) : \
 	(0x1408 + (port * 0x280) + (cpu_id * 0x28)))
 #define CSTORM_STATS_FLAGS_OFFSET(function) \
-	(IS_E1H_OFFSET? (0x1108 + (function * 0x8)) : (0x5108 + \
+	(IS_E1H_OFFSET ? (0x1108 + (function * 0x8)) : (0x5108 + \
 	(function * 0x8)))
 #define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(function) \
-	(IS_E1H_OFFSET? (0x31c0 + (function * 0x20)) : 0xffffffff)
+	(IS_E1H_OFFSET ? (0x31c0 + (function * 0x20)) : 0xffffffff)
 #define TSTORM_ASSERT_LIST_INDEX_OFFSET \
-	(IS_E1H_OFFSET? 0xa000 : 0x1000)
+	(IS_E1H_OFFSET ? 0xa000 : 0x1000)
 #define TSTORM_ASSERT_LIST_OFFSET(idx) \
-	(IS_E1H_OFFSET? (0xa020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
+	(IS_E1H_OFFSET ? (0xa020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
 #define TSTORM_CLIENT_CONFIG_OFFSET(port, client_id) \
-	(IS_E1H_OFFSET? (0x3358 + (port * 0x3e8) + (client_id * 0x28)) : \
-	(0x9c8 + (port * 0x2f8) + (client_id * 0x28)))
+	(IS_E1H_OFFSET ? (0x3358 + (port * 0x3e8) + (client_id * 0x28)) \
+	: (0x9c8 + (port * 0x2f8) + (client_id * 0x28)))
 #define TSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \
-	(IS_E1H_OFFSET? (0xb01a + ((function>>1) * 0x28) + ((function&1) \
-	* 0xa0) + (index * 0x4)) : (0x141a + (function * 0x28) + (index * \
-	0x4)))
+	(IS_E1H_OFFSET ? (0xb01a + ((function>>1) * 0x28) + \
+	((function&1) * 0xa0) + (index * 0x4)) : (0x141a + (function * \
+	0x28) + (index * 0x4)))
 #define TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \
-	(IS_E1H_OFFSET? (0xb000 + ((function>>1) * 0x28) + ((function&1) \
-	* 0xa0)) : (0x1400 + (function * 0x28)))
+	(IS_E1H_OFFSET ? (0xb000 + ((function>>1) * 0x28) + \
+	((function&1) * 0xa0)) : (0x1400 + (function * 0x28)))
 #define TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \
-	(IS_E1H_OFFSET? (0xb008 + ((function>>1) * 0x28) + ((function&1) \
-	* 0xa0)) : (0x1408 + (function * 0x28)))
+	(IS_E1H_OFFSET ? (0xb008 + ((function>>1) * 0x28) + \
+	((function&1) * 0xa0)) : (0x1408 + (function * 0x28)))
 #define TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \
-	(IS_E1H_OFFSET? (0x2b80 + (function * 0x8)) : (0x4b68 + \
+	(IS_E1H_OFFSET ? (0x2b80 + (function * 0x8)) : (0x4b68 + \
 	(function * 0x8)))
 #define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(function) \
-	(IS_E1H_OFFSET? (0x3000 + (function * 0x38)) : (0x1500 + \
+	(IS_E1H_OFFSET ? (0x3000 + (function * 0x38)) : (0x1500 + \
 	(function * 0x38)))
 #define TSTORM_FUNCTION_MODE_OFFSET \
-	(IS_E1H_OFFSET? 0x1ad0 : 0xffffffff)
+	(IS_E1H_OFFSET ? 0x1ad0 : 0xffffffff)
 #define TSTORM_HC_BTR_OFFSET(port) \
-	(IS_E1H_OFFSET? (0xb144 + (port * 0x30)) : (0x1454 + (port * 0x18)))
+	(IS_E1H_OFFSET ? (0xb144 + (port * 0x30)) : (0x1454 + (port * 0x18)))
 #define TSTORM_INDIRECTION_TABLE_OFFSET(function) \
-	(IS_E1H_OFFSET? (0x12c8 + (function * 0x80)) : (0x22c8 + \
+	(IS_E1H_OFFSET ? (0x12c8 + (function * 0x80)) : (0x22c8 + \
 	(function * 0x80)))
 #define TSTORM_INDIRECTION_TABLE_SIZE 0x80
 #define TSTORM_MAC_FILTER_CONFIG_OFFSET(function) \
-	(IS_E1H_OFFSET? (0x3008 + (function * 0x38)) : (0x1508 + \
+	(IS_E1H_OFFSET ? (0x3008 + (function * 0x38)) : (0x1508 + \
 	(function * 0x38)))
+#define TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stats_counter_id) \
+	(IS_E1H_OFFSET ? (0x2010 + (port * 0x5b0) + (stats_counter_id * \
+	0x50)) : (0x4000 + (port * 0x3f0) + (stats_counter_id * 0x38)))
 #define TSTORM_RX_PRODS_OFFSET(port, client_id) \
-	(IS_E1H_OFFSET? (0x3350 + (port * 0x3e8) + (client_id * 0x28)) : \
-	(0x9c0 + (port * 0x2f8) + (client_id * 0x28)))
+	(IS_E1H_OFFSET ? (0x3350 + (port * 0x3e8) + (client_id * 0x28)) \
+	: (0x9c0 + (port * 0x2f8) + (client_id * 0x28)))
 #define TSTORM_STATS_FLAGS_OFFSET(function) \
-	(IS_E1H_OFFSET? (0x2c00 + (function * 0x8)) : (0x4b88 + \
+	(IS_E1H_OFFSET ? (0x2c00 + (function * 0x8)) : (0x4b88 + \
 	(function * 0x8)))
-#define TSTORM_TPA_EXIST_OFFSET (IS_E1H_OFFSET? 0x3b30 : 0x1c20)
-#define USTORM_AGG_DATA_OFFSET (IS_E1H_OFFSET? 0xa040 : 0x2c10)
-#define USTORM_AGG_DATA_SIZE (IS_E1H_OFFSET? 0x2440 : 0x1200)
+#define TSTORM_TPA_EXIST_OFFSET (IS_E1H_OFFSET ? 0x3b30 : 0x1c20)
+#define USTORM_AGG_DATA_OFFSET (IS_E1H_OFFSET ? 0xa040 : 0x2c10)
+#define USTORM_AGG_DATA_SIZE (IS_E1H_OFFSET ? 0x2440 : 0x1200)
 #define USTORM_ASSERT_LIST_INDEX_OFFSET \
-	(IS_E1H_OFFSET? 0x8000 : 0x1000)
+	(IS_E1H_OFFSET ? 0x8000 : 0x1000)
 #define USTORM_ASSERT_LIST_OFFSET(idx) \
-	(IS_E1H_OFFSET? (0x8020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
+	(IS_E1H_OFFSET ? (0x8020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
 #define USTORM_CQE_PAGE_BASE_OFFSET(port, clientId) \
-	(IS_E1H_OFFSET? (0x3298 + (port * 0x258) + (clientId * 0x18)) : \
+	(IS_E1H_OFFSET ? (0x3298 + (port * 0x258) + (clientId * 0x18)) : \
 	(0x5450 + (port * 0x1c8) + (clientId * 0x18)))
 #define USTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \
-	(IS_E1H_OFFSET? (0x951a + ((function>>1) * 0x28) + ((function&1) \
-	* 0xa0) + (index * 0x4)) : (0x191a + (function * 0x28) + (index * \
-	0x4)))
+	(IS_E1H_OFFSET ? (0x951a + ((function>>1) * 0x28) + \
+	((function&1) * 0xa0) + (index * 0x4)) : (0x191a + (function * \
+	0x28) + (index * 0x4)))
 #define USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \
-	(IS_E1H_OFFSET? (0x9500 + ((function>>1) * 0x28) + ((function&1) \
-	* 0xa0)) : (0x1900 + (function * 0x28)))
+	(IS_E1H_OFFSET ? (0x9500 + ((function>>1) * 0x28) + \
+	((function&1) * 0xa0)) : (0x1900 + (function * 0x28)))
 #define USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \
-	(IS_E1H_OFFSET? (0x9508 + ((function>>1) * 0x28) + ((function&1) \
-	* 0xa0)) : (0x1908 + (function * 0x28)))
+	(IS_E1H_OFFSET ? (0x9508 + ((function>>1) * 0x28) + \
+	((function&1) * 0xa0)) : (0x1908 + (function * 0x28)))
 #define USTORM_FUNCTION_MODE_OFFSET \
-	(IS_E1H_OFFSET? 0x2448 : 0xffffffff)
+	(IS_E1H_OFFSET ? 0x2448 : 0xffffffff)
 #define USTORM_HC_BTR_OFFSET(port) \
-	(IS_E1H_OFFSET? (0x9644 + (port * 0xd0)) : (0x1954 + (port * 0xb8)))
+	(IS_E1H_OFFSET ? (0x9644 + (port * 0xd0)) : (0x1954 + (port * 0xb8)))
 #define USTORM_MAX_AGG_SIZE_OFFSET(port, clientId) \
-	(IS_E1H_OFFSET? (0x3290 + (port * 0x258) + (clientId * 0x18)) : \
+	(IS_E1H_OFFSET ? (0x3290 + (port * 0x258) + (clientId * 0x18)) : \
 	(0x5448 + (port * 0x1c8) + (clientId * 0x18)))
 #define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(function) \
-	(IS_E1H_OFFSET? (0x2408 + (function * 0x8)) : (0x5408 + \
+	(IS_E1H_OFFSET ? (0x2408 + (function * 0x8)) : (0x5408 + \
 	(function * 0x8)))
 #define USTORM_SB_HC_DISABLE_OFFSET(port, cpu_id, index) \
-	(IS_E1H_OFFSET? (0x901a + (port * 0x280) + (cpu_id * 0x28) + \
+	(IS_E1H_OFFSET ? (0x901a + (port * 0x280) + (cpu_id * 0x28) + \
 	(index * 0x4)) : (0x141a + (port * 0x280) + (cpu_id * 0x28) + \
 	(index * 0x4)))
 #define USTORM_SB_HC_TIMEOUT_OFFSET(port, cpu_id, index) \
-	(IS_E1H_OFFSET? (0x9018 + (port * 0x280) + (cpu_id * 0x28) + \
+	(IS_E1H_OFFSET ? (0x9018 + (port * 0x280) + (cpu_id * 0x28) + \
 	(index * 0x4)) : (0x1418 + (port * 0x280) + (cpu_id * 0x28) + \
 	(index * 0x4)))
 #define USTORM_SB_HOST_SB_ADDR_OFFSET(port, cpu_id) \
-	(IS_E1H_OFFSET? (0x9000 + (port * 0x280) + (cpu_id * 0x28)) : \
+	(IS_E1H_OFFSET ? (0x9000 + (port * 0x280) + (cpu_id * 0x28)) : \
 	(0x1400 + (port * 0x280) + (cpu_id * 0x28)))
 #define USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, cpu_id) \
-	(IS_E1H_OFFSET? (0x9008 + (port * 0x280) + (cpu_id * 0x28)) : \
+	(IS_E1H_OFFSET ? (0x9008 + (port * 0x280) + (cpu_id * 0x28)) : \
 	(0x1408 + (port * 0x280) + (cpu_id * 0x28)))
 #define XSTORM_ASSERT_LIST_INDEX_OFFSET \
-	(IS_E1H_OFFSET? 0x9000 : 0x1000)
+	(IS_E1H_OFFSET ? 0x9000 : 0x1000)
 #define XSTORM_ASSERT_LIST_OFFSET(idx) \
-	(IS_E1H_OFFSET? (0x9020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
+	(IS_E1H_OFFSET ? (0x9020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
 #define XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) \
-	(IS_E1H_OFFSET? (0x24a8 + (port * 0x40)) : (0x3ba0 + (port * 0x40)))
+	(IS_E1H_OFFSET ? (0x24a8 + (port * 0x40)) : (0x3ba0 + (port * 0x40)))
 #define XSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \
-	(IS_E1H_OFFSET? (0xa01a + ((function>>1) * 0x28) + ((function&1) \
-	* 0xa0) + (index * 0x4)) : (0x141a + (function * 0x28) + (index * \
-	0x4)))
+	(IS_E1H_OFFSET ? (0xa01a + ((function>>1) * 0x28) + \
+	((function&1) * 0xa0) + (index * 0x4)) : (0x141a + (function * \
+	0x28) + (index * 0x4)))
 #define XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \
-	(IS_E1H_OFFSET? (0xa000 + ((function>>1) * 0x28) + ((function&1) \
-	* 0xa0)) : (0x1400 + (function * 0x28)))
+	(IS_E1H_OFFSET ? (0xa000 + ((function>>1) * 0x28) + \
+	((function&1) * 0xa0)) : (0x1400 + (function * 0x28)))
 #define XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \
-	(IS_E1H_OFFSET? (0xa008 + ((function>>1) * 0x28) + ((function&1) \
-	* 0xa0)) : (0x1408 + (function * 0x28)))
+	(IS_E1H_OFFSET ? (0xa008 + ((function>>1) * 0x28) + \
+	((function&1) * 0xa0)) : (0x1408 + (function * 0x28)))
 #define XSTORM_E1HOV_OFFSET(function) \
-	(IS_E1H_OFFSET? (0x2ab8 + (function * 0x2)) : 0xffffffff)
+	(IS_E1H_OFFSET ? (0x2ab8 + (function * 0x2)) : 0xffffffff)
 #define XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \
-	(IS_E1H_OFFSET? (0x2418 + (function * 0x8)) : (0x3b70 + \
+	(IS_E1H_OFFSET ? (0x2418 + (function * 0x8)) : (0x3b70 + \
 	(function * 0x8)))
 #define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(function) \
-	(IS_E1H_OFFSET? (0x2568 + (function * 0x70)) : (0x3c60 + \
+	(IS_E1H_OFFSET ? (0x2568 + (function * 0x70)) : (0x3c60 + \
 	(function * 0x70)))
 #define XSTORM_FUNCTION_MODE_OFFSET \
-	(IS_E1H_OFFSET? 0x2ac8 : 0xffffffff)
+	(IS_E1H_OFFSET ? 0x2ac8 : 0xffffffff)
 #define XSTORM_HC_BTR_OFFSET(port) \
-	(IS_E1H_OFFSET? (0xa144 + (port * 0x30)) : (0x1454 + (port * 0x18)))
+	(IS_E1H_OFFSET ? (0xa144 + (port * 0x30)) : (0x1454 + (port * 0x18)))
+#define XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stats_counter_id) \
+	(IS_E1H_OFFSET ? (0xc000 + (port * 0x3f0) + (stats_counter_id * \
+	0x38)) : (0x3378 + (port * 0x3f0) + (stats_counter_id * 0x38)))
 #define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(function) \
-	(IS_E1H_OFFSET? (0x2528 + (function * 0x70)) : (0x3c20 + \
+	(IS_E1H_OFFSET ? (0x2528 + (function * 0x70)) : (0x3c20 + \
 	(function * 0x70)))
 #define XSTORM_SPQ_PAGE_BASE_OFFSET(function) \
-	(IS_E1H_OFFSET? (0x2000 + (function * 0x10)) : (0x3328 + \
+	(IS_E1H_OFFSET ? (0x2000 + (function * 0x10)) : (0x3328 + \
 	(function * 0x10)))
 #define XSTORM_SPQ_PROD_OFFSET(function) \
-	(IS_E1H_OFFSET? (0x2008 + (function * 0x10)) : (0x3330 + \
+	(IS_E1H_OFFSET ? (0x2008 + (function * 0x10)) : (0x3330 + \
 	(function * 0x10)))
 #define XSTORM_STATS_FLAGS_OFFSET(function) \
-	(IS_E1H_OFFSET? (0x23d8 + (function * 0x8)) : (0x3b60 + \
+	(IS_E1H_OFFSET ? (0x23d8 + (function * 0x8)) : (0x3b60 + \
 	(function * 0x8)))
 #define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0
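
All of these macros follow one pattern: an IS_E1H_OFFSET selector
between two base-plus-stride layouts, where on E1H the function number
decomposes as function >> 1 for the vnic index and function & 1 for the
port (matching BP_E1HVN()/BP_PORT() in bnx2x.h). A stand-alone sketch
of the arithmetic, mirroring CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET() above;
passing is_e1h as a plain parameter is a simplification for the demo:

#include <stdio.h>

static unsigned int def_sb_host_sb_addr(int is_e1h, int function)
{
	/* E1H: 0x40 stride per vn, 0x100 per port; E1: 0x40 per function */
	return is_e1h ? (0x8500 + ((function >> 1) * 0x40) +
			 ((function & 1) * 0x100))
		      : (0x1900 + (function * 0x40));
}

int main(void)
{
	int fn;

	for (fn = 0; fn < 8; fn++)	/* E1H exposes 8 PCI functions */
		printf("E1H fn%d -> 0x%04x\n", fn, def_sb_host_sb_addr(1, fn));
	for (fn = 0; fn < 2; fn++)	/* E1 exposes 2 */
		printf("E1  fn%d -> 0x%04x\n", fn, def_sb_host_sb_addr(0, fn));
	return 0;
}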
 
diff --git a/drivers/net/bnx2x_hsi.h b/drivers/net/bnx2x_hsi.h
index d3e8198..efd7644 100644
--- a/drivers/net/bnx2x_hsi.h
+++ b/drivers/net/bnx2x_hsi.h
@@ -1268,7 +1268,7 @@
 
 
 /*
- * IGU driver acknowlegement register
+ * IGU driver acknowledgement register
  */
 struct igu_ack_register {
 #if defined(__BIG_ENDIAN)
@@ -1882,7 +1882,7 @@
 };
 
 /*
- * structure for easy accessability to assembler
+ * structure for easy accessibility to assembler
  */
 struct eth_tx_bd_flags {
 	u8 as_bitfield;
@@ -2044,7 +2044,7 @@
 
 
 /*
- * ethernet doorbell
+ * Ethernet doorbell
  */
 struct eth_tx_doorbell {
 #if defined(__BIG_ENDIAN)
@@ -2256,7 +2256,7 @@
 };
 
 /*
- * union for ramrod data for ethernet protocol (CQE) (force size of 16 bits)
+ * union for ramrod data for Ethernet protocol (CQE) (force size of 16 bits)
  */
 union eth_ramrod_data {
 	struct ramrod_data general;
@@ -2330,7 +2330,7 @@
 };
 
 /*
- * ethernet slow path element
+ * Ethernet slow path element
  */
 union eth_specific_data {
 	u8 protocol_data[8];
@@ -2343,7 +2343,7 @@
 };
 
 /*
- * ethernet slow path element
+ * Ethernet slow path element
  */
 struct eth_spe {
 	struct spe_hdr hdr;
@@ -2615,7 +2615,7 @@
 
 
 /*
- * common flag to indicate existance of TPA.
+ * common flag to indicate existence of TPA.
  */
 struct tstorm_eth_tpa_exist {
 #if defined(__BIG_ENDIAN)
@@ -2765,7 +2765,7 @@
 };
 
 /*
- * Eth statistics query sturcture for the eth_stats_quesry ramrod
+ * Eth statistics query structure for the eth_stats_query ramrod
  */
 struct eth_stats_query {
 	struct xstorm_common_stats xstorm_common;
diff --git a/drivers/net/bnx2x_init.h b/drivers/net/bnx2x_init.h
index 4c77507..130927cf 100644
--- a/drivers/net/bnx2x_init.h
+++ b/drivers/net/bnx2x_init.h
@@ -72,26 +72,26 @@
 
 
 struct raw_op {
-	u32 op		:8;
-	u32 offset	:24;
+	u32 op:8;
+	u32 offset:24;
 	u32 raw_data;
 };
 
 struct op_read {
-	u32 op		:8;
-	u32 offset	:24;
+	u32 op:8;
+	u32 offset:24;
 	u32 pad;
 };
 
 struct op_write {
-	u32 op		:8;
-	u32 offset	:24;
+	u32 op:8;
+	u32 offset:24;
 	u32 val;
 };
 
 struct op_string_write {
-	u32 op		:8;
-	u32 offset	:24;
+	u32 op:8;
+	u32 offset:24;
 #ifdef __LITTLE_ENDIAN
 	u16 data_off;
 	u16 data_len;
@@ -102,8 +102,8 @@
 };
 
 struct op_zero {
-	u32 op		:8;
-	u32 offset	:24;
+	u32 op:8;
+	u32 offset:24;
 	u32 len;
 };
 
@@ -208,7 +208,7 @@
 /*********************************************************
    There are different blobs for each PRAM section.
    In addition, each blob write operation is divided into a few operations
-   in order to decrease the amount of phys. contigious buffer needed.
+   in order to decrease the amount of physically contiguous buffer needed.
    Thus, when we select a blob the address may be with some offset
    from the beginning of PRAM section.
    The same holds for the INT_TABLE sections.
@@ -336,7 +336,7 @@
 		len = op->str_wr.data_len;
 		data = data_base + op->str_wr.data_off;
 
-		/* carefull! it must be in order */
+		/* careful! it must be in order */
 		if (unlikely(op_type > OP_WB)) {
 
 			/* If E1 only */
@@ -740,7 +740,7 @@
 	return crc_res;
 }
 
-/* regiesers addresses are not in order
+/* register addresses are not in order
    so these arrays help simplify the code */
 static const int cm_start[E1H_FUNC_MAX][9] = {
 	{MISC_FUNC0_START, TCM_FUNC0_START, UCM_FUNC0_START, CCM_FUNC0_START,
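
The struct raw_op / op_* hunks at the top of this file's diff only
reflow the bitfield declarations; the layout is unchanged: one 32-bit
word split 8/24 between the opcode and the register offset, plus a
32-bit payload. A quick stand-alone check of that packing (bitfield
ordering is compiler- and ABI-dependent, so this illustrates the shape
rather than guaranteeing the exact bit placement):

#include <stdio.h>
#include <stdint.h>

struct demo_raw_op {
	uint32_t op:8;		/* opcode (OP_WR, OP_ZR, OP_SW_E1, ...) */
	uint32_t offset:24;	/* register offset the op targets */
	uint32_t raw_data;	/* op-specific payload */
};

int main(void)
{
	struct demo_raw_op op = {
		.op = 0x5,
		.offset = 0x123456,	/* fits in 24 bits */
		.raw_data = 0xdeadbeef,
	};

	printf("sizeof = %zu\n", sizeof(op));	/* 8 on common ABIs */
	printf("op=0x%x offset=0x%06x\n", op.op, op.offset);
	return 0;
}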
diff --git a/drivers/net/bnx2x_init_values.h b/drivers/net/bnx2x_init_values.h
index 6301905..9755bf6 100644
--- a/drivers/net/bnx2x_init_values.h
+++ b/drivers/net/bnx2x_init_values.h
@@ -901,31 +901,28 @@
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3760, 0x4},
 	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1e20, 0x42},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3738, 0x9},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3000, 0x400},
-	{OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x3738 + 0x24, 0x10293},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x2c00, 0x2},
-	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3180, 0x42},
-	{OP_SW_E1, USEM_REG_FAST_MEMORY + 0x2c00 + 0x8, 0x20278},
-	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5000, 0x400},
 	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4b68, 0x2},
-	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4000, 0x2},
-	{OP_SW_E1, USEM_REG_FAST_MEMORY + 0x4b68 + 0x8, 0x2027a},
-	{OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x4000 + 0x8, 0x20294},
+	{OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x3738 + 0x24, 0x10293},
+	{OP_SW_E1, USEM_REG_FAST_MEMORY + 0x4b68 + 0x8, 0x20278},
+	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3180, 0x42},
 	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4b10, 0x2},
+	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5000, 0x400},
+	{OP_SW_E1, USEM_REG_FAST_MEMORY + 0x2830, 0x2027a},
+	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4000, 0x2},
+	{OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x4000 + 0x8, 0x20294},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6b68, 0x2},
-	{OP_SW_E1, USEM_REG_FAST_MEMORY + 0x2830, 0x2027c},
 	{OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x6b68 + 0x8, 0x20296},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6b10, 0x2},
 	{OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x74c0, 0x20298},
 	{OP_WR, USEM_REG_FAST_MEMORY + 0x10800, 0x1000000},
-	{OP_SW_E1, USEM_REG_FAST_MEMORY + 0x10c00, 0x10027e},
+	{OP_SW_E1, USEM_REG_FAST_MEMORY + 0x10c00, 0x10027c},
 	{OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x10c00, 0x10029a},
 	{OP_WR, USEM_REG_FAST_MEMORY + 0x10800, 0x0},
-	{OP_SW_E1, USEM_REG_FAST_MEMORY + 0x10c40, 0x10028e},
+	{OP_SW_E1, USEM_REG_FAST_MEMORY + 0x10c40, 0x10028c},
 	{OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x10c40, 0x1002aa},
 	{OP_ZP_E1, USEM_REG_INT_TABLE, 0xc20000},
 	{OP_ZP_E1H, USEM_REG_INT_TABLE, 0xc40000},
-	{OP_WR_64_E1, USEM_REG_INT_TABLE + 0x368, 0x13029e},
+	{OP_WR_64_E1, USEM_REG_INT_TABLE + 0x368, 0x13029c},
 	{OP_WR_64_E1H, USEM_REG_INT_TABLE + 0x368, 0x1302ba},
 	{OP_ZP_E1, USEM_REG_PRAM, 0x311c0000},
 	{OP_ZP_E1H, USEM_REG_PRAM, 0x31070000},
@@ -933,11 +930,11 @@
 	{OP_ZP_E1H, USEM_REG_PRAM + 0x8000, 0x330e0c42},
 	{OP_ZP_E1, USEM_REG_PRAM + 0x10000, 0x38561919},
 	{OP_ZP_E1H, USEM_REG_PRAM + 0x10000, 0x389b1906},
-	{OP_WR_64_E1, USEM_REG_PRAM + 0x17fe0, 0x500402a0},
+	{OP_WR_64_E1, USEM_REG_PRAM + 0x17fe0, 0x5004029e},
 	{OP_ZP_E1H, USEM_REG_PRAM + 0x18000, 0x132272d},
 	{OP_WR_64_E1H, USEM_REG_PRAM + 0x18250, 0x4fb602bc},
-#define USEM_COMMON_END         790
-#define USEM_PORT0_START        790
+#define USEM_COMMON_END         787
+#define USEM_PORT0_START        787
 	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1400, 0xa0},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x9000, 0xa0},
 	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1900, 0xa},
@@ -950,44 +947,27 @@
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3288, 0x96},
 	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x5440, 0x72},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5000, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3000, 0x20},
-	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5100, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3100, 0x20},
-	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5200, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3200, 0x20},
-	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5300, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3300, 0x20},
-	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5400, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3400, 0x20},
-	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5500, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3500, 0x20},
-	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5600, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3600, 0x20},
-	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5700, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3700, 0x20},
-	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5800, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3800, 0x20},
-	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5900, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3900, 0x20},
-	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5a00, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3a00, 0x20},
-	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5b00, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3b00, 0x20},
-	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5c00, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3c00, 0x20},
-	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5d00, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3d00, 0x20},
-	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5e00, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3e00, 0x20},
-	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5f00, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3f00, 0x20},
-	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6b78, 0x52},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x2c10, 0x2},
-	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6e08, 0xc},
 	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4b78, 0x52},
+	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5100, 0x20},
 	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4e08, 0xc},
-#define USEM_PORT0_END          838
-#define USEM_PORT1_START        838
+	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5200, 0x20},
+	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5300, 0x20},
+	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5400, 0x20},
+	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5500, 0x20},
+	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5600, 0x20},
+	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5700, 0x20},
+	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5800, 0x20},
+	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5900, 0x20},
+	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5a00, 0x20},
+	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5b00, 0x20},
+	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5c00, 0x20},
+	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5d00, 0x20},
+	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5e00, 0x20},
+	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5f00, 0x20},
+	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6b78, 0x52},
+	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6e08, 0xc},
+#define USEM_PORT0_END          818
+#define USEM_PORT1_START        818
 	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1680, 0xa0},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x9280, 0xa0},
 	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1928, 0xa},
@@ -1000,76 +980,59 @@
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x34e0, 0x96},
 	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x5608, 0x72},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5080, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3080, 0x20},
-	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5180, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3180, 0x20},
-	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5280, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3280, 0x20},
-	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5380, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3380, 0x20},
-	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5480, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3480, 0x20},
-	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5580, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3580, 0x20},
-	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5680, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3680, 0x20},
-	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5780, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3780, 0x20},
-	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5880, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3880, 0x20},
-	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5980, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3980, 0x20},
-	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5a80, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3a80, 0x20},
-	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5b80, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3b80, 0x20},
-	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5c80, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3c80, 0x20},
-	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5d80, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3d80, 0x20},
-	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5e80, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3e80, 0x20},
-	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5f80, 0x20},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3f80, 0x20},
-	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6cc0, 0x52},
-	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x2c20, 0x2},
-	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6e38, 0xc},
 	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4cc0, 0x52},
+	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5180, 0x20},
 	{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4e38, 0xc},
-#define USEM_PORT1_END          886
-#define USEM_FUNC0_START        886
+	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5280, 0x20},
+	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5380, 0x20},
+	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5480, 0x20},
+	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5580, 0x20},
+	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5680, 0x20},
+	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5780, 0x20},
+	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5880, 0x20},
+	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5980, 0x20},
+	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5a80, 0x20},
+	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5b80, 0x20},
+	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5c80, 0x20},
+	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5d80, 0x20},
+	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5e80, 0x20},
+	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5f80, 0x20},
+	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6cc0, 0x52},
+	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6e38, 0xc},
+#define USEM_PORT1_END          849
+#define USEM_FUNC0_START        849
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3000, 0x4},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4010, 0x2},
-#define USEM_FUNC0_END          888
-#define USEM_FUNC1_START        888
+#define USEM_FUNC0_END          851
+#define USEM_FUNC1_START        851
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3010, 0x4},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4020, 0x2},
-#define USEM_FUNC1_END          890
-#define USEM_FUNC2_START        890
+#define USEM_FUNC1_END          853
+#define USEM_FUNC2_START        853
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3020, 0x4},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4030, 0x2},
-#define USEM_FUNC2_END          892
-#define USEM_FUNC3_START        892
+#define USEM_FUNC2_END          855
+#define USEM_FUNC3_START        855
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3030, 0x4},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4040, 0x2},
-#define USEM_FUNC3_END          894
-#define USEM_FUNC4_START        894
+#define USEM_FUNC3_END          857
+#define USEM_FUNC4_START        857
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3040, 0x4},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4050, 0x2},
-#define USEM_FUNC4_END          896
-#define USEM_FUNC5_START        896
+#define USEM_FUNC4_END          859
+#define USEM_FUNC5_START        859
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3050, 0x4},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4060, 0x2},
-#define USEM_FUNC5_END          898
-#define USEM_FUNC6_START        898
+#define USEM_FUNC5_END          861
+#define USEM_FUNC6_START        861
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3060, 0x4},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4070, 0x2},
-#define USEM_FUNC6_END          900
-#define USEM_FUNC7_START        900
+#define USEM_FUNC6_END          863
+#define USEM_FUNC7_START        863
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3070, 0x4},
 	{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4080, 0x2},
-#define USEM_FUNC7_END          902
-#define CSEM_COMMON_START       902
+#define USEM_FUNC7_END          865
+#define CSEM_COMMON_START       865
 	{OP_RD, CSEM_REG_MSG_NUM_FIC0, 0x0},
 	{OP_RD, CSEM_REG_MSG_NUM_FIC1, 0x0},
 	{OP_RD, CSEM_REG_MSG_NUM_FOC0, 0x0},
@@ -1128,29 +1091,29 @@
 	{OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x11e8, 0x0},
 	{OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x25c0, 0x240},
 	{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3000, 0xc0},
-	{OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x2ec8, 0x802a2},
+	{OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x2ec8, 0x802a0},
 	{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x4070, 0x80},
 	{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x5280, 0x4},
 	{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x6280, 0x240},
 	{OP_SW_E1H, CSEM_REG_FAST_MEMORY + 0x6b88, 0x2002be},
 	{OP_WR, CSEM_REG_FAST_MEMORY + 0x10800, 0x13fffff},
-	{OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x10c00, 0x1002aa},
+	{OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x10c00, 0x1002a8},
 	{OP_SW_E1H, CSEM_REG_FAST_MEMORY + 0x10c00, 0x1002de},
 	{OP_WR, CSEM_REG_FAST_MEMORY + 0x10800, 0x0},
-	{OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x10c40, 0x1002ba},
+	{OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x10c40, 0x1002b8},
 	{OP_SW_E1H, CSEM_REG_FAST_MEMORY + 0x10c40, 0x1002ee},
 	{OP_ZP_E1, CSEM_REG_INT_TABLE, 0x6e0000},
 	{OP_ZP_E1H, CSEM_REG_INT_TABLE, 0x6f0000},
-	{OP_WR_64_E1, CSEM_REG_INT_TABLE + 0x380, 0x1002ca},
+	{OP_WR_64_E1, CSEM_REG_INT_TABLE + 0x380, 0x1002c8},
 	{OP_WR_64_E1H, CSEM_REG_INT_TABLE + 0x380, 0x1002fe},
 	{OP_ZP_E1, CSEM_REG_PRAM, 0x32580000},
 	{OP_ZP_E1H, CSEM_REG_PRAM, 0x31fa0000},
 	{OP_ZP_E1, CSEM_REG_PRAM + 0x8000, 0x18270c96},
 	{OP_ZP_E1H, CSEM_REG_PRAM + 0x8000, 0x19040c7f},
-	{OP_WR_64_E1, CSEM_REG_PRAM + 0xb210, 0x682402cc},
+	{OP_WR_64_E1, CSEM_REG_PRAM + 0xb210, 0x682402ca},
 	{OP_WR_64_E1H, CSEM_REG_PRAM + 0xb430, 0x67e00300},
-#define CSEM_COMMON_END         981
-#define CSEM_PORT0_START        981
+#define CSEM_COMMON_END         944
+#define CSEM_PORT0_START        944
 	{OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1400, 0xa0},
 	{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x8000, 0xa0},
 	{OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1900, 0x10},
@@ -1163,8 +1126,8 @@
 	{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x6040, 0x30},
 	{OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x3040, 0x6},
 	{OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x2410, 0x30},
-#define CSEM_PORT0_END          993
-#define CSEM_PORT1_START        993
+#define CSEM_PORT0_END          956
+#define CSEM_PORT1_START        956
 	{OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1680, 0xa0},
 	{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x8280, 0xa0},
 	{OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1940, 0x10},
@@ -1177,43 +1140,43 @@
 	{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x6100, 0x30},
 	{OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x3058, 0x6},
 	{OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x24d0, 0x30},
-#define CSEM_PORT1_END          1005
-#define CSEM_FUNC0_START        1005
+#define CSEM_PORT1_END          968
+#define CSEM_FUNC0_START        968
 	{OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1148, 0x0},
 	{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3300, 0x2},
-#define CSEM_FUNC0_END          1007
-#define CSEM_FUNC1_START        1007
+#define CSEM_FUNC0_END          970
+#define CSEM_FUNC1_START        970
 	{OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x114c, 0x0},
 	{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3308, 0x2},
-#define CSEM_FUNC1_END          1009
-#define CSEM_FUNC2_START        1009
+#define CSEM_FUNC1_END          972
+#define CSEM_FUNC2_START        972
 	{OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1150, 0x0},
 	{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3310, 0x2},
-#define CSEM_FUNC2_END          1011
-#define CSEM_FUNC3_START        1011
+#define CSEM_FUNC2_END          974
+#define CSEM_FUNC3_START        974
 	{OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1154, 0x0},
 	{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3318, 0x2},
-#define CSEM_FUNC3_END          1013
-#define CSEM_FUNC4_START        1013
+#define CSEM_FUNC3_END          976
+#define CSEM_FUNC4_START        976
 	{OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1158, 0x0},
 	{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3320, 0x2},
-#define CSEM_FUNC4_END          1015
-#define CSEM_FUNC5_START        1015
+#define CSEM_FUNC4_END          978
+#define CSEM_FUNC5_START        978
 	{OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x115c, 0x0},
 	{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3328, 0x2},
-#define CSEM_FUNC5_END          1017
-#define CSEM_FUNC6_START        1017
+#define CSEM_FUNC5_END          980
+#define CSEM_FUNC6_START        980
 	{OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1160, 0x0},
 	{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3330, 0x2},
-#define CSEM_FUNC6_END          1019
-#define CSEM_FUNC7_START        1019
+#define CSEM_FUNC6_END          982
+#define CSEM_FUNC7_START        982
 	{OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1164, 0x0},
 	{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3338, 0x2},
-#define CSEM_FUNC7_END          1021
-#define XPB_COMMON_START        1021
+#define CSEM_FUNC7_END          984
+#define XPB_COMMON_START        984
 	{OP_WR, GRCBASE_XPB + PB_REG_CONTROL, 0x20},
-#define XPB_COMMON_END          1022
-#define DQ_COMMON_START         1022
+#define XPB_COMMON_END          985
+#define DQ_COMMON_START         985
 	{OP_WR, DORQ_REG_MODE_ACT, 0x2},
 	{OP_WR, DORQ_REG_NORM_CID_OFST, 0x3},
 	{OP_WR, DORQ_REG_OUTST_REQ, 0x4},
@@ -1232,8 +1195,8 @@
 	{OP_WR, DORQ_REG_DQ_FIFO_AFULL_TH, 0x76c},
 	{OP_WR, DORQ_REG_REGN, 0x7c1004},
 	{OP_WR, DORQ_REG_IF_EN, 0xf},
-#define DQ_COMMON_END           1040
-#define TIMERS_COMMON_START     1040
+#define DQ_COMMON_END           1003
+#define TIMERS_COMMON_START     1003
 	{OP_ZR, TM_REG_CLIN_PRIOR0_CLIENT, 0x2},
 	{OP_WR, TM_REG_LIN_SETCLR_FIFO_ALFULL_THR, 0x1c},
 	{OP_WR, TM_REG_CFC_AC_CRDCNT_VAL, 0x1},
@@ -1256,14 +1219,14 @@
 	{OP_WR, TM_REG_EN_CL0_INPUT, 0x1},
 	{OP_WR, TM_REG_EN_CL1_INPUT, 0x1},
 	{OP_WR, TM_REG_EN_CL2_INPUT, 0x1},
-#define TIMERS_COMMON_END       1062
-#define TIMERS_PORT0_START      1062
+#define TIMERS_COMMON_END       1025
+#define TIMERS_PORT0_START      1025
 	{OP_ZR, TM_REG_LIN0_PHY_ADDR, 0x2},
-#define TIMERS_PORT0_END        1063
-#define TIMERS_PORT1_START      1063
+#define TIMERS_PORT0_END        1026
+#define TIMERS_PORT1_START      1026
 	{OP_ZR, TM_REG_LIN1_PHY_ADDR, 0x2},
-#define TIMERS_PORT1_END        1064
-#define XSDM_COMMON_START       1064
+#define TIMERS_PORT1_END        1027
+#define XSDM_COMMON_START       1027
 	{OP_WR_E1, XSDM_REG_CFC_RSP_START_ADDR, 0x614},
 	{OP_WR_E1H, XSDM_REG_CFC_RSP_START_ADDR, 0x424},
 	{OP_WR_E1, XSDM_REG_CMP_COUNTER_START_ADDR, 0x600},
@@ -1311,8 +1274,8 @@
 	{OP_WR_ASIC, XSDM_REG_TIMER_TICK, 0x3e8},
 	{OP_WR_EMUL, XSDM_REG_TIMER_TICK, 0x1},
 	{OP_WR_FPGA, XSDM_REG_TIMER_TICK, 0xa},
-#define XSDM_COMMON_END         1111
-#define QM_COMMON_START         1111
+#define XSDM_COMMON_END         1074
+#define QM_COMMON_START         1074
 	{OP_WR, QM_REG_ACTCTRINITVAL_0, 0x6},
 	{OP_WR, QM_REG_ACTCTRINITVAL_1, 0x5},
 	{OP_WR, QM_REG_ACTCTRINITVAL_2, 0xa},
@@ -1613,8 +1576,8 @@
 	{OP_WR_E1H, QM_REG_PQ2PCIFUNC_6, 0x5},
 	{OP_WR_E1H, QM_REG_PQ2PCIFUNC_7, 0x7},
 	{OP_WR, QM_REG_CMINTEN, 0xff},
-#define QM_COMMON_END           1411
-#define PBF_COMMON_START        1411
+#define QM_COMMON_END           1374
+#define PBF_COMMON_START        1374
 	{OP_WR, PBF_REG_INIT, 0x1},
 	{OP_WR, PBF_REG_INIT_P4, 0x1},
 	{OP_WR, PBF_REG_MAC_LB_ENABLE, 0x1},
@@ -1622,20 +1585,20 @@
 	{OP_WR, PBF_REG_INIT_P4, 0x0},
 	{OP_WR, PBF_REG_INIT, 0x0},
 	{OP_WR, PBF_REG_DISABLE_NEW_TASK_PROC_P4, 0x0},
-#define PBF_COMMON_END          1418
-#define PBF_PORT0_START         1418
+#define PBF_COMMON_END          1381
+#define PBF_PORT0_START         1381
 	{OP_WR, PBF_REG_INIT_P0, 0x1},
 	{OP_WR, PBF_REG_MAC_IF0_ENABLE, 0x1},
 	{OP_WR, PBF_REG_INIT_P0, 0x0},
 	{OP_WR, PBF_REG_DISABLE_NEW_TASK_PROC_P0, 0x0},
-#define PBF_PORT0_END           1422
-#define PBF_PORT1_START         1422
+#define PBF_PORT0_END           1385
+#define PBF_PORT1_START         1385
 	{OP_WR, PBF_REG_INIT_P1, 0x1},
 	{OP_WR, PBF_REG_MAC_IF1_ENABLE, 0x1},
 	{OP_WR, PBF_REG_INIT_P1, 0x0},
 	{OP_WR, PBF_REG_DISABLE_NEW_TASK_PROC_P1, 0x0},
-#define PBF_PORT1_END           1426
-#define XCM_COMMON_START        1426
+#define PBF_PORT1_END           1389
+#define XCM_COMMON_START        1389
 	{OP_WR, XCM_REG_XX_OVFL_EVNT_ID, 0x32},
 	{OP_WR, XCM_REG_XQM_XCM_HDR_P, 0x3150020},
 	{OP_WR, XCM_REG_XQM_XCM_HDR_S, 0x3150020},
@@ -1670,7 +1633,7 @@
 	{OP_WR_E1, XCM_REG_XX_MSG_NUM, 0x1f},
 	{OP_WR_E1H, XCM_REG_XX_MSG_NUM, 0x20},
 	{OP_ZR, XCM_REG_XX_TABLE, 0x12},
-	{OP_SW_E1, XCM_REG_XX_DESCR_TABLE, 0x1f02ce},
+	{OP_SW_E1, XCM_REG_XX_DESCR_TABLE, 0x1f02cc},
 	{OP_SW_E1H, XCM_REG_XX_DESCR_TABLE, 0x1f0302},
 	{OP_WR, XCM_REG_N_SM_CTX_LD_0, 0xf},
 	{OP_WR, XCM_REG_N_SM_CTX_LD_1, 0x7},
@@ -1700,8 +1663,8 @@
 	{OP_WR, XCM_REG_CDU_SM_WR_IFEN, 0x1},
 	{OP_WR, XCM_REG_CDU_SM_RD_IFEN, 0x1},
 	{OP_WR, XCM_REG_XCM_CFC_IFEN, 0x1},
-#define XCM_COMMON_END          1490
-#define XCM_PORT0_START         1490
+#define XCM_COMMON_END          1453
+#define XCM_PORT0_START         1453
 	{OP_WR_E1, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8},
 	{OP_WR_E1, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2},
 	{OP_WR_E1, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0},
@@ -1710,8 +1673,8 @@
 	{OP_WR_E1, XCM_REG_WU_DA_CNT_CMD10, 0x2},
 	{OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff},
 	{OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff},
-#define XCM_PORT0_END           1498
-#define XCM_PORT1_START         1498
+#define XCM_PORT0_END           1461
+#define XCM_PORT1_START         1461
 	{OP_WR_E1, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8},
 	{OP_WR_E1, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2},
 	{OP_WR_E1, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0},
@@ -1720,8 +1683,8 @@
 	{OP_WR_E1, XCM_REG_WU_DA_CNT_CMD11, 0x2},
 	{OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff},
 	{OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff},
-#define XCM_PORT1_END           1506
-#define XCM_FUNC0_START         1506
+#define XCM_PORT1_END           1469
+#define XCM_FUNC0_START         1469
 	{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8},
 	{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2},
 	{OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0},
@@ -1731,8 +1694,8 @@
 	{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff},
 	{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff},
 	{OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0},
-#define XCM_FUNC0_END           1515
-#define XCM_FUNC1_START         1515
+#define XCM_FUNC0_END           1478
+#define XCM_FUNC1_START         1478
 	{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8},
 	{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2},
 	{OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0},
@@ -1742,8 +1705,8 @@
 	{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff},
 	{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff},
 	{OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0},
-#define XCM_FUNC1_END           1524
-#define XCM_FUNC2_START         1524
+#define XCM_FUNC1_END           1487
+#define XCM_FUNC2_START         1487
 	{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8},
 	{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2},
 	{OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0},
@@ -1753,8 +1716,8 @@
 	{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff},
 	{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff},
 	{OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0},
-#define XCM_FUNC2_END           1533
-#define XCM_FUNC3_START         1533
+#define XCM_FUNC2_END           1496
+#define XCM_FUNC3_START         1496
 	{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8},
 	{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2},
 	{OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0},
@@ -1764,8 +1727,8 @@
 	{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff},
 	{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff},
 	{OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0},
-#define XCM_FUNC3_END           1542
-#define XCM_FUNC4_START         1542
+#define XCM_FUNC3_END           1505
+#define XCM_FUNC4_START         1505
 	{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8},
 	{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2},
 	{OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0},
@@ -1775,8 +1738,8 @@
 	{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff},
 	{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff},
 	{OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0},
-#define XCM_FUNC4_END           1551
-#define XCM_FUNC5_START         1551
+#define XCM_FUNC4_END           1514
+#define XCM_FUNC5_START         1514
 	{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8},
 	{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2},
 	{OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0},
@@ -1786,8 +1749,8 @@
 	{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff},
 	{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff},
 	{OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0},
-#define XCM_FUNC5_END           1560
-#define XCM_FUNC6_START         1560
+#define XCM_FUNC5_END           1523
+#define XCM_FUNC6_START         1523
 	{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8},
 	{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2},
 	{OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0},
@@ -1797,8 +1760,8 @@
 	{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff},
 	{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff},
 	{OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0},
-#define XCM_FUNC6_END           1569
-#define XCM_FUNC7_START         1569
+#define XCM_FUNC6_END           1532
+#define XCM_FUNC7_START         1532
 	{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8},
 	{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2},
 	{OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0},
@@ -1808,8 +1771,8 @@
 	{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff},
 	{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff},
 	{OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0},
-#define XCM_FUNC7_END           1578
-#define XSEM_COMMON_START       1578
+#define XCM_FUNC7_END           1541
+#define XSEM_COMMON_START       1541
 	{OP_RD, XSEM_REG_MSG_NUM_FIC0, 0x0},
 	{OP_RD, XSEM_REG_MSG_NUM_FIC1, 0x0},
 	{OP_RD, XSEM_REG_MSG_NUM_FOC0, 0x0},
@@ -1876,9 +1839,9 @@
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x9000, 0x2},
 	{OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x3368, 0x0},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x21a8, 0x86},
-	{OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3370, 0x202ed},
+	{OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3370, 0x202eb},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x2000, 0x20},
-	{OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3b90, 0x402ef},
+	{OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3b90, 0x402ed},
 	{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x23c8, 0x0},
 	{OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1518, 0x1},
 	{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x23d0, 0x20321},
@@ -1886,29 +1849,29 @@
 	{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2498, 0x40323},
 	{OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1838, 0x0},
 	{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x2ac8, 0x0},
-	{OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x1820, 0x202f3},
+	{OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x1820, 0x202f1},
 	{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x2ab8, 0x0},
 	{OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4ac0, 0x2},
 	{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x3010, 0x1},
 	{OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4b00, 0x4},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x4040, 0x10},
-	{OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x1f50, 0x202f5},
+	{OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x1f50, 0x202f3},
 	{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x4000, 0x100327},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6ac0, 0x2},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6b00, 0x4},
 	{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x83b0, 0x20337},
 	{OP_WR, XSEM_REG_FAST_MEMORY + 0x10800, 0x0},
-	{OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c00, 0x1002f7},
+	{OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c00, 0x1002f5},
 	{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x10c00, 0x100339},
 	{OP_WR, XSEM_REG_FAST_MEMORY + 0x10800, 0x1000000},
-	{OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c40, 0x80307},
+	{OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c40, 0x80305},
 	{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x10c40, 0x80349},
 	{OP_WR, XSEM_REG_FAST_MEMORY + 0x10800, 0x2000000},
-	{OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c60, 0x8030f},
+	{OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c60, 0x8030d},
 	{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x10c60, 0x80351},
 	{OP_ZP_E1, XSEM_REG_INT_TABLE, 0xa90000},
 	{OP_ZP_E1H, XSEM_REG_INT_TABLE, 0xac0000},
-	{OP_WR_64_E1, XSEM_REG_INT_TABLE + 0x368, 0x130317},
+	{OP_WR_64_E1, XSEM_REG_INT_TABLE + 0x368, 0x130315},
 	{OP_WR_64_E1H, XSEM_REG_INT_TABLE + 0x368, 0x130359},
 	{OP_ZP_E1, XSEM_REG_PRAM, 0x344e0000},
 	{OP_ZP_E1H, XSEM_REG_PRAM, 0x34620000},
@@ -1918,10 +1881,10 @@
 	{OP_ZP_E1H, XSEM_REG_PRAM + 0x10000, 0x3e971b22},
 	{OP_ZP_E1, XSEM_REG_PRAM + 0x18000, 0x1dd02ad2},
 	{OP_ZP_E1H, XSEM_REG_PRAM + 0x18000, 0x21542ac8},
-	{OP_WR_64_E1, XSEM_REG_PRAM + 0x1c0d0, 0x47e60319},
+	{OP_WR_64_E1, XSEM_REG_PRAM + 0x1c0d0, 0x47e60317},
 	{OP_WR_64_E1H, XSEM_REG_PRAM + 0x1c8d0, 0x46e6035b},
-#define XSEM_COMMON_END         1688
-#define XSEM_PORT0_START        1688
+#define XSEM_COMMON_END         1651
+#define XSEM_PORT0_START        1651
 	{OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3ba0, 0x10},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xc000, 0xfc},
 	{OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3c20, 0x1c},
@@ -1934,7 +1897,7 @@
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x26e8, 0x1c},
 	{OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x3b58, 0x0},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x27c8, 0x1c},
-	{OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3d10, 0x10031b},
+	{OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3d10, 0x100319},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa000, 0x28},
 	{OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1500, 0x0},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa140, 0xc},
@@ -1950,12 +1913,12 @@
 	{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x6ac8, 0x2035d},
 	{OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x50b8, 0x1},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6b10, 0x42},
-	{OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x4ac8, 0x2032b},
+	{OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x4ac8, 0x20329},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6d20, 0x4},
 	{OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4b10, 0x42},
 	{OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4d20, 0x4},
-#define XSEM_PORT0_END          1720
-#define XSEM_PORT1_START        1720
+#define XSEM_PORT0_END          1683
+#define XSEM_PORT1_START        1683
 	{OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3be0, 0x10},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xc3f0, 0xfc},
 	{OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3c90, 0x1c},
@@ -1968,7 +1931,7 @@
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x2758, 0x1c},
 	{OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x3b5c, 0x0},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x2838, 0x1c},
-	{OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3d50, 0x10032d},
+	{OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3d50, 0x10032b},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa0a0, 0x28},
 	{OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1504, 0x0},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa170, 0xc},
@@ -1984,65 +1947,65 @@
 	{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x6ad0, 0x2035f},
 	{OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x50bc, 0x1},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6c18, 0x42},
-	{OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x4ad0, 0x2033d},
+	{OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x4ad0, 0x2033b},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6d30, 0x4},
 	{OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4c18, 0x42},
 	{OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4d30, 0x4},
-#define XSEM_PORT1_END          1752
-#define XSEM_FUNC0_START        1752
+#define XSEM_PORT1_END          1715
+#define XSEM_FUNC0_START        1715
 	{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7e0, 0x0},
 	{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x28b8, 0x100361},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5048, 0xe},
-#define XSEM_FUNC0_END          1755
-#define XSEM_FUNC1_START        1755
+#define XSEM_FUNC0_END          1718
+#define XSEM_FUNC1_START        1718
 	{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7e4, 0x0},
 	{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x28f8, 0x100371},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5080, 0xe},
-#define XSEM_FUNC1_END          1758
-#define XSEM_FUNC2_START        1758
+#define XSEM_FUNC1_END          1721
+#define XSEM_FUNC2_START        1721
 	{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7e8, 0x0},
 	{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2938, 0x100381},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x50b8, 0xe},
-#define XSEM_FUNC2_END          1761
-#define XSEM_FUNC3_START        1761
+#define XSEM_FUNC2_END          1724
+#define XSEM_FUNC3_START        1724
 	{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7ec, 0x0},
 	{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2978, 0x100391},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x50f0, 0xe},
-#define XSEM_FUNC3_END          1764
-#define XSEM_FUNC4_START        1764
+#define XSEM_FUNC3_END          1727
+#define XSEM_FUNC4_START        1727
 	{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7f0, 0x0},
 	{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x29b8, 0x1003a1},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5128, 0xe},
-#define XSEM_FUNC4_END          1767
-#define XSEM_FUNC5_START        1767
+#define XSEM_FUNC4_END          1730
+#define XSEM_FUNC5_START        1730
 	{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7f4, 0x0},
 	{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x29f8, 0x1003b1},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5160, 0xe},
-#define XSEM_FUNC5_END          1770
-#define XSEM_FUNC6_START        1770
+#define XSEM_FUNC5_END          1733
+#define XSEM_FUNC6_START        1733
 	{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7f8, 0x0},
 	{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2a38, 0x1003c1},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5198, 0xe},
-#define XSEM_FUNC6_END          1773
-#define XSEM_FUNC7_START        1773
+#define XSEM_FUNC6_END          1736
+#define XSEM_FUNC7_START        1736
 	{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7fc, 0x0},
 	{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2a78, 0x1003d1},
 	{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x51d0, 0xe},
-#define XSEM_FUNC7_END          1776
-#define CDU_COMMON_START        1776
+#define XSEM_FUNC7_END          1739
+#define CDU_COMMON_START        1739
 	{OP_WR, CDU_REG_CDU_CONTROL0, 0x1},
 	{OP_WR_E1H, CDU_REG_MF_MODE, 0x1},
 	{OP_WR, CDU_REG_CDU_CHK_MASK0, 0x3d000},
 	{OP_WR, CDU_REG_CDU_CHK_MASK1, 0x3d},
-	{OP_WB_E1, CDU_REG_L1TT, 0x200033f},
+	{OP_WB_E1, CDU_REG_L1TT, 0x200033d},
 	{OP_WB_E1H, CDU_REG_L1TT, 0x20003e1},
-	{OP_WB_E1, CDU_REG_MATT, 0x20053f},
+	{OP_WB_E1, CDU_REG_MATT, 0x20053d},
 	{OP_WB_E1H, CDU_REG_MATT, 0x2805e1},
 	{OP_ZR_E1, CDU_REG_MATT + 0x80, 0x2},
-	{OP_WB_E1, CDU_REG_MATT + 0x88, 0x6055f},
+	{OP_WB_E1, CDU_REG_MATT + 0x88, 0x6055d},
 	{OP_ZR, CDU_REG_MATT + 0xa0, 0x18},
-#define CDU_COMMON_END          1787
-#define DMAE_COMMON_START       1787
+#define CDU_COMMON_END          1750
+#define DMAE_COMMON_START       1750
 	{OP_ZR, DMAE_REG_CMD_MEM, 0xe0},
 	{OP_WR, DMAE_REG_CRC16C_INIT, 0x0},
 	{OP_WR, DMAE_REG_CRC16T10_INIT, 0x1},
@@ -2050,24 +2013,24 @@
 	{OP_WR_E1H, DMAE_REG_PXP_REQ_INIT_CRD, 0x2},
 	{OP_WR, DMAE_REG_PCI_IFEN, 0x1},
 	{OP_WR, DMAE_REG_GRC_IFEN, 0x1},
-#define DMAE_COMMON_END         1794
-#define PXP_COMMON_START        1794
-	{OP_WB_E1, PXP_REG_HST_INBOUND_INT + 0x400, 0x50565},
+#define DMAE_COMMON_END         1757
+#define PXP_COMMON_START        1757
+	{OP_WB_E1, PXP_REG_HST_INBOUND_INT + 0x400, 0x50563},
 	{OP_WB_E1H, PXP_REG_HST_INBOUND_INT + 0x400, 0x50609},
-	{OP_WB_E1, PXP_REG_HST_INBOUND_INT + 0x420, 0x5056a},
+	{OP_WB_E1, PXP_REG_HST_INBOUND_INT + 0x420, 0x50568},
 	{OP_WB_E1H, PXP_REG_HST_INBOUND_INT, 0x5060e},
-	{OP_WB_E1, PXP_REG_HST_INBOUND_INT, 0x5056f},
-#define PXP_COMMON_END          1799
-#define CFC_COMMON_START        1799
+	{OP_WB_E1, PXP_REG_HST_INBOUND_INT, 0x5056d},
+#define PXP_COMMON_END          1762
+#define CFC_COMMON_START        1762
 	{OP_ZR_E1H, CFC_REG_LINK_LIST, 0x100},
 	{OP_WR, CFC_REG_CONTROL0, 0x10},
 	{OP_WR, CFC_REG_DISABLE_ON_ERROR, 0x3fff},
 	{OP_WR, CFC_REG_LCREQ_WEIGHTS, 0x84924a},
-#define CFC_COMMON_END          1803
-#define HC_COMMON_START         1803
+#define CFC_COMMON_END          1766
+#define HC_COMMON_START         1766
 	{OP_ZR_E1, HC_REG_USTORM_ADDR_FOR_COALESCE, 0x4},
-#define HC_COMMON_END           1804
-#define HC_PORT0_START          1804
+#define HC_COMMON_END           1767
+#define HC_PORT0_START          1767
 	{OP_WR_E1, HC_REG_CONFIG_0, 0x1080},
 	{OP_ZR_E1, HC_REG_UC_RAM_ADDR_0, 0x2},
 	{OP_WR_E1, HC_REG_ATTN_NUM_P0, 0x10},
@@ -2086,8 +2049,8 @@
 	{OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a},
 	{OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a},
 	{OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a},
-#define HC_PORT0_END            1822
-#define HC_PORT1_START          1822
+#define HC_PORT0_END            1785
+#define HC_PORT1_START          1785
 	{OP_WR_E1, HC_REG_CONFIG_1, 0x1080},
 	{OP_ZR_E1, HC_REG_UC_RAM_ADDR_1, 0x2},
 	{OP_WR_E1, HC_REG_ATTN_NUM_P1, 0x10},
@@ -2106,8 +2069,8 @@
 	{OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a},
 	{OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a},
 	{OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a},
-#define HC_PORT1_END            1840
-#define HC_FUNC0_START          1840
+#define HC_PORT1_END            1803
+#define HC_FUNC0_START          1803
 	{OP_WR_E1H, HC_REG_CONFIG_0, 0x1080},
 	{OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x0},
 	{OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10},
@@ -2123,8 +2086,8 @@
 	{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a},
 	{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a},
 	{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a},
-#define HC_FUNC0_END            1855
-#define HC_FUNC1_START          1855
+#define HC_FUNC0_END            1818
+#define HC_FUNC1_START          1818
 	{OP_WR_E1H, HC_REG_CONFIG_1, 0x1080},
 	{OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x1},
 	{OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10},
@@ -2140,8 +2103,8 @@
 	{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a},
 	{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a},
 	{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a},
-#define HC_FUNC1_END            1870
-#define HC_FUNC2_START          1870
+#define HC_FUNC1_END            1833
+#define HC_FUNC2_START          1833
 	{OP_WR_E1H, HC_REG_CONFIG_0, 0x1080},
 	{OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x2},
 	{OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10},
@@ -2157,8 +2120,8 @@
 	{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a},
 	{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a},
 	{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a},
-#define HC_FUNC2_END            1885
-#define HC_FUNC3_START          1885
+#define HC_FUNC2_END            1848
+#define HC_FUNC3_START          1848
 	{OP_WR_E1H, HC_REG_CONFIG_1, 0x1080},
 	{OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x3},
 	{OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10},
@@ -2174,8 +2137,8 @@
 	{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a},
 	{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a},
 	{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a},
-#define HC_FUNC3_END            1900
-#define HC_FUNC4_START          1900
+#define HC_FUNC3_END            1863
+#define HC_FUNC4_START          1863
 	{OP_WR_E1H, HC_REG_CONFIG_0, 0x1080},
 	{OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x4},
 	{OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10},
@@ -2191,8 +2154,8 @@
 	{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a},
 	{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a},
 	{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a},
-#define HC_FUNC4_END            1915
-#define HC_FUNC5_START          1915
+#define HC_FUNC4_END            1878
+#define HC_FUNC5_START          1878
 	{OP_WR_E1H, HC_REG_CONFIG_1, 0x1080},
 	{OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x5},
 	{OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10},
@@ -2208,8 +2171,8 @@
 	{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a},
 	{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a},
 	{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a},
-#define HC_FUNC5_END            1930
-#define HC_FUNC6_START          1930
+#define HC_FUNC5_END            1893
+#define HC_FUNC6_START          1893
 	{OP_WR_E1H, HC_REG_CONFIG_0, 0x1080},
 	{OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x6},
 	{OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10},
@@ -2225,8 +2188,8 @@
 	{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a},
 	{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a},
 	{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a},
-#define HC_FUNC6_END            1945
-#define HC_FUNC7_START          1945
+#define HC_FUNC6_END            1908
+#define HC_FUNC7_START          1908
 	{OP_WR_E1H, HC_REG_CONFIG_1, 0x1080},
 	{OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x7},
 	{OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10},
@@ -2242,8 +2205,8 @@
 	{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a},
 	{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a},
 	{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a},
-#define HC_FUNC7_END            1960
-#define PXP2_COMMON_START       1960
+#define HC_FUNC7_END            1923
+#define PXP2_COMMON_START       1923
 	{OP_WR_E1, PXP2_REG_PGL_CONTROL0, 0xe38340},
 	{OP_WR_E1H, PXP2_REG_RQ_DRAM_ALIGN, 0x1},
 	{OP_WR, PXP2_REG_PGL_CONTROL1, 0x3c10},
@@ -2361,8 +2324,8 @@
 	{OP_WR_E1H, PXP2_REG_RQ_ILT_MODE, 0x1},
 	{OP_WR, PXP2_REG_RQ_RBC_DONE, 0x1},
 	{OP_WR_E1H, PXP2_REG_PGL_CONTROL0, 0xe38340},
-#define PXP2_COMMON_END         2077
-#define MISC_AEU_COMMON_START   2077
+#define PXP2_COMMON_END         2040
+#define MISC_AEU_COMMON_START   2040
 	{OP_ZR, MISC_REG_AEU_GENERAL_ATTN_0, 0x16},
 	{OP_WR_E1H, MISC_REG_AEU_ENABLE1_NIG_0, 0x55540000},
 	{OP_WR_E1H, MISC_REG_AEU_ENABLE2_NIG_0, 0x55555555},
@@ -2382,8 +2345,8 @@
 	{OP_WR_E1H, MISC_REG_AEU_ENABLE4_PXP_1, 0x0},
 	{OP_WR_E1H, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0xc00},
 	{OP_WR_E1H, MISC_REG_AEU_GENERAL_MASK, 0x3},
-#define MISC_AEU_COMMON_END     2096
-#define MISC_AEU_PORT0_START    2096
+#define MISC_AEU_COMMON_END     2059
+#define MISC_AEU_PORT0_START    2059
 	{OP_WR_E1, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, 0xbf5c0000},
 	{OP_WR_E1H, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, 0xff5c0000},
 	{OP_WR_E1, MISC_REG_AEU_ENABLE2_FUNC_0_OUT_0, 0xfff51fef},
@@ -2416,8 +2379,8 @@
 	{OP_WR_E1, MISC_REG_AEU_INVERTER_1_FUNC_0, 0x0},
 	{OP_ZR_E1, MISC_REG_AEU_INVERTER_2_FUNC_0, 0x3},
 	{OP_WR_E1, MISC_REG_AEU_MASK_ATTN_FUNC_0, 0x7},
-#define MISC_AEU_PORT0_END      2128
-#define MISC_AEU_PORT1_START    2128
+#define MISC_AEU_PORT0_END      2091
+#define MISC_AEU_PORT1_START    2091
 	{OP_WR_E1, MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0, 0xbf5c0000},
 	{OP_WR_E1H, MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0, 0xff5c0000},
 	{OP_WR_E1, MISC_REG_AEU_ENABLE2_FUNC_1_OUT_0, 0xfff51fef},
@@ -2450,7 +2413,7 @@
 	{OP_WR_E1, MISC_REG_AEU_INVERTER_1_FUNC_1, 0x0},
 	{OP_ZR_E1, MISC_REG_AEU_INVERTER_2_FUNC_1, 0x3},
 	{OP_WR_E1, MISC_REG_AEU_MASK_ATTN_FUNC_1, 0x7},
-#define MISC_AEU_PORT1_END      2160
+#define MISC_AEU_PORT1_END      2123
 
 };
 
@@ -2560,103 +2523,92 @@
 	0x00049c00, 0x00051f80, 0x0005a300, 0x00062680, 0x0006aa00, 0x00072d80,
 	0x0007b100, 0x00083480, 0x0008b800, 0x00093b80, 0x0009bf00, 0x000a4280,
 	0x000ac600, 0x000b4980, 0x000bcd00, 0x000c5080, 0x000cd400, 0x000d5780,
-	0x000ddb00, 0x00001900, 0x00000028, 0x00000000, 0x00100000, 0x00000000,
-	0x00000000, 0xffffffff, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
+	0x000ddb00, 0x00001900, 0x00100000, 0x00000000, 0x00000000, 0xffffffff,
+	0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
+	0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
+	0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
+	0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
+	0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
+	0x40000000, 0x40000000, 0x00000000, 0x00007ff8, 0x00000000, 0x00001500,
+	0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+	0xffffffff, 0xffffffff, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
 	0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
 	0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
 	0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
 	0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
 	0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x00000000, 0x00007ff8,
-	0x00000000, 0x00001500, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
-	0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0x40000000, 0x40000000,
+	0x00000000, 0x00003500, 0x00001000, 0x00002080, 0x00003100, 0x00004180,
+	0x00005200, 0x00006280, 0x00007300, 0x00008380, 0x00009400, 0x0000a480,
+	0x0000b500, 0x0000c580, 0x0000d600, 0x0000e680, 0x0000f700, 0x00010780,
+	0x00011800, 0x00012880, 0x00013900, 0x00014980, 0x00015a00, 0x00016a80,
+	0x00017b00, 0x00018b80, 0x00019c00, 0x0001ac80, 0x0001bd00, 0x0001cd80,
+	0x0001de00, 0x0001ee80, 0x0001ff00, 0x00000000, 0x00010001, 0x00000604,
+	0xccccccc1, 0xffffffff, 0xffffffff, 0xcccc0201, 0xcccccccc, 0x00000000,
+	0xffffffff, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
 	0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
 	0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
 	0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
 	0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
-	0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
-	0x00000000, 0x00007ff8, 0x00000000, 0x00003500, 0x00001000, 0x00002080,
-	0x00003100, 0x00004180, 0x00005200, 0x00006280, 0x00007300, 0x00008380,
-	0x00009400, 0x0000a480, 0x0000b500, 0x0000c580, 0x0000d600, 0x0000e680,
-	0x0000f700, 0x00010780, 0x00011800, 0x00012880, 0x00013900, 0x00014980,
-	0x00015a00, 0x00016a80, 0x00017b00, 0x00018b80, 0x00019c00, 0x0001ac80,
-	0x0001bd00, 0x0001cd80, 0x0001de00, 0x0001ee80, 0x0001ff00, 0x00000000,
-	0x00010001, 0x00000604, 0xccccccc1, 0xffffffff, 0xffffffff, 0xcccc0201,
-	0xcccccccc, 0x00000000, 0xffffffff, 0x40000000, 0x40000000, 0x40000000,
-	0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
-	0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
-	0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
-	0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
-	0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x00000000,
-	0x00007ff8, 0x00000000, 0x00003500, 0x0000ffff, 0x00000000, 0x0000ffff,
+	0x40000000, 0x40000000, 0x40000000, 0x00000000, 0x00007ff8, 0x00000000,
+	0x00003500, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff,
+	0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff,
+	0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x00100000,
 	0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff,
 	0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff,
-	0x00000000, 0x00100000, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff,
-	0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff,
-	0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff,
-	0x00000000, 0x00100000, 0x00000000, 0xfffffff3, 0x320fffff, 0x0c30c30c,
-	0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1,
-	0x30efffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c,
-	0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
-	0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305,
-	0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2,
-	0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c,
-	0xcdcdcdcd, 0xfffffffa, 0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
-	0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xfffffff7, 0x31efffff, 0x0c30c30c,
-	0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5,
-	0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c,
-	0xcdcdcdcd, 0xfffffff3, 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
-	0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, 0x310fffff, 0x0c30c30c,
+	0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x00100000,
+	0x00000000, 0xfffffff3, 0x320fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
+	0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, 0x30efffff, 0x0c30c30c,
 	0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6,
 	0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c,
 	0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014,
 	0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c,
 	0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xfffffffa,
 	0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c,
-	0xcdcdcdcd, 0xfffffff7, 0x30efffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
-	0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5, 0x304fffff, 0x0c30c30c,
+	0xcdcdcdcd, 0xfffffff7, 0x31efffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
+	0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5, 0x302fffff, 0x0c30c30c,
 	0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xfffffff3,
-	0x31efffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c,
+	0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c,
 	0xcdcdcdcd, 0xfffffff1, 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
 	0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c,
 	0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406,
 	0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c,
 	0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
 	0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xfffffffa, 0x302fffff, 0x0c30c30c,
-	0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffff97,
-	0x056fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000, 0xf3cf3cf3, 0x0020cf3c,
-	0xcdcdcdcd, 0xfffffff5, 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
-	0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xfffffff3, 0x320fffff, 0x0c30c30c,
+	0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xfffffff7,
+	0x30efffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0020cf3c,
+	0xcdcdcdcd, 0xfffffff5, 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
+	0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xfffffff3, 0x31efffff, 0x0c30c30c,
 	0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1,
 	0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c,
 	0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
 	0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305,
 	0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2,
 	0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c,
-	0xcdcdcdcd, 0xffffff8a, 0x042fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000,
-	0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffff97, 0x05cfffff, 0x0c30c30c,
+	0xcdcdcdcd, 0xfffffffa, 0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
+	0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffff97, 0x056fffff, 0x0c30c30c,
 	0xc30c30c3, 0xcf3cc000, 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5,
 	0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c,
-	0xcdcdcdcd, 0xfffffff3, 0x300fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
-	0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, 0x300fffff, 0x0c30c30c,
+	0xcdcdcdcd, 0xfffffff3, 0x320fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
+	0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, 0x310fffff, 0x0c30c30c,
 	0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6,
 	0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c,
 	0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014,
 	0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c,
-	0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xfffffffa,
-	0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c,
-	0xcdcdcdcd, 0xffffff97, 0x040fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000,
-	0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5, 0x300fffff, 0x0c30c30c,
-	0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xffffffff,
-	0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0000cf3c,
-	0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc,
-	0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c,
-	0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xffffffff,
-	0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0004cf3c,
-	0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc,
-	0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c,
-	0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffffff,
-	0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0020cf3c,
-	0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc,
+	0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xffffff8a,
+	0x042fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000, 0xf3cf3cf3, 0x0010cf3c,
+	0xcdcdcdcd, 0xffffff97, 0x05cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000,
+	0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5, 0x310fffff, 0x0c30c30c,
+	0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xfffffff3,
+	0x300fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c,
+	0xcdcdcdcd, 0xfffffff1, 0x300fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
+	0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c,
+	0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406,
+	0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c,
+	0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
+	0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xfffffffa, 0x302fffff, 0x0c30c30c,
+	0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffff97,
+	0x040fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000, 0xf3cf3cf3, 0x0020cf3c,
+	0xcdcdcdcd, 0xfffffff5, 0x300fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
 	0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c,
 	0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xffffffff,
 	0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0001cf3c,
@@ -2678,16 +2630,27 @@
 	0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0010cf3c,
 	0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc,
 	0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c,
-	0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0x00100000,
-	0x00070100, 0x00028170, 0x000b8198, 0x00020250, 0x00010270, 0x000f0280,
-	0x00010370, 0x00080000, 0x00080080, 0x00028100, 0x000b8128, 0x000201e0,
-	0x00010200, 0x00070210, 0x00020280, 0x000f0000, 0x000800f0, 0x00028170,
-	0x000b8198, 0x00020250, 0x00010270, 0x000b8280, 0x00080338, 0x00100000,
-	0x00080100, 0x00028180, 0x000b81a8, 0x00020260, 0x00018280, 0x000e8298,
-	0x00080380, 0x00028000, 0x000b8028, 0x000200e0, 0x00010100, 0x00008110,
-	0x00000118, 0xcccccccc, 0xcccccccc, 0xcccccccc, 0xcccccccc, 0x00002000,
-	0xcccccccc, 0xcccccccc, 0xcccccccc, 0xcccccccc, 0x00002000, 0xcccccccc,
-	0xcccccccc, 0xcccccccc, 0xcccccccc, 0x00002000
+	0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xffffffff,
+	0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0000cf3c,
+	0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc,
+	0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c,
+	0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xffffffff,
+	0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0004cf3c,
+	0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc,
+	0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c,
+	0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffffff,
+	0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0020cf3c,
+	0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc,
+	0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0x00100000, 0x00070100, 0x00028170,
+	0x000b8198, 0x00020250, 0x00010270, 0x000f0280, 0x00010370, 0x00080000,
+	0x00080080, 0x00028100, 0x000b8128, 0x000201e0, 0x00010200, 0x00070210,
+	0x00020280, 0x000f0000, 0x000800f0, 0x00028170, 0x000b8198, 0x00020250,
+	0x00010270, 0x000b8280, 0x00080338, 0x00100000, 0x00080100, 0x00028180,
+	0x000b81a8, 0x00020260, 0x00018280, 0x000e8298, 0x00080380, 0x00028000,
+	0x000b8028, 0x000200e0, 0x00010100, 0x00008110, 0x00000118, 0xcccccccc,
+	0xcccccccc, 0xcccccccc, 0xcccccccc, 0x00002000, 0xcccccccc, 0xcccccccc,
+	0xcccccccc, 0xcccccccc, 0x00002000, 0xcccccccc, 0xcccccccc, 0xcccccccc,
+	0xcccccccc, 0x00002000
 };
 
 static const u32 init_data_e1h[] = {
diff --git a/drivers/net/bnx2x_link.c b/drivers/net/bnx2x_link.c
index ff2743d..8b92c6a 100644
--- a/drivers/net/bnx2x_link.c
+++ b/drivers/net/bnx2x_link.c
@@ -31,17 +31,16 @@
 
 /********************************************************/
 #define SUPPORT_CL73 0 /* Currently no */
-#define ETH_HLEN 			14
+#define ETH_HLEN			14
 #define ETH_OVREHEAD		(ETH_HLEN + 8)/* 8 for CRC + VLAN*/
 #define ETH_MIN_PACKET_SIZE		60
 #define ETH_MAX_PACKET_SIZE		1500
 #define ETH_MAX_JUMBO_PACKET_SIZE	9600
 #define MDIO_ACCESS_TIMEOUT		1000
 #define BMAC_CONTROL_RX_ENABLE	2
-#define MAX_MTU_SIZE		5000
 
 /***********************************************************/
-/*                       Shortcut definitions              */
+/*			Shortcut definitions		   */
 /***********************************************************/
 
 #define NIG_STATUS_XGXS0_LINK10G \
@@ -80,12 +79,12 @@
 
 #define AUTONEG_CL37		SHARED_HW_CFG_AN_ENABLE_CL37
 #define AUTONEG_CL73		SHARED_HW_CFG_AN_ENABLE_CL73
-#define AUTONEG_BAM			SHARED_HW_CFG_AN_ENABLE_BAM
-#define AUTONEG_PARALLEL		\
+#define AUTONEG_BAM		SHARED_HW_CFG_AN_ENABLE_BAM
+#define AUTONEG_PARALLEL \
 				SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION
-#define AUTONEG_SGMII_FIBER_AUTODET	\
+#define AUTONEG_SGMII_FIBER_AUTODET \
 				SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT
-#define AUTONEG_REMOTE_PHY		SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY
+#define AUTONEG_REMOTE_PHY	SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY
 
 #define GP_STATUS_PAUSE_RSOLUTION_TXSIDE \
 			MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE
@@ -202,11 +201,10 @@
 	/* init emac - use read-modify-write */
 	/* self clear reset */
 	val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
-	EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET));
+	EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET));
 
 	timeout = 200;
-	do
-	{
+	do {
 		val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
 		DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
 		if (!timeout) {
@@ -214,18 +212,18 @@
 			return;
 		}
 		timeout--;
-	}while (val & EMAC_MODE_RESET);
+	} while (val & EMAC_MODE_RESET);
 
 	/* Set mac address */
 	val = ((params->mac_addr[0] << 8) |
 		params->mac_addr[1]);
-	EMAC_WR(EMAC_REG_EMAC_MAC_MATCH, val);
+	EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH, val);
 
 	val = ((params->mac_addr[2] << 24) |
 	       (params->mac_addr[3] << 16) |
 	       (params->mac_addr[4] << 8) |
 		params->mac_addr[5]);
-	EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val);
+	EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + 4, val);
 }
 
 static u8 bnx2x_emac_enable(struct link_params *params,
@@ -286,7 +284,7 @@
 	if (CHIP_REV_IS_SLOW(bp)) {
 		/* config GMII mode */
 		val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
-		EMAC_WR(EMAC_REG_EMAC_MODE,
+		EMAC_WR(bp, EMAC_REG_EMAC_MODE,
 			    (val | EMAC_MODE_PORT_GMII));
 	} else { /* ASIC */
 		/* pause enable/disable */
@@ -298,17 +296,19 @@
 				    EMAC_RX_MODE_FLOW_EN);
 
 		bnx2x_bits_dis(bp,  emac_base + EMAC_REG_EMAC_TX_MODE,
-			       EMAC_TX_MODE_EXT_PAUSE_EN);
+			     (EMAC_TX_MODE_EXT_PAUSE_EN |
+			      EMAC_TX_MODE_FLOW_EN));
 		if (vars->flow_ctrl & FLOW_CTRL_TX)
 			bnx2x_bits_en(bp, emac_base +
 				    EMAC_REG_EMAC_TX_MODE,
-				      EMAC_TX_MODE_EXT_PAUSE_EN);
+				   (EMAC_TX_MODE_EXT_PAUSE_EN |
+				    EMAC_TX_MODE_FLOW_EN));
 	}
 
 	/* KEEP_VLAN_TAG, promiscuous */
 	val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
 	val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
-	EMAC_WR(EMAC_REG_EMAC_RX_MODE, val);
+	EMAC_WR(bp, EMAC_REG_EMAC_RX_MODE, val);
 
 	/* Set Loopback */
 	val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
@@ -316,10 +316,10 @@
 		val |= 0x810;
 	else
 		val &= ~0x810;
-	EMAC_WR(EMAC_REG_EMAC_MODE, val);
+	EMAC_WR(bp, EMAC_REG_EMAC_MODE, val);
 
 	/* enable emac for jumbo packets */
-	EMAC_WR(EMAC_REG_EMAC_RX_MTU_SIZE,
+	EMAC_WR(bp, EMAC_REG_EMAC_RX_MTU_SIZE,
 		(EMAC_RX_MTU_SIZE_JUMBO_ENA |
 		 (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)));
 
@@ -591,9 +591,9 @@
 			vars->flow_ctrl &= ~FLOW_CTRL_RX;
 
 		if (vars->phy_flags & PHY_XGXS_FLAG) {
-			if (params->req_line_speed &&
-			    ((params->req_line_speed == SPEED_10) ||
-			     (params->req_line_speed == SPEED_100))) {
+			if (vars->line_speed &&
+			    ((vars->line_speed == SPEED_10) ||
+			     (vars->line_speed == SPEED_100))) {
 				vars->phy_flags |= PHY_SGMII_FLAG;
 			} else {
 				vars->phy_flags &= ~PHY_SGMII_FLAG;
@@ -645,7 +645,7 @@
 	u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
 		NIG_REG_INGRESS_BMAC0_MEM;
 	u32 wb_data[2];
-    u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4);
+	u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4);
 
 	/* Only if the bmac is out of reset */
 	if (REG_RD(bp, MISC_REG_RESET_REG_2) &
@@ -670,7 +670,6 @@
 	u8 port = params->port;
 	u32 init_crd, crd;
 	u32 count = 1000;
-	u32 pause = 0;
 
 	/* disable port */
 	REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);
@@ -693,33 +692,25 @@
 		return -EINVAL;
 	}
 
-	if (flow_ctrl & FLOW_CTRL_RX)
-		pause = 1;
-	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, pause);
-	if (pause) {
+	if (flow_ctrl & FLOW_CTRL_RX ||
+	    line_speed == SPEED_10 ||
+	    line_speed == SPEED_100 ||
+	    line_speed == SPEED_1000 ||
+	    line_speed == SPEED_2500) {
+		REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 1);
 		/* update threshold */
 		REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
 		/* update init credit */
-		init_crd = 778;		/* (800-18-4) */
+		init_crd = 778;		/* (800-18-4) */
 
 	} else {
 		u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE +
 			      ETH_OVREHEAD)/16;
-
+		REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
 		/* update threshold */
 		REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh);
 		/* update init credit */
 		switch (line_speed) {
-		case SPEED_10:
-		case SPEED_100:
-		case SPEED_1000:
-			init_crd = thresh + 55 - 22;
-			break;
-
-		case SPEED_2500:
-			init_crd = thresh + 138 - 22;
-			break;
-
 		case SPEED_10000:
 			init_crd = thresh + 553 - 22;
 			break;
@@ -764,10 +755,10 @@
 		emac_base = GRCBASE_EMAC0;
 		break;
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
-		emac_base = (port) ? GRCBASE_EMAC0: GRCBASE_EMAC1;
+		emac_base = (port) ? GRCBASE_EMAC0 : GRCBASE_EMAC1;
 		break;
 	default:
-		emac_base = (port) ? GRCBASE_EMAC1: GRCBASE_EMAC0;
+		emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
 		break;
 	}
 	return emac_base;
@@ -1044,7 +1035,7 @@
 }
 
 static void bnx2x_set_parallel_detection(struct link_params *params,
-				       u8                phy_flags)
+				       u8 phy_flags)
 {
 	struct bnx2x *bp = params->bp;
 	u16 control2;
@@ -1114,7 +1105,7 @@
 			      MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
 
 	/* CL37 Autoneg Enabled */
-	if (params->req_line_speed == SPEED_AUTO_NEG)
+	if (vars->line_speed == SPEED_AUTO_NEG)
 		reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN;
 	else /* CL37 Autoneg Disabled */
 		reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
@@ -1132,7 +1123,7 @@
 			      MDIO_REG_BANK_SERDES_DIGITAL,
 			      MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
 	reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN;
-	if (params->req_line_speed == SPEED_AUTO_NEG)
+	if (vars->line_speed == SPEED_AUTO_NEG)
 		reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
 	else
 		reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
@@ -1148,7 +1139,7 @@
 			      MDIO_REG_BANK_BAM_NEXT_PAGE,
 			      MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
 			  &reg_val);
-	if (params->req_line_speed == SPEED_AUTO_NEG) {
+	if (vars->line_speed == SPEED_AUTO_NEG) {
 		/* Enable BAM aneg Mode and TetonII aneg Mode */
 		reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
 			    MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
@@ -1164,7 +1155,7 @@
 			      reg_val);
 
 	/* Enable Clause 73 Aneg */
-	if ((params->req_line_speed == SPEED_AUTO_NEG) &&
+	if ((vars->line_speed == SPEED_AUTO_NEG) &&
 	    (SUPPORT_CL73)) {
 		/* Enable BAM Station Manager */
 
@@ -1226,7 +1217,8 @@
 }
 
 /* program SerDes, forced speed */
-static void bnx2x_program_serdes(struct link_params *params)
+static void bnx2x_program_serdes(struct link_params *params,
+			       struct link_vars *vars)
 {
 	struct bnx2x *bp = params->bp;
 	u16 reg_val;
@@ -1248,28 +1240,35 @@
 
 	/* program speed
 	   - needed only if the speed is greater than 1G (2.5G or 10G) */
-	if (!((params->req_line_speed == SPEED_1000) ||
-	      (params->req_line_speed == SPEED_100) ||
-	      (params->req_line_speed == SPEED_10))) {
-		CL45_RD_OVER_CL22(bp, params->port,
+	CL45_RD_OVER_CL22(bp, params->port,
 				      params->phy_addr,
 				      MDIO_REG_BANK_SERDES_DIGITAL,
 				      MDIO_SERDES_DIGITAL_MISC1, &reg_val);
-		/* clearing the speed value before setting the right speed */
-		reg_val &= ~MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK;
+	/* clearing the speed value before setting the right speed */
+	DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val);
+
+	reg_val &= ~(MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK |
+		     MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
+
+	if (!((vars->line_speed == SPEED_1000) ||
+	      (vars->line_speed == SPEED_100) ||
+	      (vars->line_speed == SPEED_10))) {
+
 		reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M |
 			    MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
-		if (params->req_line_speed == SPEED_10000)
+		if (vars->line_speed == SPEED_10000)
 			reg_val |=
 				MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4;
-		if (params->req_line_speed == SPEED_13000)
+		if (vars->line_speed == SPEED_13000)
 			reg_val |=
 				MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G;
-		CL45_WR_OVER_CL22(bp, params->port,
+	}
+
+	CL45_WR_OVER_CL22(bp, params->port,
 				      params->phy_addr,
 				      MDIO_REG_BANK_SERDES_DIGITAL,
 				      MDIO_SERDES_DIGITAL_MISC1, reg_val);
-	}
+
 }
 
 static void bnx2x_set_brcm_cl37_advertisment(struct link_params *params)
@@ -1295,48 +1294,49 @@
 			      MDIO_OVER_1G_UP3, 0);
 }
 
-static void bnx2x_set_ieee_aneg_advertisment(struct link_params *params,
-					   u32 *ieee_fc)
+static void bnx2x_calc_ieee_aneg_adv(struct link_params *params, u32 *ieee_fc)
 {
-	struct bnx2x *bp = params->bp;
-	/* for AN, we are always publishing full duplex */
-	u16 an_adv = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
-
+	*ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
 	/* resolve pause mode and advertisement
 	 * Please refer to Table 28B-3 of the 802.3ab-1999 spec */
 
 	switch (params->req_flow_ctrl) {
 	case FLOW_CTRL_AUTO:
-		if (params->mtu <= MAX_MTU_SIZE) {
-			an_adv |=
+		if (params->req_fc_auto_adv == FLOW_CTRL_BOTH) {
+			*ieee_fc |=
 			     MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
 		} else {
-			an_adv |=
+			*ieee_fc |=
 		       MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
 		}
 		break;
 	case FLOW_CTRL_TX:
-		an_adv |=
+		*ieee_fc |=
 		       MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
 		break;
 
 	case FLOW_CTRL_RX:
 	case FLOW_CTRL_BOTH:
-		an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+		*ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
 		break;
 
 	case FLOW_CTRL_NONE:
 	default:
-		an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
+		*ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
 		break;
 	}
+}
 
-	*ieee_fc = an_adv;
+static void bnx2x_set_ieee_aneg_advertisment(struct link_params *params,
+					   u32 ieee_fc)
+{
+	struct bnx2x *bp = params->bp;
+	/* for AN, we are always publishing full duplex */
 
 	CL45_WR_OVER_CL22(bp, params->port,
 			      params->phy_addr,
 			      MDIO_REG_BANK_COMBO_IEEE0,
-			      MDIO_COMBO_IEEE0_AUTO_NEG_ADV, an_adv);
+			      MDIO_COMBO_IEEE0_AUTO_NEG_ADV, (u16)ieee_fc);
 }
 
 static void bnx2x_restart_autoneg(struct link_params *params)
@@ -1382,7 +1382,8 @@
 	}
 }
 
-static void bnx2x_initialize_sgmii_process(struct link_params *params)
+static void bnx2x_initialize_sgmii_process(struct link_params *params,
+					 struct link_vars *vars)
 {
 	struct bnx2x *bp = params->bp;
 	u16 control1;
@@ -1406,7 +1407,7 @@
 			      control1);
 
 	/* if forced speed */
-	if (!(params->req_line_speed == SPEED_AUTO_NEG)) {
+	if (!(vars->line_speed == SPEED_AUTO_NEG)) {
 		/* set speed, disable autoneg */
 		u16 mii_control;
 
@@ -1419,7 +1420,7 @@
 				 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK|
 				 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
 
-		switch (params->req_line_speed) {
+		switch (vars->line_speed) {
 		case SPEED_100:
 			mii_control |=
 				MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100;
@@ -1433,8 +1434,8 @@
 			break;
 		default:
 			/* invalid speed for SGMII */
-			DP(NETIF_MSG_LINK, "Invalid req_line_speed 0x%x\n",
-				  params->req_line_speed);
+			DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
+				  vars->line_speed);
 			break;
 		}
 
@@ -1460,20 +1461,20 @@
  */
 
 static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
-{
-	switch (pause_result) {			/* ASYM P ASYM P */
-	case 0xb:				/*   1  0   1  1 */
+{						/*  LD	    LP	 */
+	switch (pause_result) {			/* ASYM P ASYM P */
+	case 0xb:				/*   1  0   1  1 */
 		vars->flow_ctrl = FLOW_CTRL_TX;
 		break;
 
-	case 0xe:				/*   1  1   1  0 */
+	case 0xe:				/*   1  1   1  0 */
 		vars->flow_ctrl = FLOW_CTRL_RX;
 		break;
 
-	case 0x5:				/*   0  1   0  1 */
-	case 0x7:				/*   0  1   1  1 */
-	case 0xd:				/*   1  1   0  1 */
-	case 0xf:				/*   1  1   1  1 */
+	case 0x5:				/*   0  1   0  1 */
+	case 0x7:				/*   0  1   1  1 */
+	case 0xd:				/*   1  1   0  1 */
+	case 0xf:				/*   1  1   1  1 */
 		vars->flow_ctrl = FLOW_CTRL_BOTH;
 		break;
 
@@ -1531,6 +1532,28 @@
 		DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x \n",
 		   pause_result);
 		bnx2x_pause_resolve(vars, pause_result);
+		if (vars->flow_ctrl == FLOW_CTRL_NONE &&
+		     ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
+			bnx2x_cl45_read(bp, port,
+				      ext_phy_type,
+				      ext_phy_addr,
+				      MDIO_AN_DEVAD,
+				      MDIO_AN_REG_CL37_FC_LD, &ld_pause);
+
+			bnx2x_cl45_read(bp, port,
+				      ext_phy_type,
+				      ext_phy_addr,
+				      MDIO_AN_DEVAD,
+				      MDIO_AN_REG_CL37_FC_LP, &lp_pause);
+			pause_result = (ld_pause &
+				MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 5;
+			pause_result |= (lp_pause &
+				MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7;
+
+			bnx2x_pause_resolve(vars, pause_result);
+			DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x \n",
+				 pause_result);
+		}
 	}
 	return ret;
 }
@@ -1541,8 +1564,8 @@
 				  u32 gp_status)
 {
 	struct bnx2x *bp = params->bp;
-	u16 ld_pause;	/* local driver */
-	u16 lp_pause;	/* link partner */
+	u16 ld_pause;   /* local driver */
+	u16 lp_pause;   /* link partner */
 	u16 pause_result;
 
 	vars->flow_ctrl = FLOW_CTRL_NONE;
@@ -1573,13 +1596,10 @@
 		   (bnx2x_ext_phy_resove_fc(params, vars))) {
 		return;
 	} else {
-		vars->flow_ctrl = params->req_flow_ctrl;
-		if (vars->flow_ctrl == FLOW_CTRL_AUTO) {
-			if (params->mtu <= MAX_MTU_SIZE)
-				vars->flow_ctrl = FLOW_CTRL_BOTH;
-			else
-				vars->flow_ctrl = FLOW_CTRL_TX;
-		}
+		if (params->req_flow_ctrl == FLOW_CTRL_AUTO)
+			vars->flow_ctrl = params->req_fc_auto_adv;
+		else
+			vars->flow_ctrl = params->req_flow_ctrl;
 	}
 	DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", vars->flow_ctrl);
 }
@@ -1590,6 +1610,7 @@
 				      u32 gp_status)
 {
 	struct bnx2x *bp = params->bp;
+
 	u8 rc = 0;
 	vars->link_status = 0;
 
@@ -1690,7 +1711,11 @@
 
 		vars->link_status |= LINK_STATUS_SERDES_LINK;
 
-		if (params->req_line_speed == SPEED_AUTO_NEG) {
+		if ((params->req_line_speed == SPEED_AUTO_NEG) &&
+		    ((XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
+		     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ||
+		    (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
+		     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705))) {
 			vars->autoneg = AUTO_NEG_ENABLED;
 
 			if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
@@ -1705,18 +1730,18 @@
 
 		}
 		if (vars->flow_ctrl & FLOW_CTRL_TX)
-		       vars->link_status |=
-			LINK_STATUS_TX_FLOW_CONTROL_ENABLED;
+			vars->link_status |=
+				LINK_STATUS_TX_FLOW_CONTROL_ENABLED;
 
 		if (vars->flow_ctrl & FLOW_CTRL_RX)
-		       vars->link_status |=
-			LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
+			vars->link_status |=
+				LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
 
 	} else { /* link_down */
 		DP(NETIF_MSG_LINK, "phy link down\n");
 
 		vars->phy_link_up = 0;
-		vars->line_speed = 0;
+
 		vars->duplex = DUPLEX_FULL;
 		vars->flow_ctrl = FLOW_CTRL_NONE;
 		vars->autoneg = AUTO_NEG_DISABLED;
@@ -1817,15 +1842,15 @@
 }
 
 /*****************************************************************************/
-/*                           External Phy section                            */
+/*			     External Phy section			     */
 /*****************************************************************************/
-static void bnx2x_hw_reset(struct bnx2x *bp)
+static void bnx2x_hw_reset(struct bnx2x *bp, u8 port)
 {
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
-		       MISC_REGISTERS_GPIO_OUTPUT_LOW);
+		       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
 	msleep(1);
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
-		      MISC_REGISTERS_GPIO_OUTPUT_HIGH);
+		      MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
 }
 
 static void bnx2x_ext_phy_reset(struct link_params *params,
@@ -1854,10 +1879,11 @@
 
 			/* Restore normal power mode*/
 			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-				      MISC_REGISTERS_GPIO_OUTPUT_HIGH);
+				      MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+					  params->port);
 
 			/* HW reset */
-			bnx2x_hw_reset(bp);
+			bnx2x_hw_reset(bp, params->port);
 
 			bnx2x_cl45_write(bp, params->port,
 				       ext_phy_type,
@@ -1869,7 +1895,8 @@
 			/* Unset Low Power Mode and SW reset */
 			/* Restore normal power mode*/
 			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-				      MISC_REGISTERS_GPIO_OUTPUT_HIGH);
+				      MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+					  params->port);
 
 			DP(NETIF_MSG_LINK, "XGXS 8072\n");
 			bnx2x_cl45_write(bp, params->port,
@@ -1887,19 +1914,14 @@
 
 			/* Restore normal power mode*/
 			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-				      MISC_REGISTERS_GPIO_OUTPUT_HIGH);
+				      MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+					  params->port);
 
 			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
-				      MISC_REGISTERS_GPIO_OUTPUT_HIGH);
+				      MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+					  params->port);
 
 			DP(NETIF_MSG_LINK, "XGXS 8073\n");
-			bnx2x_cl45_write(bp,
-				       params->port,
-				       ext_phy_type,
-				       ext_phy_addr,
-				       MDIO_PMA_DEVAD,
-				       MDIO_PMA_REG_CTRL,
-				       1<<15);
 			}
 			break;
 
@@ -1908,10 +1930,11 @@
 
 			/* Restore normal power mode*/
 			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-				      MISC_REGISTERS_GPIO_OUTPUT_HIGH);
+				      MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+					  params->port);
 
 			/* HW reset */
-			bnx2x_hw_reset(bp);
+			bnx2x_hw_reset(bp, params->port);
 
 			break;
 
@@ -1934,7 +1957,7 @@
 
 		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
 			DP(NETIF_MSG_LINK, "SerDes 5482\n");
-			bnx2x_hw_reset(bp);
+			bnx2x_hw_reset(bp, params->port);
 			break;
 
 		default:
@@ -2098,42 +2121,45 @@
 
 }
 
-static void bnx2x_bcm8073_external_rom_boot(struct link_params *params)
+static void bnx2x_bcm8073_external_rom_boot(struct bnx2x *bp, u8 port,
+					  u8 ext_phy_addr)
 {
-	struct bnx2x *bp = params->bp;
-	u8 port = params->port;
-	u8 ext_phy_addr = ((params->ext_phy_config &
-			     PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
-			    PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
-	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
-	u16 fw_ver1, fw_ver2, val;
-	/* Need to wait 100ms after reset */
-	msleep(100);
-	/* Boot port from external ROM	*/
+	u16 fw_ver1, fw_ver2;
+	/* Boot port from external ROM  */
 	/* EDC grst */
-	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
+	bnx2x_cl45_write(bp, port,
+		       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+		       ext_phy_addr,
 		       MDIO_PMA_DEVAD,
 		       MDIO_PMA_REG_GEN_CTRL,
 		       0x0001);
 
 	/* ucode reboot and rst */
-	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
+	bnx2x_cl45_write(bp, port,
+		       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+		       ext_phy_addr,
 		       MDIO_PMA_DEVAD,
 		       MDIO_PMA_REG_GEN_CTRL,
 		       0x008c);
 
-	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
+	bnx2x_cl45_write(bp, port,
+		       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+		       ext_phy_addr,
 		       MDIO_PMA_DEVAD,
 		       MDIO_PMA_REG_MISC_CTRL1, 0x0001);
 
 	/* Reset internal microprocessor */
-	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
+	bnx2x_cl45_write(bp, port,
+		       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+		       ext_phy_addr,
 		       MDIO_PMA_DEVAD,
 		       MDIO_PMA_REG_GEN_CTRL,
 		       MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
 
 	/* Release srst bit */
-	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
+	bnx2x_cl45_write(bp, port,
+		       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+		       ext_phy_addr,
 		       MDIO_PMA_DEVAD,
 		       MDIO_PMA_REG_GEN_CTRL,
 		       MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
@@ -2142,35 +2168,52 @@
 	msleep(100);
 
 	/* Clear ser_boot_ctl bit */
-	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
+	bnx2x_cl45_write(bp, port,
+		       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+		       ext_phy_addr,
 		       MDIO_PMA_DEVAD,
 		       MDIO_PMA_REG_MISC_CTRL1, 0x0000);
 
-	bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_ROM_VER1, &fw_ver1);
-	bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_ROM_VER2, &fw_ver2);
+	bnx2x_cl45_read(bp, port, PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+		      ext_phy_addr,
+		      MDIO_PMA_DEVAD,
+		      MDIO_PMA_REG_ROM_VER1, &fw_ver1);
+	bnx2x_cl45_read(bp, port,
+		      PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+		      ext_phy_addr,
+		      MDIO_PMA_DEVAD,
+		      MDIO_PMA_REG_ROM_VER2, &fw_ver2);
 	DP(NETIF_MSG_LINK, "8073 FW version 0x%x:0x%x\n", fw_ver1, fw_ver2);
 
-	/* Only set bit 10 = 1 (Tx power down) */
-	bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_TX_POWER_DOWN, &val);
-
-	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_TX_POWER_DOWN, (val | 1<<10));
-
-	msleep(600);
-	/* Release bit 10 (Release Tx power down) */
-	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
-
 }
 
+static void bnx2x_bcm807x_force_10G(struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u8 port = params->port;
+	u8 ext_phy_addr = ((params->ext_phy_config &
+				PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
+				PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
+	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
+
+	/* Force KR or KX */
+	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
+		       MDIO_PMA_DEVAD,
+		       MDIO_PMA_REG_CTRL,
+		       0x2040);
+	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
+		       MDIO_PMA_DEVAD,
+		       MDIO_PMA_REG_10G_CTRL2,
+		       0x000b);
+	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
+		       MDIO_PMA_DEVAD,
+		       MDIO_PMA_REG_BCM_CTRL,
+		       0x0000);
+	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
+		       MDIO_AN_DEVAD,
+		       MDIO_AN_REG_CTRL,
+		       0x0000);
+}
 static void bnx2x_bcm8073_set_xaui_low_power_mode(struct link_params *params)
 {
 	struct bnx2x *bp = params->bp;
@@ -2236,32 +2279,51 @@
 	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
 		       MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
 }
-static void bnx2x_bcm807x_force_10G(struct link_params *params)
+
+static void bnx2x_8073_set_pause_cl37(struct link_params *params,
+				  struct link_vars *vars)
 {
+
 	struct bnx2x *bp = params->bp;
-	u8 port = params->port;
+	u16 cl37_val;
 	u8 ext_phy_addr = ((params->ext_phy_config &
 				PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
 				PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
 	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
 
-	/* Force KR or KX */
-	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_CTRL,
-		       0x2040);
-	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_10G_CTRL2,
-		       0x000b);
-	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_BCM_CTRL,
-		       0x0000);
-	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
+	bnx2x_cl45_read(bp, params->port,
+		      ext_phy_type,
+		      ext_phy_addr,
+		      MDIO_AN_DEVAD,
+		      MDIO_AN_REG_CL37_FC_LD, &cl37_val);
+
+	cl37_val &= ~MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+	/* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
+
+	if ((vars->ieee_fc &
+	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) ==
+	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) {
+		cl37_val |=  MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC;
+	}
+	if ((vars->ieee_fc &
+	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
+	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
+		cl37_val |=  MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+	}
+	if ((vars->ieee_fc &
+	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
+	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
+		cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+	}
+	DP(NETIF_MSG_LINK,
+		 "Ext phy AN advertise cl37 0x%x\n", cl37_val);
+
+	bnx2x_cl45_write(bp, params->port,
+		       ext_phy_type,
+		       ext_phy_addr,
 		       MDIO_AN_DEVAD,
-		       MDIO_AN_REG_CTRL,
-		       0x0000);
+		       MDIO_AN_REG_CL37_FC_LD, cl37_val);
+	msleep(500);
 }
 
 static void bnx2x_ext_phy_set_pause(struct link_params *params,
@@ -2282,13 +2344,16 @@
 		      MDIO_AN_REG_ADV_PAUSE, &val);
 
 	val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH;
+
 	/* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
 
-	if (vars->ieee_fc &
+	if ((vars->ieee_fc &
+	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
 	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
 		val |=  MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
 	}
-	if (vars->ieee_fc &
+	if ((vars->ieee_fc &
+	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
 	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
 		val |=
 		 MDIO_AN_REG_ADV_PAUSE_PAUSE;
@@ -2302,6 +2367,65 @@
 		       MDIO_AN_REG_ADV_PAUSE, val);
 }
 
+
+static void bnx2x_init_internal_phy(struct link_params *params,
+				struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u8 port = params->port;
+	if (!(vars->phy_flags & PHY_SGMII_FLAG)) {
+		u16 bank, rx_eq;
+
+		rx_eq = ((params->serdes_config &
+			  PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK) >>
+			 PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT);
+
+		DP(NETIF_MSG_LINK, "setting rx eq to 0x%x\n", rx_eq);
+		for (bank = MDIO_REG_BANK_RX0; bank <= MDIO_REG_BANK_RX_ALL;
+		      bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0)) {
+			CL45_WR_OVER_CL22(bp, port,
+					      params->phy_addr,
+					      bank,
+					      MDIO_RX0_RX_EQ_BOOST,
+					      ((rx_eq &
+				MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK) |
+				MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL));
+		}
+
+		/* forced speed requested? */
+		if (vars->line_speed != SPEED_AUTO_NEG) {
+			DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
+
+			/* disable autoneg */
+			bnx2x_set_autoneg(params, vars);
+
+			/* program speed and duplex */
+			bnx2x_program_serdes(params, vars);
+
+		} else { /* AN_mode */
+			DP(NETIF_MSG_LINK, "not SGMII, AN\n");
+
+			/* AN enabled */
+			bnx2x_set_brcm_cl37_advertisment(params);
+
+			/* program duplex & pause advertisement (for aneg) */
+			bnx2x_set_ieee_aneg_advertisment(params,
+						       vars->ieee_fc);
+
+			/* enable autoneg */
+			bnx2x_set_autoneg(params, vars);
+
+			/* enable and restart AN */
+			bnx2x_restart_autoneg(params);
+		}
+
+	} else { /* SGMII mode */
+		DP(NETIF_MSG_LINK, "SGMII\n");
+
+		bnx2x_initialize_sgmii_process(params, vars);
+	}
+}
+
 static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
 {
 	struct bnx2x *bp = params->bp;
@@ -2343,7 +2467,6 @@
 
 		switch (ext_phy_type) {
 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
-			DP(NETIF_MSG_LINK, "XGXS Direct\n");
 			break;
 
 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
@@ -2419,7 +2542,7 @@
 					       ext_phy_type,
 					       ext_phy_addr,
 					       MDIO_AN_DEVAD,
-					       MDIO_AN_REG_CL37_FD,
+					       MDIO_AN_REG_CL37_FC_LP,
 					       0x0020);
 				/* Enable CL37 AN */
 				bnx2x_cl45_write(bp, params->port,
@@ -2458,47 +2581,17 @@
 				rx_alarm_ctrl_val = 0x400;
 				lasi_ctrl_val = 0x0004;
 			} else {
-				/* In 8073, port1 is directed through emac0 and
-				 * port0 is directed through emac1
-				 */
 				rx_alarm_ctrl_val = (1<<2);
-				/*lasi_ctrl_val = 0x0005;*/
 				lasi_ctrl_val = 0x0004;
 			}
 
-			/* Wait for soft reset to get cleared upto 1 sec */
-			for (cnt = 0; cnt < 1000; cnt++) {
-				bnx2x_cl45_read(bp, params->port,
-					      ext_phy_type,
-					      ext_phy_addr,
-					      MDIO_PMA_DEVAD,
-					      MDIO_PMA_REG_CTRL,
-					      &ctrl);
-				if (!(ctrl & (1<<15)))
-					break;
-				msleep(1);
-			}
-			DP(NETIF_MSG_LINK,
-				"807x control reg 0x%x (after %d ms)\n",
-				ctrl, cnt);
-
-			if (ext_phy_type ==
-			    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072){
-				bnx2x_bcm8072_external_rom_boot(params);
-			} else {
-				bnx2x_bcm8073_external_rom_boot(params);
-				/* In case of 8073 with long xaui lines,
-				don't set the 8073 xaui low power*/
-				bnx2x_bcm8073_set_xaui_low_power_mode(params);
-			}
-
 			/* enable LASI */
 			bnx2x_cl45_write(bp, params->port,
-				       ext_phy_type,
-				       ext_phy_addr,
-				       MDIO_PMA_DEVAD,
-				       MDIO_PMA_REG_RX_ALARM_CTRL,
-				       rx_alarm_ctrl_val);
+				   ext_phy_type,
+				   ext_phy_addr,
+				   MDIO_PMA_DEVAD,
+				   MDIO_PMA_REG_RX_ALARM_CTRL,
+				   rx_alarm_ctrl_val);
 
 			bnx2x_cl45_write(bp, params->port,
 				       ext_phy_type,
@@ -2507,6 +2600,25 @@
 				       MDIO_PMA_REG_LASI_CTRL,
 				       lasi_ctrl_val);
 
+			bnx2x_8073_set_pause_cl37(params, vars);
+
+			if (ext_phy_type ==
+			    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072){
+				bnx2x_bcm8072_external_rom_boot(params);
+			} else {
+
+				/* In case of 8073 with long xaui lines,
+				don't set the 8073 xaui low power*/
+				bnx2x_bcm8073_set_xaui_low_power_mode(params);
+			}
+
+			bnx2x_cl45_read(bp, params->port,
+				      ext_phy_type,
+				      ext_phy_addr,
+				      MDIO_PMA_DEVAD,
+				      0xca13,
+				      &tmp1);
+
 			bnx2x_cl45_read(bp, params->port,
 				      ext_phy_type,
 				      ext_phy_addr,
@@ -2519,12 +2631,21 @@
 			/* If this is forced speed, set to KR or KX
 			 * (all other are not supported)
 			 */
-			if (!(params->req_line_speed == SPEED_AUTO_NEG)) {
-			if (params->req_line_speed == SPEED_10000) {
-					bnx2x_bcm807x_force_10G(params);
-					DP(NETIF_MSG_LINK,
-					   "Forced speed 10G on 807X\n");
-					break;
+			if (params->loopback_mode == LOOPBACK_EXT) {
+				bnx2x_bcm807x_force_10G(params);
+				DP(NETIF_MSG_LINK,
+					"Forced speed 10G on 807X\n");
+				break;
+			} else {
+				bnx2x_cl45_write(bp, params->port,
+					       ext_phy_type, ext_phy_addr,
+					       MDIO_PMA_DEVAD,
+					       MDIO_PMA_REG_BCM_CTRL,
+					       0x0002);
+			}
+			if (params->req_line_speed != SPEED_AUTO_NEG) {
+				if (params->req_line_speed == SPEED_10000) {
+					val = (1<<7);
 				} else if (params->req_line_speed ==
 					   SPEED_2500) {
 					val = (1<<5);
@@ -2539,11 +2660,14 @@
 					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
 					val |= (1<<7);
 
+				/* Note that 2.5G works only when
+				used with 1G advertisement */
 				if (params->speed_cap_mask &
-					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
+					(PORT_HW_CFG_SPEED_CAPABILITY_D0_1G |
+					 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
 					val |= (1<<5);
-				DP(NETIF_MSG_LINK, "807x autoneg val = 0x%x\n", val);
-				/*val = ((1<<5)|(1<<7));*/
+				DP(NETIF_MSG_LINK,
+					 "807x autoneg val = 0x%x\n", val);
 			}
 
 			bnx2x_cl45_write(bp, params->port,
@@ -2554,20 +2678,19 @@
 
 			if (ext_phy_type ==
 			    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
-				/* Disable 2.5Ghz */
+
 				bnx2x_cl45_read(bp, params->port,
 					      ext_phy_type,
 					      ext_phy_addr,
 					      MDIO_AN_DEVAD,
 					      0x8329, &tmp1);
-/* SUPPORT_SPEED_CAPABILITY
-				(Due to the nature of the link order, its not
-				possible to enable 2.5G within the autoneg
-				capabilities)
-				if (params->speed_cap_mask &
-				PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)
-*/
-				if (params->req_line_speed == SPEED_2500) {
+
+				if (((params->speed_cap_mask &
+				      PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) &&
+				     (params->req_line_speed ==
+				      SPEED_AUTO_NEG)) ||
+				    (params->req_line_speed ==
+				     SPEED_2500)) {
 					u16 phy_ver;
 					/* Allow 2.5G for A1 and above */
 					bnx2x_cl45_read(bp, params->port,
@@ -2575,49 +2698,53 @@
 					 ext_phy_addr,
 					 MDIO_PMA_DEVAD,
 					 0xc801, &phy_ver);
-
+					DP(NETIF_MSG_LINK, "Add 2.5G\n");
 					if (phy_ver > 0)
 						tmp1 |= 1;
 					else
 						tmp1 &= 0xfffe;
-			}
-				else
+				} else {
+					DP(NETIF_MSG_LINK, "Disable 2.5G\n");
 					tmp1 &= 0xfffe;
+				}
+
+				bnx2x_cl45_write(bp, params->port,
+					       ext_phy_type,
+					       ext_phy_addr,
+					       MDIO_AN_DEVAD,
+					       0x8329, tmp1);
+			}
+
+			/* Add support for CL37 (passive mode) II */
+
+			bnx2x_cl45_read(bp, params->port,
+				       ext_phy_type,
+				       ext_phy_addr,
+				       MDIO_AN_DEVAD,
+				       MDIO_AN_REG_CL37_FC_LD,
+				       &tmp1);
 
 			bnx2x_cl45_write(bp, params->port,
 				       ext_phy_type,
 				       ext_phy_addr,
 				       MDIO_AN_DEVAD,
-					       0x8329, tmp1);
-			}
-			/* Add support for CL37 (passive mode) I */
-			bnx2x_cl45_write(bp, params->port,
-				       ext_phy_type,
-				       ext_phy_addr,
-				       MDIO_AN_DEVAD,
-				       MDIO_AN_REG_CL37_CL73, 0x040c);
-			/* Add support for CL37 (passive mode) II */
-			bnx2x_cl45_write(bp, params->port,
-				       ext_phy_type,
-				       ext_phy_addr,
-				       MDIO_AN_DEVAD,
-				       MDIO_AN_REG_CL37_FD, 0x20);
+				       MDIO_AN_REG_CL37_FC_LD, (tmp1 |
+				       ((params->req_duplex == DUPLEX_FULL) ?
+				       0x20 : 0x40)));
+
 			/* Add support for CL37 (passive mode) III */
 			bnx2x_cl45_write(bp, params->port,
 				       ext_phy_type,
 				       ext_phy_addr,
 				       MDIO_AN_DEVAD,
 				       MDIO_AN_REG_CL37_AN, 0x1000);
-			/* Restart autoneg */
-			msleep(500);
 
 			if (ext_phy_type ==
 			    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
-
-			/* The SNR will improve about 2db by changing the
+				/* The SNR will improve about 2db by changing
 				BW and FEE main tap. Rest commands are executed
 				after link is up*/
-			/* Change FFE main cursor to 5 in EDC register */
+				/*Change FFE main cursor to 5 in EDC register*/
 				if (bnx2x_8073_is_snr_needed(params))
 					bnx2x_cl45_write(bp, params->port,
 						    ext_phy_type,
@@ -2626,25 +2753,28 @@
 						    MDIO_PMA_REG_EDC_FFE_MAIN,
 						    0xFB0C);
 
-			/* Enable FEC (Forware Error Correction)
-			   Request in the AN */
-			bnx2x_cl45_read(bp, params->port,
-				      ext_phy_type,
-				      ext_phy_addr,
-				      MDIO_AN_DEVAD,
-				      MDIO_AN_REG_ADV2, &tmp1);
+				/* Enable FEC (Forward Error Correction)
+				Request in the AN */
+				bnx2x_cl45_read(bp, params->port,
+					      ext_phy_type,
+					      ext_phy_addr,
+					      MDIO_AN_DEVAD,
+					      MDIO_AN_REG_ADV2, &tmp1);
 
-			tmp1 |= (1<<15);
+				tmp1 |= (1<<15);
 
-			bnx2x_cl45_write(bp, params->port,
-				      ext_phy_type,
-				      ext_phy_addr,
-				      MDIO_AN_DEVAD,
-				      MDIO_AN_REG_ADV2, tmp1);
+				bnx2x_cl45_write(bp, params->port,
+					       ext_phy_type,
+					       ext_phy_addr,
+					       MDIO_AN_DEVAD,
+					       MDIO_AN_REG_ADV2, tmp1);
+
 			}
 
 			bnx2x_ext_phy_set_pause(params, vars);
 
+			/* Restart autoneg */
+			msleep(500);
 			bnx2x_cl45_write(bp, params->port,
 				       ext_phy_type,
 				       ext_phy_addr,
@@ -2701,10 +2831,7 @@
 		}
 
 	} else { /* SerDes */
-/*		ext_phy_addr = ((bp->ext_phy_config &
-				 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK) >>
-				PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT);
-*/
+
 		ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config);
 		switch (ext_phy_type) {
 		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
@@ -2726,7 +2853,7 @@
 
 
 static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
-				  struct link_vars *vars)
+				 struct link_vars *vars)
 {
 	struct bnx2x *bp = params->bp;
 	u32 ext_phy_type;
@@ -2767,6 +2894,8 @@
 				      MDIO_PMA_REG_RX_SD, &rx_sd);
 			DP(NETIF_MSG_LINK, "8705 rx_sd 0x%x\n", rx_sd);
 			ext_phy_link_up = (rx_sd & 0x1);
+			if (ext_phy_link_up)
+				vars->line_speed = SPEED_10000;
 			break;
 
 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
@@ -2810,6 +2939,13 @@
 			 */
 			ext_phy_link_up = ((rx_sd & pcs_status & 0x1) ||
 					   (val2 & (1<<1)));
+			if (ext_phy_link_up) {
+				if (val2 & (1<<1))
+					vars->line_speed = SPEED_1000;
+				else
+					vars->line_speed = SPEED_10000;
+			}
+
 			/* clear LASI indication*/
 			bnx2x_cl45_read(bp, params->port, ext_phy_type,
 				      ext_phy_addr,
@@ -2820,6 +2956,8 @@
 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
 		{
+			u16 link_status = 0;
+			u16 an1000_status = 0;
 			if (ext_phy_type ==
 			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) {
 				bnx2x_cl45_read(bp, params->port,
@@ -2846,14 +2984,9 @@
 					      MDIO_PMA_DEVAD,
 					      MDIO_PMA_REG_LASI_STATUS, &val1);
 
-				bnx2x_cl45_read(bp, params->port,
-					      ext_phy_type,
-					      ext_phy_addr,
-					      MDIO_PMA_DEVAD,
-					      MDIO_PMA_REG_LASI_STATUS, &val2);
 				DP(NETIF_MSG_LINK,
-					 "8703 LASI status 0x%x->0x%x\n",
-					  val1, val2);
+					 "8703 LASI status 0x%x\n",
+					  val1);
 			}
 
 			/* clear the interrupt LASI status register */
@@ -2869,20 +3002,23 @@
 				      MDIO_PCS_REG_STATUS, &val1);
 			DP(NETIF_MSG_LINK, "807x PCS status 0x%x->0x%x\n",
 			   val2, val1);
+			/* Clear MSG-OUT */
+			bnx2x_cl45_read(bp, params->port,
+				      ext_phy_type,
+				      ext_phy_addr,
+				      MDIO_PMA_DEVAD,
+				      0xca13,
+				      &val1);
+
 			/* Check the LASI */
 			bnx2x_cl45_read(bp, params->port,
 				      ext_phy_type,
 				      ext_phy_addr,
 				      MDIO_PMA_DEVAD,
 				      MDIO_PMA_REG_RX_ALARM, &val2);
-			bnx2x_cl45_read(bp, params->port,
-				      ext_phy_type,
-				      ext_phy_addr,
-				      MDIO_PMA_DEVAD,
-				      MDIO_PMA_REG_RX_ALARM,
-				      &val1);
-			DP(NETIF_MSG_LINK, "KR 0x9003 0x%x->0x%x\n",
-			   val2, val1);
+
+			DP(NETIF_MSG_LINK, "KR 0x9003 0x%x\n", val2);
+
 			/* Check the link status */
 			bnx2x_cl45_read(bp, params->port,
 				      ext_phy_type,
@@ -2905,29 +3041,29 @@
 			DP(NETIF_MSG_LINK, "PMA_REG_STATUS=0x%x\n", val1);
 			if (ext_phy_type ==
 			    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
-				u16 an1000_status = 0;
+
 				if (ext_phy_link_up &&
-				    (
-				     (params->req_line_speed != SPEED_10000)
-				     )) {
+				    ((params->req_line_speed !=
+					SPEED_10000))) {
 					if (bnx2x_bcm8073_xaui_wa(params)
 					     != 0) {
 						ext_phy_link_up = 0;
 						break;
 					}
-					bnx2x_cl45_read(bp, params->port,
-						      ext_phy_type,
-						      ext_phy_addr,
-						      MDIO_XS_DEVAD,
-						      0x8304,
-						      &an1000_status);
-					bnx2x_cl45_read(bp, params->port,
-						      ext_phy_type,
-						      ext_phy_addr,
-						      MDIO_XS_DEVAD,
-						      0x8304,
-						      &an1000_status);
 				}
+				bnx2x_cl45_read(bp, params->port,
+						      ext_phy_type,
+						      ext_phy_addr,
+						      MDIO_AN_DEVAD,
+						      0x8304,
+						      &an1000_status);
+				bnx2x_cl45_read(bp, params->port,
+						      ext_phy_type,
+						      ext_phy_addr,
+						      MDIO_AN_DEVAD,
+						      0x8304,
+						      &an1000_status);
+
 				/* Check the link status on 1.1.2 */
 				bnx2x_cl45_read(bp, params->port,
 					      ext_phy_type,
@@ -2943,8 +3079,8 @@
 					     "an_link_status=0x%x\n",
 					  val2, val1, an1000_status);
 
-				ext_phy_link_up = (((val1 & 4) == 4) ||
-						    (an1000_status & (1<<1)));
+					ext_phy_link_up = (((val1 & 4) == 4) ||
+						(an1000_status & (1<<1)));
 				if (ext_phy_link_up &&
 				    bnx2x_8073_is_snr_needed(params)) {
 					/* The SNR will improve about 2db by
@@ -2968,8 +3104,74 @@
 						    MDIO_PMA_REG_CDR_BANDWIDTH,
 						    0x0333);
 
+
+				}
+				bnx2x_cl45_read(bp, params->port,
+						      ext_phy_type,
+						      ext_phy_addr,
+						      MDIO_PMA_DEVAD,
+						      0xc820,
+						      &link_status);
+
+			/* Bits 0..2 --> speed detected,
+			   bits 13..15 --> link is down */
+				if ((link_status & (1<<2)) &&
+				    (!(link_status & (1<<15)))) {
+					ext_phy_link_up = 1;
+					vars->line_speed = SPEED_10000;
+					DP(NETIF_MSG_LINK,
+						 "port %x: External link"
+						 " up in 10G\n", params->port);
+				} else if ((link_status & (1<<1)) &&
+					   (!(link_status & (1<<14)))) {
+					ext_phy_link_up = 1;
+					vars->line_speed = SPEED_2500;
+					DP(NETIF_MSG_LINK,
+						 "port %x: External link"
+						 " up in 2.5G\n", params->port);
+				} else if ((link_status & (1<<0)) &&
+					   (!(link_status & (1<<13)))) {
+					ext_phy_link_up = 1;
+					vars->line_speed = SPEED_1000;
+					DP(NETIF_MSG_LINK,
+						 "port %x: External link"
+						 " up in 1G\n", params->port);
+				} else {
+					ext_phy_link_up = 0;
+					DP(NETIF_MSG_LINK,
+						 "port %x: External link"
+						 " is down\n", params->port);
+				}
+			} else {
+				/* See if 1G link is up for the 8072 */
+				bnx2x_cl45_read(bp, params->port,
+						      ext_phy_type,
+						      ext_phy_addr,
+						      MDIO_AN_DEVAD,
+						      0x8304,
+						      &an1000_status);
+				bnx2x_cl45_read(bp, params->port,
+						      ext_phy_type,
+						      ext_phy_addr,
+						      MDIO_AN_DEVAD,
+						      0x8304,
+						      &an1000_status);
+				if (an1000_status & (1<<1)) {
+					ext_phy_link_up = 1;
+					vars->line_speed = SPEED_1000;
+					DP(NETIF_MSG_LINK,
+						 "port %x: External link"
+						 " up in 1G\n", params->port);
+				} else if (ext_phy_link_up) {
+					ext_phy_link_up = 1;
+					vars->line_speed = SPEED_10000;
+					DP(NETIF_MSG_LINK,
+						 "port %x: External link"
+						 " up in 10G\n", params->port);
 				}
 			}
+
+
 			break;
 		}
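The 0xc820 decode above pairs one speed-detected bit with one link-down bit per speed. A minimal standalone restatement of that decode (a sketch; the helper name is hypothetical, the register layout is taken from the comment in the hunk above):

static u8 bnx2x_8073_decode_link(u16 link_status, u16 *line_speed)
{
	/* bits 0..2: speed detected; bits 13..15: link is down */
	if ((link_status & (1<<2)) && !(link_status & (1<<15))) {
		*line_speed = SPEED_10000;
		return 1;
	}
	if ((link_status & (1<<1)) && !(link_status & (1<<14))) {
		*line_speed = SPEED_2500;
		return 1;
	}
	if ((link_status & (1<<0)) && !(link_status & (1<<13))) {
		*line_speed = SPEED_1000;
		return 1;
	}
	return 0;	/* link down */
}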
 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
@@ -3006,6 +3208,7 @@
 					      MDIO_AN_DEVAD,
 					      MDIO_AN_REG_MASTER_STATUS,
 					      &val2);
+				vars->line_speed = SPEED_10000;
 				DP(NETIF_MSG_LINK,
 					 "SFX7101 AN status 0x%x->Master=%x\n",
 					  val2,
@@ -3100,7 +3303,7 @@
  * link management
  */
 static void bnx2x_link_int_ack(struct link_params *params,
-			     struct link_vars *vars, u16 is_10g)
+			     struct link_vars *vars, u8 is_10g)
 {
 	struct bnx2x *bp = params->bp;
 	u8 port = params->port;
@@ -3181,7 +3384,8 @@
 }
 
 
-static void bnx2x_turn_on_sf(struct bnx2x *bp, u8 port, u8 ext_phy_addr)
+static void bnx2x_turn_on_ef(struct bnx2x *bp, u8 port, u8 ext_phy_addr,
+			   u32 ext_phy_type)
 {
 	u32 cnt = 0;
 	u16 ctrl = 0;
@@ -3192,12 +3396,14 @@
 
 	/* take ext phy out of reset */
 	bnx2x_set_gpio(bp,
-			MISC_REGISTERS_GPIO_2,
-			MISC_REGISTERS_GPIO_HIGH);
+			  MISC_REGISTERS_GPIO_2,
+			  MISC_REGISTERS_GPIO_HIGH,
+			  port);
 
 	bnx2x_set_gpio(bp,
-			MISC_REGISTERS_GPIO_1,
-			MISC_REGISTERS_GPIO_HIGH);
+			  MISC_REGISTERS_GPIO_1,
+			  MISC_REGISTERS_GPIO_HIGH,
+			  port);
 
 	/* wait for 5ms */
 	msleep(5);
@@ -3205,7 +3411,7 @@
 	for (cnt = 0; cnt < 1000; cnt++) {
 		msleep(1);
 		bnx2x_cl45_read(bp, port,
-			      PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
+			      ext_phy_type,
 			      ext_phy_addr,
 			      MDIO_PMA_DEVAD,
 			      MDIO_PMA_REG_CTRL,
@@ -3217,13 +3423,17 @@
 	}
 }
 
-static void bnx2x_turn_off_sf(struct bnx2x *bp)
+static void bnx2x_turn_off_sf(struct bnx2x *bp, u8 port)
 {
 	/* put sf to reset */
-	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, MISC_REGISTERS_GPIO_LOW);
 	bnx2x_set_gpio(bp,
-			MISC_REGISTERS_GPIO_2,
-			MISC_REGISTERS_GPIO_LOW);
+			  MISC_REGISTERS_GPIO_1,
+			  MISC_REGISTERS_GPIO_LOW,
+			  port);
+	bnx2x_set_gpio(bp,
+			  MISC_REGISTERS_GPIO_2,
+			  MISC_REGISTERS_GPIO_LOW,
+			  port);
 }
 
 u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
@@ -3253,7 +3463,8 @@
 
 		/* Take ext phy out of reset */
 		if (!driver_loaded)
-			bnx2x_turn_on_sf(bp, params->port, ext_phy_addr);
+			bnx2x_turn_on_ef(bp, params->port, ext_phy_addr,
+				       ext_phy_type);
 
 		/*  wait for 1ms */
 		msleep(1);
@@ -3276,11 +3487,16 @@
 		version[4] = '\0';
 
 		if (!driver_loaded)
-			bnx2x_turn_off_sf(bp);
+			bnx2x_turn_off_sf(bp, params->port);
 		break;
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
 	{
+		/* Take ext phy out of reset */
+		if (!driver_loaded)
+			bnx2x_turn_on_ef(bp, params->port, ext_phy_addr,
+				       ext_phy_type);
+
 		bnx2x_cl45_read(bp, params->port, ext_phy_type,
 			      ext_phy_addr,
 			      MDIO_PMA_DEVAD,
@@ -3333,7 +3549,7 @@
 	struct bnx2x *bp = params->bp;
 
 	if (is_10g) {
-		 u32 md_devad;
+		u32 md_devad;
 
 		DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
 
@@ -3553,6 +3769,8 @@
 	       u16 hw_led_mode, u32 chip_id)
 {
 	u8 rc = 0;
+	u32 tmp;
+	u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
 	DP(NETIF_MSG_LINK, "bnx2x_set_led: port %x, mode %d\n", port, mode);
 	DP(NETIF_MSG_LINK, "speed 0x%x, hw_led_mode 0x%x\n",
 		 speed, hw_led_mode);
@@ -3561,6 +3779,9 @@
 		REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0);
 		REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
 			   SHARED_HW_CFG_LED_MAC1);
+
+		tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
+		EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE));
 		break;
 
 	case LED_MODE_OPER:
@@ -3572,6 +3793,10 @@
 			   LED_BLINK_RATE_VAL);
 		REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
 			   port*4, 1);
+		tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
+		EMAC_WR(bp, EMAC_REG_EMAC_LED,
+			    (tmp & (~EMAC_LED_OVERRIDE)));
+
 		if (!CHIP_IS_E1H(bp) &&
 		    ((speed == SPEED_2500) ||
 		     (speed == SPEED_1000) ||
@@ -3622,7 +3847,8 @@
 	struct bnx2x *bp = params->bp;
 	u8 port = params->port;
 	u8 rc = 0;
-
+	u8 non_ext_phy;
+	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
 	/* Activate the external PHY */
 	bnx2x_ext_phy_reset(params, vars);
 
@@ -3644,10 +3870,6 @@
 		bnx2x_set_swap_lanes(params);
 	}
 
-	/* Set Parallel Detect */
-	if (params->req_line_speed == SPEED_AUTO_NEG)
-		bnx2x_set_parallel_detection(params, vars->phy_flags);
-
 	if (vars->phy_flags & PHY_XGXS_FLAG) {
 		if (params->req_line_speed &&
 		    ((params->req_line_speed == SPEED_100) ||
@@ -3657,68 +3879,33 @@
 			vars->phy_flags &= ~PHY_SGMII_FLAG;
 		}
 	}
+	/* In case an external phy exists, the line speed will be the
+	   line speed linked up by the external phy. In the direct-connect
+	   case, the line_speed during initialization will be equal to the
+	   req_line_speed */
+	vars->line_speed = params->req_line_speed;
 
-	if (!(vars->phy_flags & PHY_SGMII_FLAG)) {
-		u16 bank, rx_eq;
-
-		rx_eq = ((params->serdes_config &
-			  PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK) >>
-			 PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT);
-
-		DP(NETIF_MSG_LINK, "setting rx eq to 0x%x\n", rx_eq);
-		for (bank = MDIO_REG_BANK_RX0; bank <= MDIO_REG_BANK_RX_ALL;
-		      bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0)) {
-			CL45_WR_OVER_CL22(bp, port,
-					      params->phy_addr,
-					      bank ,
-					      MDIO_RX0_RX_EQ_BOOST,
-					      ((rx_eq &
-				MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK) |
-				MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL));
-		}
-
-		/* forced speed requested? */
-		if (params->req_line_speed != SPEED_AUTO_NEG) {
-			DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
-
-			/* disable autoneg */
-			bnx2x_set_autoneg(params, vars);
-
-			/* program speed and duplex */
-			bnx2x_program_serdes(params);
-			vars->ieee_fc =
-				MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
-
-		} else { /* AN_mode */
-			DP(NETIF_MSG_LINK, "not SGMII, AN\n");
-
-			/* AN enabled */
-			bnx2x_set_brcm_cl37_advertisment(params);
-
-			/* program duplex & pause advertisement (for aneg) */
-			bnx2x_set_ieee_aneg_advertisment(params,
-						       &vars->ieee_fc);
-
-			/* enable autoneg */
-			bnx2x_set_autoneg(params, vars);
-
-			/* enable and restart AN */
-			bnx2x_restart_autoneg(params);
-		}
-
-	} else { /* SGMII mode */
-		DP(NETIF_MSG_LINK, "SGMII\n");
-
-		bnx2x_initialize_sgmii_process(params);
-	}
+	bnx2x_calc_ieee_aneg_adv(params, &vars->ieee_fc);
 
 	/* init ext phy and enable link state int */
-	rc |= bnx2x_ext_phy_init(params, vars);
+	non_ext_phy = ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ||
+		       (params->loopback_mode == LOOPBACK_XGXS_10) ||
+		       (params->loopback_mode == LOOPBACK_EXT_PHY));
+
+	if (non_ext_phy ||
+	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705)) {
+		if (params->req_line_speed == SPEED_AUTO_NEG)
+			bnx2x_set_parallel_detection(params, vars->phy_flags);
+		bnx2x_init_internal_phy(params, vars);
+	}
+
+	if (!non_ext_phy)
+		rc |= bnx2x_ext_phy_init(params, vars);
 
 	bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
-		       (NIG_STATUS_XGXS0_LINK10G |
-			NIG_STATUS_XGXS0_LINK_STATUS |
-			NIG_STATUS_SERDES0_LINK_STATUS));
+		     (NIG_STATUS_XGXS0_LINK10G |
+		      NIG_STATUS_XGXS0_LINK_STATUS |
+		      NIG_STATUS_SERDES0_LINK_STATUS));
 
 	return rc;
 
@@ -3730,15 +3917,23 @@
 	struct bnx2x *bp = params->bp;
 
 	u32 val;
-	DP(NETIF_MSG_LINK, "Phy Initialization started\n");
+	DP(NETIF_MSG_LINK, "Phy Initialization started \n");
 	DP(NETIF_MSG_LINK, "req_speed = %d, req_flowctrl=%d\n",
 		  params->req_line_speed, params->req_flow_ctrl);
 	vars->link_status = 0;
+	vars->phy_link_up = 0;
+	vars->link_up = 0;
+	vars->line_speed = 0;
+	vars->duplex = DUPLEX_FULL;
+	vars->flow_ctrl = FLOW_CTRL_NONE;
+	vars->mac_type = MAC_TYPE_NONE;
+
 	if (params->switch_cfg ==  SWITCH_CFG_1G)
 		vars->phy_flags = PHY_SERDES_FLAG;
 	else
 		vars->phy_flags = PHY_XGXS_FLAG;
 
+
 	/* disable attentions */
 	bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
 		       (NIG_MASK_XGXS0_LINK_STATUS |
@@ -3894,6 +4089,7 @@
 		}
 
 		bnx2x_link_initialize(params, vars);
+		msleep(30);
 		bnx2x_link_int_enable(params);
 	}
 	return 0;
@@ -3943,39 +4139,22 @@
 			/* HW reset */
 
 			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
-				       MISC_REGISTERS_GPIO_OUTPUT_LOW);
+					  MISC_REGISTERS_GPIO_OUTPUT_LOW,
+					  port);
 
 			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-				       MISC_REGISTERS_GPIO_OUTPUT_LOW);
+					  MISC_REGISTERS_GPIO_OUTPUT_LOW,
+					  port);
 
 			DP(NETIF_MSG_LINK, "reset external PHY\n");
-		} else {
-
-			u8 ext_phy_addr = ((ext_phy_config &
-					 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
-					 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
-
-			/* SW reset */
-			bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
-				       MDIO_PMA_DEVAD,
-				       MDIO_PMA_REG_CTRL,
-				       1<<15);
-
-			/* Set Low Power Mode */
-			bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
-				  MDIO_PMA_DEVAD,
-				  MDIO_PMA_REG_CTRL,
-				  1<<11);
-
-
-			if (ext_phy_type ==
-			    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
-				DP(NETIF_MSG_LINK, "Setting 8073 port %d into"
+		} else if (ext_phy_type ==
+			   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
+				DP(NETIF_MSG_LINK, "Setting 8073 port %d into "
 					 "low power mode\n",
 					 port);
 				bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-					MISC_REGISTERS_GPIO_OUTPUT_LOW);
-			}
+					MISC_REGISTERS_GPIO_OUTPUT_LOW,
+						  port);
 		}
 	}
 	/* reset the SerDes/XGXS */
@@ -3995,6 +4174,73 @@
 	return 0;
 }
 
+static u8 bnx2x_update_link_down(struct link_params *params,
+			       struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u8 port = params->port;
+	DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port);
+	bnx2x_set_led(bp, port, LED_MODE_OFF,
+		    0, params->hw_led_mode,
+		    params->chip_id);
+
+	/* indicate no mac active */
+	vars->mac_type = MAC_TYPE_NONE;
+
+	/* update shared memory */
+	vars->link_status = 0;
+	vars->line_speed = 0;
+	bnx2x_update_mng(params, vars->link_status);
+
+	/* activate nig drain */
+	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
+
+	/* reset BigMac */
+	bnx2x_bmac_rx_disable(bp, params->port);
+	REG_WR(bp, GRCBASE_MISC +
+		   MISC_REGISTERS_RESET_REG_2_CLEAR,
+		   (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+	return 0;
+}
+
+static u8 bnx2x_update_link_up(struct link_params *params,
+			     struct link_vars *vars,
+			     u8 link_10g, u32 gp_status)
+{
+	struct bnx2x *bp = params->bp;
+	u8 port = params->port;
+	u8 rc = 0;
+	vars->link_status |= LINK_STATUS_LINK_UP;
+	if (link_10g) {
+		bnx2x_bmac_enable(params, vars, 0);
+		bnx2x_set_led(bp, port, LED_MODE_OPER,
+			    SPEED_10000, params->hw_led_mode,
+			    params->chip_id);
+
+	} else {
+		bnx2x_emac_enable(params, vars, 0);
+		rc = bnx2x_emac_program(params, vars->line_speed,
+				      vars->duplex);
+
+		/* AN complete? */
+		if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
+			if (!(vars->phy_flags &
+			      PHY_SGMII_FLAG))
+				bnx2x_set_sgmii_tx_driver(params);
+		}
+	}
+
+	/* PBF - link up */
+	rc |= bnx2x_pbf_update(params, vars->flow_ctrl,
+			      vars->line_speed);
+
+	/* disable drain */
+	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
+
+	/* update shared memory */
+	bnx2x_update_mng(params, vars->link_status);
+	return rc;
+}
 /* This function should be called upon link interrupt */
 /* In case vars->link_up, driver needs to
 	1. Update the pbf
@@ -4012,10 +4258,10 @@
 {
 	struct bnx2x *bp = params->bp;
 	u8 port = params->port;
-	u16 i;
 	u16 gp_status;
-	u16 link_10g;
-	u8 rc = 0;
+	u8 link_10g;
+	u8 ext_phy_link_up, rc = 0;
+	u32 ext_phy_type;
 
 	DP(NETIF_MSG_LINK, "port %x, XGXS?%x, int_status 0x%x\n",
 	 port,
@@ -4031,15 +4277,16 @@
 	  REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
 	  REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
 
+	ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
 
-	/* avoid fast toggling */
-	for (i = 0; i < 10; i++) {
-		msleep(10);
-		CL45_RD_OVER_CL22(bp, port, params->phy_addr,
-				      MDIO_REG_BANK_GP_STATUS,
-				      MDIO_GP_STATUS_TOP_AN_STATUS1,
-				      &gp_status);
-	}
+	/* Check external link change only for non-direct */
+	ext_phy_link_up = bnx2x_ext_phy_is_link_up(params, vars);
+
+	/* Read gp_status */
+	CL45_RD_OVER_CL22(bp, port, params->phy_addr,
+			      MDIO_REG_BANK_GP_STATUS,
+			      MDIO_GP_STATUS_TOP_AN_STATUS1,
+			      &gp_status);
 
 	rc = bnx2x_link_settings_status(params, vars, gp_status);
 	if (rc != 0)
@@ -4055,73 +4302,177 @@
 
 	bnx2x_link_int_ack(params, vars, link_10g);
 
-	/* link is up only if both local phy and external phy are up */
-	vars->link_up = (vars->phy_link_up &&
-			   bnx2x_ext_phy_is_link_up(params, vars));
+	/* In case the external phy link is up while the internal link is
+	   down (probably not initialized yet, e.g. right after link
+	   initialization), the internal phy needs to be initialized.
+	   Note that after a link down-up caused by a cable plug, the
+	   xgxs link would probably become up again without the need to
+	   initialize it */
 
-	if (!vars->phy_link_up &&
-	    REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18)) {
-		bnx2x_ext_phy_is_link_up(params, vars); /* Clear interrupt */
+	if ((ext_phy_type != PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) &&
+	    (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) &&
+	    (ext_phy_link_up && !vars->phy_link_up))
+		bnx2x_init_internal_phy(params, vars);
+
+	/* link is up only if both local phy and external phy are up */
+	vars->link_up = (ext_phy_link_up && vars->phy_link_up);
+
+	if (vars->link_up)
+		rc = bnx2x_update_link_up(params, vars, link_10g, gp_status);
+	else
+		rc = bnx2x_update_link_down(params, vars);
+
+	return rc;
+}
+
+static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base)
+{
+	u8 ext_phy_addr[PORT_MAX];
+	u16 val;
+	s8 port;
+
+	/* PART1 - Reset both phys */
+	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+		/* Extract the ext phy address for the port */
+		u32 ext_phy_config = REG_RD(bp, shmem_base +
+					offsetof(struct shmem_region,
+		   dev_info.port_hw_config[port].external_phy_config));
+
+		/* disable attentions */
+		bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
+			     (NIG_MASK_XGXS0_LINK_STATUS |
+			      NIG_MASK_XGXS0_LINK10G |
+			      NIG_MASK_SERDES0_LINK_STATUS |
+			      NIG_MASK_MI_INT));
+
+		ext_phy_addr[port] =
+			((ext_phy_config &
+			      PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
+			      PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
+
+		/* Need to take the phy out of low power mode in order
+		   to be able to access its registers */
+		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+				  MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
+
+		/* Reset the phy */
+		bnx2x_cl45_write(bp, port,
+			       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+			       ext_phy_addr[port],
+			       MDIO_PMA_DEVAD,
+			       MDIO_PMA_REG_CTRL,
+			       1<<15);
 	}
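The 1<<15 written to MDIO_PMA_REG_CTRL above is the clause 45 soft-reset bit of the PMA/PMD control register, which is why the loop is followed by a 150ms settling delay before the firmware download. Wrapped up as a sketch (hypothetical helper name; that the reset bit is self-clearing per IEEE 802.3 is an assumption, not spelled out in the patch):

static void bnx2x_8073_soft_reset(struct bnx2x *bp, u8 port, u8 phy_addr)
{
	/* clause 45 reset: bit 15 of the PMA/PMD control register */
	bnx2x_cl45_write(bp, port,
		       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
		       phy_addr,
		       MDIO_PMA_DEVAD,
		       MDIO_PMA_REG_CTRL,
		       1<<15);
}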
 
-	if (vars->link_up) {
-		vars->link_status |= LINK_STATUS_LINK_UP;
-		if (link_10g) {
-			bnx2x_bmac_enable(params, vars, 0);
-			bnx2x_set_led(bp, port, LED_MODE_OPER,
-				    SPEED_10000, params->hw_led_mode,
-				    params->chip_id);
+	/* Add delay of 150ms after reset */
+	msleep(150);
 
-		} else {
-			bnx2x_emac_enable(params, vars, 0);
-			rc = bnx2x_emac_program(params, vars->line_speed,
-					      vars->duplex);
+	/* PART2 - Download firmware to both phys */
+	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+		u16 fw_ver1;
 
-			/* AN complete? */
-			if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
-				if (!(vars->phy_flags &
-				      PHY_SGMII_FLAG))
-					bnx2x_set_sgmii_tx_driver(params);
-			}
+		bnx2x_bcm8073_external_rom_boot(bp, port,
+						      ext_phy_addr[port]);
+
+		bnx2x_cl45_read(bp, port, PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+			      ext_phy_addr[port],
+			      MDIO_PMA_DEVAD,
+			      MDIO_PMA_REG_ROM_VER1, &fw_ver1);
+		if (fw_ver1 == 0) {
+			DP(NETIF_MSG_LINK,
+				 "bnx2x_8073_common_init_phy port %x "
+				 "fw Download failed\n", port);
+			return -EINVAL;
 		}
 
-		/* PBF - link up */
-		rc |= bnx2x_pbf_update(params, vars->flow_ctrl,
-				      vars->line_speed);
+		/* Only set bit 10 = 1 (Tx power down) */
+		bnx2x_cl45_read(bp, port,
+			      PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+			      ext_phy_addr[port],
+			      MDIO_PMA_DEVAD,
+			      MDIO_PMA_REG_TX_POWER_DOWN, &val);
 
-		/* disable drain */
-		REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
+		/* Phase1 of TX_POWER_DOWN reset */
+		bnx2x_cl45_write(bp, port,
+			       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+			       ext_phy_addr[port],
+			       MDIO_PMA_DEVAD,
+			       MDIO_PMA_REG_TX_POWER_DOWN,
+			       (val | 1<<10));
+	}
 
-		/* update shared memory */
-		bnx2x_update_mng(params, vars->link_status);
+	/* Toggle the transmitter: power down and then up, with a 600ms
+	   delay in between */
+	msleep(600);
 
-	} else { /* link down */
-		DP(NETIF_MSG_LINK, "Port %x: Link is down\n", params->port);
-		bnx2x_set_led(bp, port, LED_MODE_OFF,
-			    0, params->hw_led_mode,
-			    params->chip_id);
+	/* PART3 - complete TX_POWER_DOWN process, and set GPIO2 back to low */
+	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+		/* Phase2 of POWER_DOWN_RESET*/
+		/* Release bit 10 (Release Tx power down) */
+		bnx2x_cl45_read(bp, port,
+			      PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+			      ext_phy_addr[port],
+			      MDIO_PMA_DEVAD,
+			      MDIO_PMA_REG_TX_POWER_DOWN, &val);
 
-		/* indicate no mac active */
-		vars->mac_type = MAC_TYPE_NONE;
+		bnx2x_cl45_write(bp, port,
+			       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+			       ext_phy_addr[port],
+			       MDIO_PMA_DEVAD,
+			       MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
+		msleep(15);
 
-		/* update shared memory */
-		vars->link_status = 0;
-		bnx2x_update_mng(params, vars->link_status);
+		/* Read-modify-write the SPI-ROM version select register */
+		bnx2x_cl45_read(bp, port,
+			      PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+			      ext_phy_addr[port],
+			      MDIO_PMA_DEVAD,
+			      MDIO_PMA_REG_EDC_FFE_MAIN, &val);
+		bnx2x_cl45_write(bp, port,
+			      PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+			      ext_phy_addr[port],
+			      MDIO_PMA_DEVAD,
+			      MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12)));
 
-		/* activate nig drain */
-		REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
+		/* set GPIO2 back to LOW */
+		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+				  MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+	}
+	return 0;
 
-		/* reset BigMac */
-		bnx2x_bmac_rx_disable(bp, params->port);
-		REG_WR(bp, GRCBASE_MISC +
-			   MISC_REGISTERS_RESET_REG_2_CLEAR,
-			   (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+}
 
+u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base)
+{
+	u8 rc = 0;
+	u32 ext_phy_type;
+
+	DP(NETIF_MSG_LINK, "bnx2x_common_init_phy\n");
+
+	/* Read the ext_phy_type for arbitrary port(0) */
+	ext_phy_type = XGXS_EXT_PHY_TYPE(
+			REG_RD(bp, shmem_base +
+			   offsetof(struct shmem_region,
+			     dev_info.port_hw_config[0].external_phy_config)));
+
+	switch (ext_phy_type) {
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
+	{
+		rc = bnx2x_8073_common_init_phy(bp, shmem_base);
+		break;
+	}
+	default:
+		DP(NETIF_MSG_LINK,
+			 "bnx2x_common_init_phy: ext_phy 0x%x not required\n",
+			 ext_phy_type);
+		break;
 	}
 
 	return rc;
 }
 
+
+
 static void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, u8 port, u8 phy_addr)
 {
 	u16 val, cnt;
@@ -4154,7 +4505,7 @@
 }
 #define RESERVED_SIZE 256
 /* max application is 160K bytes - data at end of RAM */
-#define MAX_APP_SIZE 160*1024 - RESERVED_SIZE
+#define MAX_APP_SIZE (160*1024 - RESERVED_SIZE)
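The added parentheses are not cosmetic: a macro body is substituted textually, so any use of MAX_APP_SIZE that binds tighter than subtraction would miscompute with the old definition. A worked example (with RESERVED_SIZE = 256):

	/* old: MAX_APP_SIZE*2 ->  160*1024 - 256*2   == 163328 (wrong)   */
	/* new: MAX_APP_SIZE*2 -> (160*1024 - 256)*2  == 327168 (correct) */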
 
 /* Header is 14 bytes */
 #define HEADER_SIZE 14
@@ -4192,12 +4543,12 @@
 		size = MAX_APP_SIZE+HEADER_SIZE;
 	}
 	DP(NETIF_MSG_LINK, "File version is %c%c\n", data[0x14e], data[0x14f]);
-	DP(NETIF_MSG_LINK, "                %c%c\n", data[0x150], data[0x151]);
+	DP(NETIF_MSG_LINK, "  	      %c%c\n", data[0x150], data[0x151]);
 	/* Put the DSP in download mode by setting FLASH_CFG[2] to 1
 	   and issuing a reset.*/
 
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
-			  MISC_REGISTERS_GPIO_HIGH);
+			  MISC_REGISTERS_GPIO_HIGH, port);
 
 	bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
 
@@ -4429,7 +4780,8 @@
 	}
 
 	/* DSP Remove Download Mode */
-	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, MISC_REGISTERS_GPIO_LOW);
+	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
+			  MISC_REGISTERS_GPIO_LOW, port);
 
 	bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
 
@@ -4437,7 +4789,7 @@
 	for (cnt = 0; cnt < 100; cnt++)
 		msleep(5);
 
-	bnx2x_hw_reset(bp);
+	bnx2x_hw_reset(bp, port);
 
 	for (cnt = 0; cnt < 100; cnt++)
 		msleep(5);
@@ -4473,7 +4825,7 @@
 		      MDIO_PMA_REG_7101_VER2,
 		      &image_revision2);
 
-	if (data[0x14e]	!= (image_revision2&0xFF) ||
+	if (data[0x14e] != (image_revision2&0xFF) ||
 	    data[0x14f] != ((image_revision2&0xFF00)>>8) ||
 	    data[0x150] != (image_revision1&0xFF) ||
 	    data[0x151] != ((image_revision1&0xFF00)>>8)) {
@@ -4508,11 +4860,11 @@
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
 		/* Take ext phy out of reset */
 		if (!driver_loaded)
-			bnx2x_turn_on_sf(bp, port, ext_phy_addr);
+			bnx2x_turn_on_ef(bp, port, ext_phy_addr, ext_phy_type);
 		rc = bnx2x_sfx7101_flash_download(bp, port, ext_phy_addr,
 						data, size);
 		if (!driver_loaded)
-			bnx2x_turn_off_sf(bp);
+			bnx2x_turn_off_sf(bp, port);
 		break;
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
diff --git a/drivers/net/bnx2x_link.h b/drivers/net/bnx2x_link.h
index 714d37a..86d54a1 100644
--- a/drivers/net/bnx2x_link.h
+++ b/drivers/net/bnx2x_link.h
@@ -55,14 +55,17 @@
 #define LOOPBACK_BMAC	2
 #define LOOPBACK_XGXS_10	3
 #define LOOPBACK_EXT_PHY	4
+#define LOOPBACK_EXT	5
 
 	u16 req_duplex;
 	u16 req_flow_ctrl;
+	u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
+				req_flow_ctrl is set to AUTO */
 	u16 req_line_speed; /* Also determine AutoNeg */
 
 	/* Device parameters */
 	u8 mac_addr[6];
-	u16 mtu;
+
 
 
 	/* shmem parameters */
@@ -140,7 +143,7 @@
 		  u8 phy_addr, u8 devad, u16 reg, u16 val);
 
 /* Reads the link_status from the shmem,
-   and update the link vars accordinaly */
+   and updates the link vars accordingly */
 void bnx2x_link_status_update(struct link_params *input,
 			    struct link_vars *output);
 /* returns string representing the fw_version of the external phy */
@@ -149,7 +152,7 @@
 
 /* Set/Unset the led
    Basically, the CLC takes care of the led for the link, but in case one needs
-   to set/unset the led unnatually, set the "mode" to LED_MODE_OPER to
+   to set/unset the led unnaturally, set the "mode" to LED_MODE_OPER to
    blink the led, and LED_MODE_OFF to set the led off.*/
 u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed,
 	       u16 hw_led_mode, u32 chip_id);
@@ -164,5 +167,7 @@
 	otherwise link is down*/
 u8 bnx2x_test_link(struct link_params *input, struct link_vars *vars);
 
+/* One-time initialization for external phy after power up */
+u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base);
 
 #endif /* BNX2X_LINK_H */
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 272a4bd..3e7dc17 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -60,8 +60,8 @@
 #include "bnx2x.h"
 #include "bnx2x_init.h"
 
-#define DRV_MODULE_VERSION      "1.45.6"
-#define DRV_MODULE_RELDATE      "2008/06/23"
+#define DRV_MODULE_VERSION      "1.45.17"
+#define DRV_MODULE_RELDATE      "2008/08/13"
 #define BNX2X_BC_VER		0x040200
 
 /* Time in jiffies before concluding the transmitter is hung */
@@ -76,23 +76,21 @@
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_MODULE_VERSION);
 
+static int disable_tpa;
 static int use_inta;
 static int poll;
 static int debug;
-static int disable_tpa;
-static int nomcp;
 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
 static int use_multi;
 
+module_param(disable_tpa, int, 0);
 module_param(use_inta, int, 0);
 module_param(poll, int, 0);
 module_param(debug, int, 0);
-module_param(disable_tpa, int, 0);
-module_param(nomcp, int, 0);
+MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
 MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
 MODULE_PARM_DESC(poll, "use polling (for debug)");
 MODULE_PARM_DESC(debug, "default debug msglevel");
-MODULE_PARM_DESC(nomcp, "ignore management CPU");
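With nomcp gone, every remaining parameter is documented; TPA (LRO) can now be turned off at module load time in the usual way, e.g.:

	modprobe bnx2x disable_tpa=1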
 
 #ifdef BNX2X_MULTI
 module_param(use_multi, int, 0);
@@ -237,17 +235,16 @@
 	while (*wb_comp != DMAE_COMP_VAL) {
 		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
 
-		/* adjust delay for emulation/FPGA */
-		if (CHIP_REV_IS_SLOW(bp))
-			msleep(100);
-		else
-			udelay(5);
-
 		if (!cnt) {
 			BNX2X_ERR("dmae timeout!\n");
 			break;
 		}
 		cnt--;
+		/* adjust delay for emulation/FPGA */
+		if (CHIP_REV_IS_SLOW(bp))
+			msleep(100);
+		else
+			udelay(5);
 	}
 
 	mutex_unlock(&bp->dmae_mutex);
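Both DMAE completion loops are reshaped into the usual poll-with-timeout form: check the remaining budget first, then decrement and delay, so the code no longer sleeps one extra interval before reporting the timeout. The generic shape of the reordered loop (a sketch mirroring the code above):

	while (*wb_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		udelay(5);	/* msleep(100) on emulation/FPGA */
	}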
@@ -310,17 +307,16 @@
 
 	while (*wb_comp != DMAE_COMP_VAL) {
 
-		/* adjust delay for emulation/FPGA */
-		if (CHIP_REV_IS_SLOW(bp))
-			msleep(100);
-		else
-			udelay(5);
-
 		if (!cnt) {
 			BNX2X_ERR("dmae timeout!\n");
 			break;
 		}
 		cnt--;
+		/* adjust delay for emulation/FPGA */
+		if (CHIP_REV_IS_SLOW(bp))
+			msleep(100);
+		else
+			udelay(5);
 	}
 	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
 	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
@@ -503,6 +499,9 @@
 	int i;
 	u16 j, start, end;
 
+	bp->stats_state = STATS_STATE_DISABLED;
+	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
+
 	BNX2X_ERR("begin crash dump -----------------\n");
 
 	for_each_queue(bp, i) {
@@ -513,17 +512,20 @@
 			  "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
 			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
 			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
-		BNX2X_ERR("          rx_comp_prod(%x)  rx_comp_cons(%x)"
-			  "  *rx_cons_sb(%x)  *rx_bd_cons_sb(%x)"
-			  "  rx_sge_prod(%x)  last_max_sge(%x)\n",
-			  fp->rx_comp_prod, fp->rx_comp_cons,
-			  le16_to_cpu(*fp->rx_cons_sb),
-			  le16_to_cpu(*fp->rx_bd_cons_sb),
-			  fp->rx_sge_prod, fp->last_max_sge);
-		BNX2X_ERR("          fp_c_idx(%x)  fp_u_idx(%x)"
-			  "  bd data(%x,%x)  rx_alloc_failed(%lx)\n",
-			  fp->fp_c_idx, fp->fp_u_idx, hw_prods->packets_prod,
-			  hw_prods->bds_prod, fp->rx_alloc_failed);
+		BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
+			  "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
+			  "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
+			  fp->rx_bd_prod, fp->rx_bd_cons,
+			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
+			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
+		BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
+			  "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
+			  "  *sb_u_idx(%x)  bd data(%x,%x)\n",
+			  fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
+			  fp->status_blk->c_status_block.status_block_index,
+			  fp->fp_u_idx,
+			  fp->status_blk->u_status_block.status_block_index,
+			  hw_prods->packets_prod, hw_prods->bds_prod);
 
 		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
 		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
@@ -553,8 +555,8 @@
 				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
 		}
 
-		start = 0;
-		end = RX_SGE_CNT*NUM_RX_SGE_PAGES;
+		start = RX_SGE(fp->rx_sge_prod);
+		end = RX_SGE(fp->last_max_sge);
 		for (j = start; j < end; j++) {
 			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
 			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
@@ -582,9 +584,6 @@
 	bnx2x_fw_dump(bp);
 	bnx2x_mc_assert(bp);
 	BNX2X_ERR("end crash dump -----------------\n");
-
-	bp->stats_state = STATS_STATE_DISABLED;
-	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
 }
 
 static void bnx2x_int_enable(struct bnx2x *bp)
@@ -684,7 +683,8 @@
 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
 				u8 storm, u16 index, u8 op, u8 update)
 {
-	u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
+	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
+		       COMMAND_REG_INT_ACK);
 	struct igu_ack_register igu_ack;
 
 	igu_ack.status_block_index = index;
@@ -694,9 +694,9 @@
 			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
 			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
 
-	DP(BNX2X_MSG_OFF, "write 0x%08x to IGU addr 0x%x\n",
-	   (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr);
-	REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack));
+	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
+	   (*(u32 *)&igu_ack), hc_addr);
+	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
 }
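Interrupt acknowledgement now targets the per-port HC command block instead of the IGU BAR. Assuming the layout implied by the code above (one 32-byte command block per port), the ack address resolves as:

	/* sketch of the hc_addr arithmetic in bnx2x_ack_sb() */
	u32 hc_addr = HC_REG_COMMAND_REG + BP_PORT(bp)*32 + COMMAND_REG_INT_ACK;
	/* port 0: HC_REG_COMMAND_REG + COMMAND_REG_INT_ACK      */
	/* port 1: HC_REG_COMMAND_REG + 32 + COMMAND_REG_INT_ACK */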
 
 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
@@ -716,36 +716,15 @@
 	return rc;
 }
 
-static inline int bnx2x_has_work(struct bnx2x_fastpath *fp)
-{
-	u16 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
-
-	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
-		rx_cons_sb++;
-
-	if ((fp->rx_comp_cons != rx_cons_sb) ||
-	    (fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) ||
-	    (fp->tx_pkt_prod != fp->tx_pkt_cons))
-		return 1;
-
-	return 0;
-}
-
 static u16 bnx2x_ack_int(struct bnx2x *bp)
 {
-	u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
-	u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr);
+	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
+		       COMMAND_REG_SIMD_MASK);
+	u32 result = REG_RD(bp, hc_addr);
 
-	DP(BNX2X_MSG_OFF, "read 0x%08x from IGU addr 0x%x\n",
-	   result, BAR_IGU_INTMEM + igu_addr);
+	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
+	   result, hc_addr);
 
-#ifdef IGU_DEBUG
-#warning IGU_DEBUG active
-	if (result == 0) {
-		BNX2X_ERR("read %x from IGU\n", result);
-		REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
-	}
-#endif
 	return result;
 }
 
@@ -898,6 +877,7 @@
 		netif_tx_lock(bp->dev);
 
 		if (netif_queue_stopped(bp->dev) &&
+		    (bp->state == BNX2X_STATE_OPEN) &&
 		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
 			netif_wake_queue(bp->dev);
 
@@ -905,6 +885,7 @@
 	}
 }
 
+
 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
 			   union eth_rx_cqe *rr_cqe)
 {
@@ -960,6 +941,7 @@
 		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
 		break;
 
+
 	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
 	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
 		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
@@ -1169,8 +1151,8 @@
 	memset(fp->sge_mask, 0xff,
 	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
 
-	/* Clear the two last indeces in the page to 1:
-	   these are the indeces that correspond to the "next" element,
+	/* Clear the two last indices in the page to 1:
+	   these are the indices that correspond to the "next" element,
 	   hence will never be indicated and should be removed from
 	   the calculations. */
 	bnx2x_clear_sge_mask_next_elems(fp);
@@ -1261,7 +1243,7 @@
 		   where we are and drop the whole packet */
 		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
 		if (unlikely(err)) {
-			fp->rx_alloc_failed++;
+			bp->eth_stats.rx_skb_alloc_failed++;
 			return err;
 		}
 
@@ -1297,14 +1279,13 @@
 	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
 			 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
 
-	/* if alloc failed drop the packet and keep the buffer in the bin */
 	if (likely(new_skb)) {
+		/* fix ip xsum and give it to the stack */
+		/* (no need to map the new skb) */
 
 		prefetch(skb);
 		prefetch(((char *)(skb)) + 128);
 
-		/* else fix ip xsum and give it to the stack */
-		/* (no need to map the new skb) */
 #ifdef BNX2X_STOP_ON_ERROR
 		if (pad + len > bp->rx_buf_size) {
 			BNX2X_ERR("skb_put is about to fail...  "
@@ -1353,9 +1334,10 @@
 		fp->tpa_pool[queue].skb = new_skb;
 
 	} else {
+		/* else drop the packet and keep the buffer in the bin */
 		DP(NETIF_MSG_RX_STATUS,
 		   "Failed to allocate new skb - dropping packet!\n");
-		fp->rx_alloc_failed++;
+		bp->eth_stats.rx_skb_alloc_failed++;
 	}
 
 	fp->tpa_state[queue] = BNX2X_TPA_STOP;
@@ -1390,7 +1372,6 @@
 	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
 	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
 	int rx_pkt = 0;
-	u16 queue;
 
 #ifdef BNX2X_STOP_ON_ERROR
 	if (unlikely(bp->panic))
@@ -1456,7 +1437,7 @@
 			if ((!fp->disable_tpa) &&
 			    (TPA_TYPE(cqe_fp_flags) !=
 					(TPA_TYPE_START | TPA_TYPE_END))) {
-				queue = cqe->fast_path_cqe.queue_index;
+				u16 queue = cqe->fast_path_cqe.queue_index;
 
 				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
 					DP(NETIF_MSG_RX_STATUS,
@@ -1503,11 +1484,10 @@
 
 			/* is this an error packet? */
 			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
-			/* do we sometimes forward error packets anyway? */
 				DP(NETIF_MSG_RX_ERR,
 				   "ERROR  flags %x  rx packet %u\n",
 				   cqe_fp_flags, sw_comp_cons);
-				/* TBD make sure MC counts this as a drop */
+				bp->eth_stats.rx_err_discard_pkt++;
 				goto reuse_rx;
 			}
 
@@ -1524,7 +1504,7 @@
 					DP(NETIF_MSG_RX_ERR,
 					   "ERROR  packet dropped "
 					   "because of alloc failure\n");
-					fp->rx_alloc_failed++;
+					bp->eth_stats.rx_skb_alloc_failed++;
 					goto reuse_rx;
 				}
 
@@ -1550,7 +1530,7 @@
 				DP(NETIF_MSG_RX_ERR,
 				   "ERROR  packet dropped because "
 				   "of alloc failure\n");
-				fp->rx_alloc_failed++;
+				bp->eth_stats.rx_skb_alloc_failed++;
 reuse_rx:
 				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
 				goto next_rx;
@@ -1559,10 +1539,12 @@
 			skb->protocol = eth_type_trans(skb, bp->dev);
 
 			skb->ip_summed = CHECKSUM_NONE;
-			if (bp->rx_csum && BNX2X_RX_SUM_OK(cqe))
-				skb->ip_summed = CHECKSUM_UNNECESSARY;
-
-			/* TBD do we pass bad csum packets in promisc */
+			if (bp->rx_csum) {
+				if (likely(BNX2X_RX_CSUM_OK(cqe)))
+					skb->ip_summed = CHECKSUM_UNNECESSARY;
+				else
+					bp->eth_stats.hw_csum_err++;
+			}
 		}
 
 #ifdef BCM_VLAN
@@ -1615,6 +1597,12 @@
 	struct net_device *dev = bp->dev;
 	int index = FP_IDX(fp);
 
+	/* Return here if interrupt is disabled */
+	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
+		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
+		return IRQ_HANDLED;
+	}
+
 	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
 	   index, FP_SB_ID(fp));
 	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
@@ -1648,17 +1636,17 @@
 	}
 	DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);
 
-#ifdef BNX2X_STOP_ON_ERROR
-	if (unlikely(bp->panic))
-		return IRQ_HANDLED;
-#endif
-
 	/* Return here if interrupt is disabled */
 	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
 		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
 		return IRQ_HANDLED;
 	}
 
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic))
+		return IRQ_HANDLED;
+#endif
+
 	mask = 0x2 << bp->fp[0].sb_id;
 	if (status & mask) {
 		struct bnx2x_fastpath *fp = &bp->fp[0];
@@ -1699,11 +1687,12 @@
  * General service functions
  */
 
-static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
+static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
 {
 	u32 lock_status;
 	u32 resource_bit = (1 << resource);
-	u8 port = BP_PORT(bp);
+	int func = BP_FUNC(bp);
+	u32 hw_lock_control_reg;
 	int cnt;
 
 	/* Validating that the resource is within range */
@@ -1714,8 +1703,15 @@
 		return -EINVAL;
 	}
 
+	if (func <= 5) {
+		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
+	} else {
+		hw_lock_control_reg =
+				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
+	}
+
 	/* Validating that the resource is not already taken */
-	lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
+	lock_status = REG_RD(bp, hw_lock_control_reg);
 	if (lock_status & resource_bit) {
 		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
 		   lock_status, resource_bit);
@@ -1725,9 +1721,8 @@
 	/* Try for 1 second every 5ms */
 	for (cnt = 0; cnt < 200; cnt++) {
 		/* Try to acquire the lock */
-		REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8 + 4,
-		       resource_bit);
-		lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
+		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
+		lock_status = REG_RD(bp, hw_lock_control_reg);
 		if (lock_status & resource_bit)
 			return 0;
 
@@ -1737,11 +1732,12 @@
 	return -EAGAIN;
 }
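The HW lock registers are now selected per function instead of per port: functions 0-5 map onto one contiguous register block, while functions 6 and 7 wrap onto a second one. A sketch of the mapping (hypothetical helper; register names from the code above):

static u32 bnx2x_hw_lock_reg(int func)
{
	/* func 0..5 -> MISC_REG_DRIVER_CONTROL_1 + func*8
	   func 6..7 -> MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8 */
	return (func <= 5) ?
		(MISC_REG_DRIVER_CONTROL_1 + func*8) :
		(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
}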
 
-static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
+static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
 {
 	u32 lock_status;
 	u32 resource_bit = (1 << resource);
-	u8 port = BP_PORT(bp);
+	int func = BP_FUNC(bp);
+	u32 hw_lock_control_reg;
 
 	/* Validating that the resource is within range */
 	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
@@ -1751,20 +1747,27 @@
 		return -EINVAL;
 	}
 
+	if (func <= 5) {
+		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
+	} else {
+		hw_lock_control_reg =
+				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
+	}
+
 	/* Validating that the resource is currently taken */
-	lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
+	lock_status = REG_RD(bp, hw_lock_control_reg);
 	if (!(lock_status & resource_bit)) {
 		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
 		   lock_status, resource_bit);
 		return -EFAULT;
 	}
 
-	REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8, resource_bit);
+	REG_WR(bp, hw_lock_control_reg, resource_bit);
 	return 0;
 }
 
 /* HW Lock for shared dual port PHYs */
-static void bnx2x_phy_hw_lock(struct bnx2x *bp)
+static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
 {
 	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
 
@@ -1772,25 +1775,25 @@
 
 	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
 	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
-		bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
+		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
 }
 
-static void bnx2x_phy_hw_unlock(struct bnx2x *bp)
+static void bnx2x_release_phy_lock(struct bnx2x *bp)
 {
 	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
 
 	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
 	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
-		bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
+		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
 
 	mutex_unlock(&bp->port.phy_mutex);
 }
 
-int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
+int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
 {
 	/* The GPIO should be swapped if swap register is set and active */
 	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
-			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ BP_PORT(bp);
+			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
 	int gpio_shift = gpio_num +
 			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
 	u32 gpio_mask = (1 << gpio_shift);
@@ -1801,7 +1804,7 @@
 		return -EINVAL;
 	}
 
-	bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 	/* read GPIO and mask except the float bits */
 	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
 
@@ -1822,7 +1825,7 @@
 		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
 		break;
 
-	case MISC_REGISTERS_GPIO_INPUT_HI_Z :
+	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
 		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
 		   gpio_num, gpio_shift);
 		/* set FLOAT */
@@ -1834,7 +1837,7 @@
 	}
 
 	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
-	bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 
 	return 0;
 }
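bnx2x_set_gpio() now takes the port explicitly, but the swap rule is unchanged: the effective GPIO port is XOR-flipped only when both the port-swap strap and its override read non-zero. In sketch form:

	int swap = REG_RD(bp, NIG_REG_PORT_SWAP) &&
		   REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
	int gpio_port = swap ^ port;	/* e.g. port 1 with swap -> GPIO port 0 */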
@@ -1850,19 +1853,19 @@
 		return -EINVAL;
 	}
 
-	bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
 	/* read SPIO and mask except the float bits */
 	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
 
 	switch (mode) {
-	case MISC_REGISTERS_SPIO_OUTPUT_LOW :
+	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
 		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
 		/* clear FLOAT and set CLR */
 		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
 		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
 		break;
 
-	case MISC_REGISTERS_SPIO_OUTPUT_HIGH :
+	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
 		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
 		/* clear FLOAT and set SET */
 		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
@@ -1880,7 +1883,7 @@
 	}
 
 	REG_WR(bp, MISC_REG_SPIO, spio_reg);
-	bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_SPIO);
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
 
 	return 0;
 }
@@ -1940,46 +1943,63 @@
 
 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
 {
-	u8 rc;
+	if (!BP_NOMCP(bp)) {
+		u8 rc;
 
-	/* Initialize link parameters structure variables */
-	bp->link_params.mtu = bp->dev->mtu;
+		/* Initialize link parameters structure variables */
+		/* It is recommended to turn off RX FC for jumbo frames
+		   for better performance */
+		if (IS_E1HMF(bp))
+			bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;
+		else if (bp->dev->mtu > 5000)
+			bp->link_params.req_fc_auto_adv = FLOW_CTRL_TX;
+		else
+			bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;
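In other words, the advertised flow control resolves to BOTH for E1H multi-function and for MTUs up to 5000 bytes, and to TX-only for jumbo frames on a single-function device (RX pause off for performance, per the comment). A compact restatement (sketch, equivalent to the if/else chain above):

	bp->link_params.req_fc_auto_adv =
		(IS_E1HMF(bp) || (bp->dev->mtu <= 5000)) ?
			FLOW_CTRL_BOTH : FLOW_CTRL_TX;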
 
-	bnx2x_phy_hw_lock(bp);
-	rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
-	bnx2x_phy_hw_unlock(bp);
+		bnx2x_acquire_phy_lock(bp);
+		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
+		bnx2x_release_phy_lock(bp);
 
-	if (bp->link_vars.link_up)
-		bnx2x_link_report(bp);
+		if (bp->link_vars.link_up)
+			bnx2x_link_report(bp);
 
-	bnx2x_calc_fc_adv(bp);
+		bnx2x_calc_fc_adv(bp);
 
-	return rc;
+		return rc;
+	}
+	BNX2X_ERR("Bootcode is missing -not initializing link\n");
+	return -EINVAL;
 }
 
 static void bnx2x_link_set(struct bnx2x *bp)
 {
-	bnx2x_phy_hw_lock(bp);
-	bnx2x_phy_init(&bp->link_params, &bp->link_vars);
-	bnx2x_phy_hw_unlock(bp);
+	if (!BP_NOMCP(bp)) {
+		bnx2x_acquire_phy_lock(bp);
+		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
+		bnx2x_release_phy_lock(bp);
 
-	bnx2x_calc_fc_adv(bp);
+		bnx2x_calc_fc_adv(bp);
+	} else
+		BNX2X_ERR("Bootcode is missing -not setting link\n");
 }
 
 static void bnx2x__link_reset(struct bnx2x *bp)
 {
-	bnx2x_phy_hw_lock(bp);
-	bnx2x_link_reset(&bp->link_params, &bp->link_vars);
-	bnx2x_phy_hw_unlock(bp);
+	if (!BP_NOMCP(bp)) {
+		bnx2x_acquire_phy_lock(bp);
+		bnx2x_link_reset(&bp->link_params, &bp->link_vars);
+		bnx2x_release_phy_lock(bp);
+	} else
+		BNX2X_ERR("Bootcode is missing -not resetting link\n");
 }
 
 static u8 bnx2x_link_test(struct bnx2x *bp)
 {
 	u8 rc;
 
-	bnx2x_phy_hw_lock(bp);
+	bnx2x_acquire_phy_lock(bp);
 	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
-	bnx2x_phy_hw_unlock(bp);
+	bnx2x_release_phy_lock(bp);
 
 	return rc;
 }
@@ -1991,7 +2011,7 @@
      sum of vn_min_rates
        or
      0 - if all the min_rates are 0.
-     In the later case fainess algorithm should be deactivated.
+     In the latter case the fairness algorithm should be deactivated.
      If not all min_rates are zero then those that are zeroes will
      be set to 1.
  */
@@ -2114,7 +2134,7 @@
 				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
 		/* If FAIRNESS is enabled (not all min rates are zeroes) and
 		   if current min rate is zero - set it to 1.
-		   This is a requirment of the algorithm. */
+		   This is a requirement of the algorithm. */
 		if ((vn_min_rate == 0) && wsum)
 			vn_min_rate = DEF_MIN_RATE;
 		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
@@ -2203,9 +2223,9 @@
 	/* Make sure that we are synced with the current statistics */
 	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 
-	bnx2x_phy_hw_lock(bp);
+	bnx2x_acquire_phy_lock(bp);
 	bnx2x_link_update(&bp->link_params, &bp->link_vars);
-	bnx2x_phy_hw_unlock(bp);
+	bnx2x_release_phy_lock(bp);
 
 	if (bp->link_vars.link_up) {
 
@@ -2357,7 +2377,7 @@
 }
 
 /* acquire split MCP access lock register */
-static int bnx2x_lock_alr(struct bnx2x *bp)
+static int bnx2x_acquire_alr(struct bnx2x *bp)
 {
 	u32 i, j, val;
 	int rc = 0;
@@ -2374,15 +2394,15 @@
 		msleep(5);
 	}
 	if (!(val & (1L << 31))) {
-		BNX2X_ERR("Cannot acquire nvram interface\n");
+		BNX2X_ERR("Cannot acquire MCP access lock register\n");
 		rc = -EBUSY;
 	}
 
 	return rc;
 }
 
-/* Release split MCP access lock register */
-static void bnx2x_unlock_alr(struct bnx2x *bp)
+/* release split MCP access lock register */
+static void bnx2x_release_alr(struct bnx2x *bp)
 {
 	u32 val = 0;
 
@@ -2395,7 +2415,6 @@
 	u16 rc = 0;
 
 	barrier(); /* status block is written to by the chip */
-
 	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
 		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
 		rc |= 1;
@@ -2426,26 +2445,31 @@
 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
 {
 	int port = BP_PORT(bp);
-	int func = BP_FUNC(bp);
-	u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_FUNC_BASE * func) * 8;
+	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
+		       COMMAND_REG_ATTN_BITS_SET);
 	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
 			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
 	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
 				       NIG_REG_MASK_INTERRUPT_PORT0;
+	u32 aeu_mask;
 
-	if (~bp->aeu_mask & (asserted & 0xff))
-		BNX2X_ERR("IGU ERROR\n");
 	if (bp->attn_state & asserted)
 		BNX2X_ERR("IGU ERROR\n");
 
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+	aeu_mask = REG_RD(bp, aeu_addr);
+
 	DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
-	   bp->aeu_mask, asserted);
-	bp->aeu_mask &= ~(asserted & 0xff);
-	DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask);
+	   aeu_mask, asserted);
+	aeu_mask &= ~(asserted & 0xff);
+	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
 
-	REG_WR(bp, aeu_addr, bp->aeu_mask);
+	REG_WR(bp, aeu_addr, aeu_mask);
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
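The AEU mask is no longer cached in bp->aeu_mask; it is read-modify-written in the register itself under the shared HW lock, so the other port and the MCP always see a consistent value. The pattern, as a standalone sketch (hypothetical helper name):

static void bnx2x_aeu_mask_clear(struct bnx2x *bp, u32 addr, u32 bits)
{
	u32 mask;
	int port = BP_PORT(bp);

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	mask = REG_RD(bp, addr);
	mask &= ~bits;			/* or |= bits on deassert */
	REG_WR(bp, addr, mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
}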
 
+	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
 	bp->attn_state |= asserted;
+	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
 
 	if (asserted & ATTN_HARD_WIRED_MASK) {
 		if (asserted & ATTN_NIG_FOR_FUNC) {
@@ -2500,9 +2524,9 @@
 
 	} /* if hardwired */
 
-	DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n",
-	   asserted, BAR_IGU_INTMEM + igu_addr);
-	REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted);
+	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
+	   asserted, hc_addr);
+	REG_WR(bp, hc_addr, asserted);
 
 	/* now set back the mask */
 	if (asserted & ATTN_NIG_FOR_FUNC)
@@ -2530,12 +2554,12 @@
 		case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
 			/* Fan failure attention */
 
-			/* The PHY reset is controled by GPIO 1 */
+			/* The PHY reset is controlled by GPIO 1 */
 			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
-				       MISC_REGISTERS_GPIO_OUTPUT_LOW);
-			/* Low power mode is controled by GPIO 2 */
+				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+			/* Low power mode is controlled by GPIO 2 */
 			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-				       MISC_REGISTERS_GPIO_OUTPUT_LOW);
+				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
 			/* mark the failure */
 			bp->link_params.ext_phy_config &=
 					~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
@@ -2699,10 +2723,11 @@
 	int index;
 	u32 reg_addr;
 	u32 val;
+	u32 aeu_mask;
 
 	/* need to take HW lock because MCP or other port might also
 	   try to handle this event */
-	bnx2x_lock_alr(bp);
+	bnx2x_acquire_alr(bp);
 
 	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
 	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
@@ -2734,32 +2759,35 @@
 						HW_PRTY_ASSERT_SET_1) ||
 			    (attn.sig[2] & group_mask.sig[2] &
 						HW_PRTY_ASSERT_SET_2))
-			       BNX2X_ERR("FATAL HW block parity attention\n");
+				BNX2X_ERR("FATAL HW block parity attention\n");
 		}
 	}
 
-	bnx2x_unlock_alr(bp);
+	bnx2x_release_alr(bp);
 
-	reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
+	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
 
 	val = ~deasserted;
-/*	DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
-	   val, BAR_IGU_INTMEM + reg_addr); */
-	REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val);
+	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
+	   val, reg_addr);
+	REG_WR(bp, reg_addr, val);
 
-	if (bp->aeu_mask & (deasserted & 0xff))
-		BNX2X_ERR("IGU BUG!\n");
 	if (~bp->attn_state & deasserted)
-		BNX2X_ERR("IGU BUG!\n");
+		BNX2X_ERR("IGU ERROR\n");
 
 	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
 			  MISC_REG_AEU_MASK_ATTN_FUNC_0;
 
-	DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask);
-	bp->aeu_mask |= (deasserted & 0xff);
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+	aeu_mask = REG_RD(bp, reg_addr);
 
-	DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask);
-	REG_WR(bp, reg_addr, bp->aeu_mask);
+	DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
+	   aeu_mask, deasserted);
+	aeu_mask |= (deasserted & 0xff);
+	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
+
+	REG_WR(bp, reg_addr, aeu_mask);
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
 
 	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
 	bp->attn_state &= ~deasserted;
@@ -2800,7 +2828,7 @@
 
 	/* Return here if interrupt is disabled */
 	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
-		DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
+		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
 		return;
 	}
 
@@ -2808,7 +2836,7 @@
 /*	if (status == 0)				     */
 /*		BNX2X_ERR("spurious slowpath interrupt!\n"); */
 
-	DP(BNX2X_MSG_SP, "got a slowpath interrupt (updated %x)\n", status);
+	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
 
 	/* HW attentions */
 	if (status & 0x1)
@@ -2838,7 +2866,7 @@
 
 	/* Return here if interrupt is disabled */
 	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
-		DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
+		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
 		return IRQ_HANDLED;
 	}
 
@@ -2876,11 +2904,11 @@
 			/* underflow */ \
 			d_hi = m_hi - s_hi; \
 			if (d_hi > 0) { \
-			/* we can 'loan' 1 */ \
+				/* we can 'loan' 1 */ \
 				d_hi--; \
 				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
 			} else { \
-			/* m_hi <= s_hi */ \
+				/* m_hi <= s_hi */ \
 				d_hi = 0; \
 				d_lo = 0; \
 			} \
@@ -2890,7 +2918,7 @@
 				d_hi = 0; \
 				d_lo = 0; \
 			} else { \
-			/* m_hi >= s_hi */ \
+				/* m_hi >= s_hi */ \
 				d_hi = m_hi - s_hi; \
 				d_lo = m_lo - s_lo; \
 			} \
@@ -2963,37 +2991,6 @@
  * Init service functions
  */
 
-static void bnx2x_storm_stats_init(struct bnx2x *bp)
-{
-	int func = BP_FUNC(bp);
-
-	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func), 1);
-	REG_WR(bp, BAR_XSTRORM_INTMEM +
-	       XSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);
-
-	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func), 1);
-	REG_WR(bp, BAR_TSTRORM_INTMEM +
-	       TSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);
-
-	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func), 0);
-	REG_WR(bp, BAR_CSTRORM_INTMEM +
-	       CSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);
-
-	REG_WR(bp, BAR_XSTRORM_INTMEM +
-	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
-	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
-	REG_WR(bp, BAR_XSTRORM_INTMEM +
-	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
-	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
-
-	REG_WR(bp, BAR_TSTRORM_INTMEM +
-	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
-	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
-	REG_WR(bp, BAR_TSTRORM_INTMEM +
-	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
-	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
-}
-
 static void bnx2x_storm_stats_post(struct bnx2x *bp)
 {
 	if (!bp->stats_pending) {
@@ -3032,6 +3029,8 @@
 	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
 	bp->port.old_nig_stats.brb_discard =
 			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
+	bp->port.old_nig_stats.brb_truncate =
+			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
 	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
 		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
 	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
@@ -3101,12 +3100,12 @@
 
 	might_sleep();
 	while (*stats_comp != DMAE_COMP_VAL) {
-		msleep(1);
 		if (!cnt) {
 			BNX2X_ERR("timeout waiting for stats finished\n");
 			break;
 		}
 		cnt--;
+		msleep(1);
 	}
 	return 1;
 }
@@ -3451,8 +3450,7 @@
 	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
 	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
 	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
-	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
-	UPDATE_STAT64(rx_stat_grxcf, rx_stat_bmac_xcf);
+	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
 	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
 	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
 	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
@@ -3536,6 +3534,8 @@
 
 	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
 		      new->brb_discard - old->brb_discard);
+	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
+		      new->brb_truncate - old->brb_truncate);
 
 	UPDATE_STAT64_NIG(egress_mac_pkt0,
 					etherstatspkts1024octetsto1522octets);
@@ -3713,8 +3713,7 @@
 	nstats->rx_length_errors =
 				estats->rx_stat_etherstatsundersizepkts_lo +
 				estats->jabber_packets_received;
-	nstats->rx_over_errors = estats->brb_drop_lo +
-				 estats->brb_truncate_discard;
+	nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
 	nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
 	nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
 	nstats->rx_fifo_errors = old_tclient->no_buff_discard;
@@ -3783,7 +3782,7 @@
 			     bp->fp->rx_comp_cons),
 		       le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
 		printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u\n",
-		       netif_queue_stopped(bp->dev)? "Xoff" : "Xon",
+		       netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
 		       estats->driver_xoff, estats->brb_drop_lo);
 		printk(KERN_DEBUG "tstats: checksum_discard %u  "
 			"packets_too_big_discard %u  no_buff_discard %u  "
@@ -3994,14 +3993,14 @@
 
 	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
 			USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
-			sizeof(struct ustorm_def_status_block)/4);
+			sizeof(struct ustorm_status_block)/4);
 	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
 			CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
-			sizeof(struct cstorm_def_status_block)/4);
+			sizeof(struct cstorm_status_block)/4);
 }
 
-static void bnx2x_init_sb(struct bnx2x *bp, int sb_id,
-			  struct host_status_block *sb,	dma_addr_t mapping)
+static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
+			  dma_addr_t mapping, int sb_id)
 {
 	int port = BP_PORT(bp);
 	int func = BP_FUNC(bp);
@@ -4077,7 +4076,6 @@
 					    atten_status_block);
 	def_sb->atten_status_block.status_block_id = sb_id;
 
-	bp->def_att_idx = 0;
 	bp->attn_state = 0;
 
 	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
@@ -4094,9 +4092,6 @@
 					       reg_offset + 0xc + 0x10*index);
 	}
 
-	bp->aeu_mask = REG_RD(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
-					  MISC_REG_AEU_MASK_ATTN_FUNC_0));
-
 	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
 			     HC_REG_ATTN_MSG0_ADDR_L);
 
@@ -4114,17 +4109,13 @@
 					    u_def_status_block);
 	def_sb->u_def_status_block.status_block_id = sb_id;
 
-	bp->def_u_idx = 0;
-
 	REG_WR(bp, BAR_USTRORM_INTMEM +
 	       USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
 	REG_WR(bp, BAR_USTRORM_INTMEM +
 	       ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
 	       U64_HI(section));
-	REG_WR8(bp, BAR_USTRORM_INTMEM +  DEF_USB_FUNC_OFF +
+	REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
 		USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
-	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(func),
-	       BNX2X_BTR);
 
 	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
 		REG_WR16(bp, BAR_USTRORM_INTMEM +
@@ -4135,17 +4126,13 @@
 					    c_def_status_block);
 	def_sb->c_def_status_block.status_block_id = sb_id;
 
-	bp->def_c_idx = 0;
-
 	REG_WR(bp, BAR_CSTRORM_INTMEM +
 	       CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
 	REG_WR(bp, BAR_CSTRORM_INTMEM +
 	       ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
 	       U64_HI(section));
-	REG_WR8(bp, BAR_CSTRORM_INTMEM +  DEF_CSB_FUNC_OFF +
+	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
 		CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
-	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(func),
-	       BNX2X_BTR);
 
 	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
 		REG_WR16(bp, BAR_CSTRORM_INTMEM +
@@ -4156,17 +4143,13 @@
 					    t_def_status_block);
 	def_sb->t_def_status_block.status_block_id = sb_id;
 
-	bp->def_t_idx = 0;
-
 	REG_WR(bp, BAR_TSTRORM_INTMEM +
 	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
 	REG_WR(bp, BAR_TSTRORM_INTMEM +
 	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
 	       U64_HI(section));
-	REG_WR8(bp, BAR_TSTRORM_INTMEM +  DEF_TSB_FUNC_OFF +
+	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
 		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
-	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(func),
-	       BNX2X_BTR);
 
 	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
 		REG_WR16(bp, BAR_TSTRORM_INTMEM +
@@ -4177,23 +4160,20 @@
 					    x_def_status_block);
 	def_sb->x_def_status_block.status_block_id = sb_id;
 
-	bp->def_x_idx = 0;
-
 	REG_WR(bp, BAR_XSTRORM_INTMEM +
 	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
 	REG_WR(bp, BAR_XSTRORM_INTMEM +
 	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
 	       U64_HI(section));
-	REG_WR8(bp, BAR_XSTRORM_INTMEM +  DEF_XSB_FUNC_OFF +
+	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
 		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
-	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(func),
-	       BNX2X_BTR);
 
 	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
 		REG_WR16(bp, BAR_XSTRORM_INTMEM +
 			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
 
 	bp->stats_pending = 0;
+	bp->set_mac_pending = 0;
 
 	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
 }
@@ -4209,21 +4189,25 @@
 		/* HC_INDEX_U_ETH_RX_CQ_CONS */
 		REG_WR8(bp, BAR_USTRORM_INTMEM +
 			USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
-						   HC_INDEX_U_ETH_RX_CQ_CONS),
+						    U_SB_ETH_RX_CQ_INDEX),
 			bp->rx_ticks/12);
 		REG_WR16(bp, BAR_USTRORM_INTMEM +
 			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
-						   HC_INDEX_U_ETH_RX_CQ_CONS),
+						     U_SB_ETH_RX_CQ_INDEX),
+			 bp->rx_ticks ? 0 : 1);
+		REG_WR16(bp, BAR_USTRORM_INTMEM +
+			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
+						     U_SB_ETH_RX_BD_INDEX),
 			 bp->rx_ticks ? 0 : 1);
 
 		/* HC_INDEX_C_ETH_TX_CQ_CONS */
 		REG_WR8(bp, BAR_CSTRORM_INTMEM +
 			CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
-						   HC_INDEX_C_ETH_TX_CQ_CONS),
+						    C_SB_ETH_TX_CQ_INDEX),
 			bp->tx_ticks/12);
 		REG_WR16(bp, BAR_CSTRORM_INTMEM +
 			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
-						   HC_INDEX_C_ETH_TX_CQ_CONS),
+						     C_SB_ETH_TX_CQ_INDEX),
 			 bp->tx_ticks ? 0 : 1);
 	}
 }
@@ -4256,7 +4240,9 @@
 static void bnx2x_init_rx_rings(struct bnx2x *bp)
 {
 	int func = BP_FUNC(bp);
-	u16 ring_prod, cqe_ring_prod = 0;
+	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
+					      ETH_MAX_AGGREGATION_QUEUES_E1H;
+	u16 ring_prod, cqe_ring_prod;
 	int i, j;
 
 	bp->rx_buf_use_size = bp->dev->mtu;
@@ -4270,9 +4256,9 @@
 		   bp->dev->mtu + ETH_OVREHEAD);
 
 		for_each_queue(bp, j) {
-			for (i = 0; i < ETH_MAX_AGGREGATION_QUEUES_E1H; i++) {
-				struct bnx2x_fastpath *fp = &bp->fp[j];
+			struct bnx2x_fastpath *fp = &bp->fp[j];
 
+			for (i = 0; i < max_agg_queues; i++) {
 				fp->tpa_pool[i].skb =
 				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
 				if (!fp->tpa_pool[i].skb) {
@@ -4352,8 +4338,7 @@
 				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
 				/* Cleanup already allocated elements */
 				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
-				bnx2x_free_tpa_pool(bp, fp,
-					      ETH_MAX_AGGREGATION_QUEUES_E1H);
+				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
 				fp->disable_tpa = 1;
 				ring_prod = 0;
 				break;
@@ -4363,13 +4348,13 @@
 		fp->rx_sge_prod = ring_prod;
 
 		/* Allocate BDs and initialize BD ring */
-		fp->rx_comp_cons = fp->rx_alloc_failed = 0;
+		fp->rx_comp_cons = 0;
 		cqe_ring_prod = ring_prod = 0;
 		for (i = 0; i < bp->rx_ring_size; i++) {
 			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
 				BNX2X_ERR("was only able to allocate "
 					  "%d rx skbs\n", i);
-				fp->rx_alloc_failed++;
+				bp->eth_stats.rx_skb_alloc_failed++;
 				break;
 			}
 			ring_prod = NEXT_RX_IDX(ring_prod);
@@ -4497,7 +4482,7 @@
 		}
 
 		context->cstorm_st_context.sb_index_number =
-						HC_INDEX_C_ETH_TX_CQ_CONS;
+						C_SB_ETH_TX_CQ_INDEX;
 		context->cstorm_st_context.status_block_id = sb_id;
 
 		context->xstorm_ag_context.cdu_reserved =
@@ -4535,7 +4520,7 @@
 	int i;
 
 	tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
-	tstorm_client.statistics_counter_id = 0;
+	tstorm_client.statistics_counter_id = BP_CL_ID(bp);
 	tstorm_client.config_flags =
 				TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
 #ifdef BCM_VLAN
@@ -4579,7 +4564,7 @@
 	int func = BP_FUNC(bp);
 	int i;
 
-	DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);
+	DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
 
 	switch (mode) {
 	case BNX2X_RX_MODE_NONE: /* no Rx */
@@ -4617,13 +4602,35 @@
 		bnx2x_set_client_config(bp);
 }
 
-static void bnx2x_init_internal(struct bnx2x *bp)
+static void bnx2x_init_internal_common(struct bnx2x *bp)
+{
+	int i;
+
+	/* Zero this manually as its initialization is
+	   currently missing in the initTool */
+	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
+		REG_WR(bp, BAR_USTRORM_INTMEM +
+		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
+}
+
+static void bnx2x_init_internal_port(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+
+	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
+	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
+	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
+	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
+}
+
+static void bnx2x_init_internal_func(struct bnx2x *bp)
 {
 	struct tstorm_eth_function_common_config tstorm_config = {0};
 	struct stats_indication_flags stats_flags = {0};
 	int port = BP_PORT(bp);
 	int func = BP_FUNC(bp);
 	int i;
+	u16 max_agg_size;
 
 	if (is_multi(bp)) {
 		tstorm_config.config_flags = MULTI_FLAGS;
@@ -4636,31 +4643,53 @@
 	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
 	       (*(u32 *)&tstorm_config));
 
-/*	DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n",
-	   (*(u32 *)&tstorm_config)); */
-
 	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
 	bnx2x_set_storm_rx_mode(bp);
 
+	/* reset xstorm per client statistics */
+	for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
+		REG_WR(bp, BAR_XSTRORM_INTMEM +
+		       XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
+		       i*4, 0);
+	}
+	/* reset tstorm per client statistics */
+	for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
+		REG_WR(bp, BAR_TSTRORM_INTMEM +
+		       TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
+		       i*4, 0);
+	}
+
+	/* Init statistics related context */
 	stats_flags.collect_eth = 1;
 
-	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port),
+	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
 	       ((u32 *)&stats_flags)[0]);
-	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port) + 4,
+	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
 	       ((u32 *)&stats_flags)[1]);
 
-	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port),
+	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
 	       ((u32 *)&stats_flags)[0]);
-	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port) + 4,
+	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
 	       ((u32 *)&stats_flags)[1]);
 
-	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port),
+	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
 	       ((u32 *)&stats_flags)[0]);
-	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port) + 4,
+	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
 	       ((u32 *)&stats_flags)[1]);
 
-/*	DP(NETIF_MSG_IFUP, "stats_flags: 0x%08x 0x%08x\n",
-	   ((u32 *)&stats_flags)[0], ((u32 *)&stats_flags)[1]); */
+	REG_WR(bp, BAR_XSTRORM_INTMEM +
+	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
+	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
+	REG_WR(bp, BAR_XSTRORM_INTMEM +
+	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
+	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
+
+	REG_WR(bp, BAR_TSTRORM_INTMEM +
+	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
+	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
+	REG_WR(bp, BAR_TSTRORM_INTMEM +
+	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
+	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
 
 	if (CHIP_IS_E1H(bp)) {
 		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
@@ -4676,15 +4705,12 @@
 			 bp->e1hov);
 	}
 
-	/* Zero this manualy as its initialization is
-	   currently missing in the initTool */
-	for (i = 0; i < USTORM_AGG_DATA_SIZE >> 2; i++)
-		REG_WR(bp, BAR_USTRORM_INTMEM +
-		       USTORM_AGG_DATA_OFFSET + 4*i, 0);
-
+	/* Init CQ ring mapping and aggregation size */
+	max_agg_size = min((u32)(bp->rx_buf_use_size +
+				 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
+			   (u32)0xffff);
 	for_each_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
-		u16 max_agg_size;
 
 		REG_WR(bp, BAR_USTRORM_INTMEM +
 		       USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
@@ -4693,16 +4719,34 @@
 		       USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
 		       U64_HI(fp->rx_comp_mapping));
 
-		max_agg_size = min((u32)(bp->rx_buf_use_size +
-					 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
-				   (u32)0xffff);
 		REG_WR16(bp, BAR_USTRORM_INTMEM +
 			 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
 			 max_agg_size);
 	}
 }
 
-static void bnx2x_nic_init(struct bnx2x *bp)
+static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
+{
+	switch (load_code) {
+	case FW_MSG_CODE_DRV_LOAD_COMMON:
+		bnx2x_init_internal_common(bp);
+		/* no break */
+
+	case FW_MSG_CODE_DRV_LOAD_PORT:
+		bnx2x_init_internal_port(bp);
+		/* no break */
+
+	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+		bnx2x_init_internal_func(bp);
+		break;
+
+	default:
+		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
+		break;
+	}
+}
+
+static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
 {
 	int i;
 
@@ -4717,19 +4761,20 @@
 		DP(NETIF_MSG_IFUP,
 		   "bnx2x_init_sb(%p,%p) index %d  cl_id %d  sb %d\n",
 		   bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
-		bnx2x_init_sb(bp, FP_SB_ID(fp), fp->status_blk,
-			      fp->status_blk_mapping);
+		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
+			      FP_SB_ID(fp));
+		bnx2x_update_fpsb_idx(fp);
 	}
 
-	bnx2x_init_def_sb(bp, bp->def_status_blk,
-			  bp->def_status_blk_mapping, DEF_SB_ID);
+	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
+			  DEF_SB_ID);
+	bnx2x_update_dsb_idx(bp);
 	bnx2x_update_coalesce(bp);
 	bnx2x_init_rx_rings(bp);
 	bnx2x_init_tx_ring(bp);
 	bnx2x_init_sp_ring(bp);
 	bnx2x_init_context(bp);
-	bnx2x_init_internal(bp);
-	bnx2x_storm_stats_init(bp);
+	bnx2x_init_internal(bp, load_code);
 	bnx2x_init_ind_table(bp);
 	bnx2x_int_enable(bp);
 }
@@ -4878,7 +4923,7 @@
 	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
 	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
 	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
-	NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
+	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
 
 	/*  Write 0 to parser credits for CFC search request */
 	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
@@ -4933,7 +4978,7 @@
 	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
 	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
 	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
-	NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
+	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
 
 	/* Write 0 to parser credits for CFC search request */
 	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
@@ -5000,7 +5045,7 @@
 	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
 	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
 	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
-	NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);
+	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
 
 	DP(NETIF_MSG_HW, "done\n");
 
@@ -5089,11 +5134,6 @@
 	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
 #endif
 
-#ifndef BCM_ISCSI
-		/* set NIC mode */
-		REG_WR(bp, PRS_REG_NIC_MODE, 1);
-#endif
-
 	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
 #ifdef BCM_ISCSI
 	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
@@ -5163,6 +5203,8 @@
 	}
 
 	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
+	/* set NIC mode */
+	REG_WR(bp, PRS_REG_NIC_MODE, 1);
 	if (CHIP_IS_E1H(bp))
 		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
 
@@ -5333,6 +5375,13 @@
 		       ((u32 *)&tmp)[1]);
 	}
 
+	if (!BP_NOMCP(bp)) {
+		bnx2x_acquire_phy_lock(bp);
+		bnx2x_common_init_phy(bp, bp->common.shmem_base);
+		bnx2x_release_phy_lock(bp);
+	} else
+		BNX2X_ERR("Bootcode is missing - can not initialize link\n");
+
 	return 0;
 }
 
@@ -5638,18 +5687,23 @@
 	int func = BP_FUNC(bp);
 	u32 seq = ++bp->fw_seq;
 	u32 rc = 0;
+	u32 cnt = 1;
+	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
 
 	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
 	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
 
-	/* let the FW do it's magic ... */
-	msleep(100); /* TBD */
+	do {
+		/* let the FW do its magic ... */
+		msleep(delay);
 
-	if (CHIP_REV_IS_SLOW(bp))
-		msleep(900);
+		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
 
-	rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
-	DP(BNX2X_MSG_MCP, "read (%x) seq is (%x) from FW MB\n", rc, seq);
+		/* Give the FW up to 2 seconds (200*10ms) */
+	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
+
+	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
+	   cnt*delay, rc, seq);
 
 	/* is this a reply to our command? */
 	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
@@ -5713,6 +5767,7 @@
 			       NUM_RCQ_BD);
 
 		/* SGE ring */
+		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
 		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
 			       bnx2x_fp(bp, i, rx_sge_mapping),
 			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
@@ -5890,7 +5945,8 @@
 			dev_kfree_skb(skb);
 		}
 		if (!fp->disable_tpa)
-			bnx2x_free_tpa_pool(bp, fp,
+			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
+					    ETH_MAX_AGGREGATION_QUEUES_E1 :
 					    ETH_MAX_AGGREGATION_QUEUES_E1H);
 	}
 }
@@ -5976,8 +6032,8 @@
 				 bnx2x_msix_fp_int, 0,
 				 bp->dev->name, &bp->fp[i]);
 		if (rc) {
-			BNX2X_ERR("request fp #%d irq failed  rc %d\n",
-				  i + offset, rc);
+			BNX2X_ERR("request fp #%d irq failed  rc -%d\n",
+				  i + offset, -rc);
 			bnx2x_free_msix_irqs(bp);
 			return -EBUSY;
 		}
@@ -6004,7 +6060,7 @@
  * Init service functions
  */
 
-static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
+static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
 {
 	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
 	int port = BP_PORT(bp);
@@ -6026,11 +6082,15 @@
 	config->config_table[0].cam_entry.lsb_mac_addr =
 					swab16(*(u16 *)&bp->dev->dev_addr[4]);
 	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
-	config->config_table[0].target_table_entry.flags = 0;
+	if (set)
+		config->config_table[0].target_table_entry.flags = 0;
+	else
+		CAM_INVALIDATE(config->config_table[0]);
 	config->config_table[0].target_table_entry.client_id = 0;
 	config->config_table[0].target_table_entry.vlan_id = 0;
 
-	DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n",
+	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
+	   (set ? "setting" : "clearing"),
 	   config->config_table[0].cam_entry.msb_mac_addr,
 	   config->config_table[0].cam_entry.middle_mac_addr,
 	   config->config_table[0].cam_entry.lsb_mac_addr);
@@ -6040,8 +6100,11 @@
 	config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
 	config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
 	config->config_table[1].cam_entry.flags = cpu_to_le16(port);
-	config->config_table[1].target_table_entry.flags =
+	if (set)
+		config->config_table[1].target_table_entry.flags =
 				TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
+	else
+		CAM_INVALIDATE(config->config_table[1]);
 	config->config_table[1].target_table_entry.client_id = 0;
 	config->config_table[1].target_table_entry.vlan_id = 0;
 
@@ -6050,12 +6113,12 @@
 		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
 }
 
-static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp)
+static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
 {
 	struct mac_configuration_cmd_e1h *config =
 		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
 
-	if (bp->state != BNX2X_STATE_OPEN) {
+	if (set && (bp->state != BNX2X_STATE_OPEN)) {
 		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
 		return;
 	}
@@ -6079,9 +6142,14 @@
 	config->config_table[0].client_id = BP_L_ID(bp);
 	config->config_table[0].vlan_id = 0;
 	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
-	config->config_table[0].flags = BP_PORT(bp);
+	if (set)
+		config->config_table[0].flags = BP_PORT(bp);
+	else
+		config->config_table[0].flags =
+				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
 
-	DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
+	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
+	   (set ? "setting" : "clearing"),
 	   config->config_table[0].msb_mac_addr,
 	   config->config_table[0].middle_mac_addr,
 	   config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
@@ -6106,13 +6174,13 @@
 			bnx2x_rx_int(bp->fp, 10);
 			/* if index is different from 0
 			 * the reply for some commands will
-			 * be on the none default queue
+			 * be on the non-default queue
 			 */
 			if (idx)
 				bnx2x_rx_int(&bp->fp[idx], 10);
 		}
-		mb(); /* state is changed by bnx2x_sp_event() */
 
+		mb(); /* state is changed by bnx2x_sp_event() */
 		if (*state_p == state)
 			return 0;
 
@@ -6167,7 +6235,6 @@
 {
 	u32 load_code;
 	int i, rc;
-
 #ifdef BNX2X_STOP_ON_ERROR
 	if (unlikely(bp->panic))
 		return -EPERM;
@@ -6183,22 +6250,24 @@
 	if (!BP_NOMCP(bp)) {
 		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
 		if (!load_code) {
-			BNX2X_ERR("MCP response failure, unloading\n");
+			BNX2X_ERR("MCP response failure, aborting\n");
 			return -EBUSY;
 		}
 		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
 			return -EBUSY; /* other port in diagnostic mode */
 
 	} else {
+		int port = BP_PORT(bp);
+
 		DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
 		   load_count[0], load_count[1], load_count[2]);
 		load_count[0]++;
-		load_count[1 + BP_PORT(bp)]++;
+		load_count[1 + port]++;
 		DP(NETIF_MSG_IFUP, "NO MCP new load counts       %d, %d, %d\n",
 		   load_count[0], load_count[1], load_count[2]);
 		if (load_count[0] == 1)
 			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
-		else if (load_count[1 + BP_PORT(bp)] == 1)
+		else if (load_count[1 + port] == 1)
 			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
 		else
 			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
@@ -6247,9 +6316,6 @@
 		bnx2x_fp(bp, i, disable_tpa) =
 					((bp->flags & TPA_ENABLE_FLAG) == 0);
 
-	/* Disable interrupt handling until HW is initialized */
-	atomic_set(&bp->intr_sem, 1);
-
 	if (bp->flags & USING_MSIX_FLAG) {
 		rc = bnx2x_req_msix_irqs(bp);
 		if (rc) {
@@ -6276,17 +6342,14 @@
 		goto load_error;
 	}
 
-	/* Enable interrupt handling */
-	atomic_set(&bp->intr_sem, 0);
-
 	/* Setup NIC internals and enable interrupts */
-	bnx2x_nic_init(bp);
+	bnx2x_nic_init(bp, load_code);
 
 	/* Send LOAD_DONE command to MCP */
 	if (!BP_NOMCP(bp)) {
 		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
 		if (!load_code) {
-			BNX2X_ERR("MCP response failure, unloading\n");
+			BNX2X_ERR("MCP response failure, aborting\n");
 			rc = -EBUSY;
 			goto load_int_disable;
 		}
@@ -6301,11 +6364,12 @@
 	for_each_queue(bp, i)
 		napi_enable(&bnx2x_fp(bp, i, napi));
 
+	/* Enable interrupt handling */
+	atomic_set(&bp->intr_sem, 0);
+
 	rc = bnx2x_setup_leading(bp);
 	if (rc) {
-#ifdef BNX2X_STOP_ON_ERROR
-		bp->panic = 1;
-#endif
+		BNX2X_ERR("Setup leading failed!\n");
 		goto load_stop_netif;
 	}
 
@@ -6323,9 +6387,9 @@
 		}
 
 	if (CHIP_IS_E1(bp))
-		bnx2x_set_mac_addr_e1(bp);
+		bnx2x_set_mac_addr_e1(bp, 1);
 	else
-		bnx2x_set_mac_addr_e1h(bp);
+		bnx2x_set_mac_addr_e1h(bp, 1);
 
 	if (bp->port.pmf)
 		bnx2x_initial_phy_init(bp);
@@ -6339,7 +6403,6 @@
 		break;
 
 	case LOAD_OPEN:
-		/* IRQ is only requested from bnx2x_open */
 		netif_start_queue(bp->dev);
 		bnx2x_set_rx_mode(bp->dev);
 		if (bp->flags & USING_MSIX_FLAG)
@@ -6378,8 +6441,7 @@
 	/* Free SKBs, SGEs, TPA pool and driver internals */
 	bnx2x_free_skbs(bp);
 	for_each_queue(bp, i)
-		bnx2x_free_rx_sge_range(bp, bp->fp + i,
-					RX_SGE_CNT*NUM_RX_SGE_PAGES);
+		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
 load_error:
 	bnx2x_free_mem(bp);
 
@@ -6411,7 +6473,7 @@
 	return rc;
 }
 
-static void bnx2x_stop_leading(struct bnx2x *bp)
+static int bnx2x_stop_leading(struct bnx2x *bp)
 {
 	u16 dsb_sp_prod_idx;
 	/* if the other port is handling traffic,
@@ -6429,7 +6491,7 @@
 	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
 			       &(bp->fp[0].state), 1);
 	if (rc) /* timeout */
-		return;
+		return rc;
 
 	dsb_sp_prod_idx = *bp->dsb_sp_prod;
 
@@ -6441,20 +6503,24 @@
 	   so there is not much to do if this times out
 	 */
 	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
-		msleep(1);
 		if (!cnt) {
 			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
 			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
 			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
 #ifdef BNX2X_STOP_ON_ERROR
 			bnx2x_panic();
+#else
+			rc = -EBUSY;
 #endif
 			break;
 		}
 		cnt--;
+		msleep(1);
 	}
 	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
 	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
+
+	return rc;
 }
 
 static void bnx2x_reset_func(struct bnx2x *bp)
@@ -6496,7 +6562,7 @@
 	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
 	if (val)
 		DP(NETIF_MSG_IFDOWN,
-		   "BRB1 is not empty  %d blooks are occupied\n", val);
+		   "BRB1 is not empty  %d blocks are occupied\n", val);
 
 	/* TODO: Close Doorbell port? */
 }
@@ -6536,11 +6602,12 @@
 	}
 }
 
-/* msut be called with rtnl_lock */
+/* must be called with rtnl_lock */
 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 {
+	int port = BP_PORT(bp);
 	u32 reset_code = 0;
-	int i, cnt;
+	int i, cnt, rc;
 
 	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
 
@@ -6557,22 +6624,17 @@
 		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
 	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 
-	/* Wait until all fast path tasks complete */
+	/* Wait until tx fast path tasks complete */
 	for_each_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 
-#ifdef BNX2X_STOP_ON_ERROR
-#ifdef __powerpc64__
-		DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
-#else
-		DP(NETIF_MSG_IFDOWN, "fp->tpa_queue_used = 0x%llx\n",
-#endif
-		   fp->tpa_queue_used);
-#endif
 		cnt = 1000;
 		smp_rmb();
-		while (bnx2x_has_work(fp)) {
-			msleep(1);
+		while (BNX2X_HAS_TX_WORK(fp)) {
+
+			if (!netif_running(bp->dev))
+				bnx2x_tx_int(fp, 1000);
+
 			if (!cnt) {
 				BNX2X_ERR("timeout waiting for queue[%d]\n",
 					  i);
@@ -6584,14 +6646,13 @@
 #endif
 			}
 			cnt--;
+			msleep(1);
 			smp_rmb();
 		}
 	}
 
-	/* Wait until all slow path tasks complete */
-	cnt = 1000;
-	while ((bp->spq_left != MAX_SPQ_PENDING) && cnt--)
-		msleep(1);
+	/* Give HW time to discard old tx messages */
+	msleep(1);
 
 	for_each_queue(bp, i)
 		napi_disable(&bnx2x_fp(bp, i, napi));
@@ -6601,52 +6662,79 @@
 	/* Release IRQs */
 	bnx2x_free_irq(bp);
 
-	if (bp->flags & NO_WOL_FLAG)
-		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
+	if (unload_mode == UNLOAD_NORMAL)
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
 
-	else if (bp->wol) {
-		u32 emac_base = BP_PORT(bp) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+	else if (bp->flags & NO_WOL_FLAG) {
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
+		if (CHIP_IS_E1H(bp))
+			REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
+
+	} else if (bp->wol) {
+		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
 		u8 *mac_addr = bp->dev->dev_addr;
 		u32 val;
-
 		/* The mac address is written to entries 1-4 to
 		   preserve entry 0 which is used by the PMF */
+		u8 entry = (BP_E1HVN(bp) + 1)*8;
+
 		val = (mac_addr[0] << 8) | mac_addr[1];
-		EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8, val);
+		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
 
 		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
 		      (mac_addr[4] << 8) | mac_addr[5];
-		EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8 + 4,
-			val);
+		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
 
 		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
 
 	} else
 		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
 
+	if (CHIP_IS_E1(bp)) {
+		struct mac_configuration_cmd *config =
+						bnx2x_sp(bp, mcast_config);
+
+		bnx2x_set_mac_addr_e1(bp, 0);
+
+		for (i = 0; i < config->hdr.length_6b; i++)
+			CAM_INVALIDATE(config->config_table[i]);
+
+		config->hdr.length_6b = i;
+		if (CHIP_REV_IS_SLOW(bp))
+			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
+		else
+			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
+		config->hdr.client_id = BP_CL_ID(bp);
+		config->hdr.reserved1 = 0;
+
+		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
+			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
+			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
+
+	} else { /* E1H */
+		bnx2x_set_mac_addr_e1h(bp, 0);
+
+		for (i = 0; i < MC_HASH_SIZE; i++)
+			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
+	}
+
+	if (CHIP_IS_E1H(bp))
+		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
+
 	/* Close multi and leading connections
 	   Completions for ramrods are collected in a synchronous way */
 	for_each_nondefault_queue(bp, i)
 		if (bnx2x_stop_multi(bp, i))
 			goto unload_error;
 
-	if (CHIP_IS_E1H(bp))
-		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + BP_PORT(bp)*8, 0);
-
-	bnx2x_stop_leading(bp);
-#ifdef BNX2X_STOP_ON_ERROR
-	/* If ramrod completion timed out - break here! */
-	if (bp->panic) {
+	rc = bnx2x_stop_leading(bp);
+	if (rc) {
 		BNX2X_ERR("Stop leading failed!\n");
+#ifdef BNX2X_STOP_ON_ERROR
 		return -EBUSY;
-	}
+#else
+		goto unload_error;
 #endif
-
-	if ((bp->state != BNX2X_STATE_CLOSING_WAIT4_UNLOAD) ||
-	    (bp->fp[0].state != BNX2X_FP_STATE_CLOSED)) {
-		DP(NETIF_MSG_IFDOWN, "failed to close leading properly!  "
-		   "state 0x%x  fp[0].state 0x%x\n",
-		   bp->state, bp->fp[0].state);
 	}
 
 unload_error:
@@ -6656,12 +6744,12 @@
 		DP(NETIF_MSG_IFDOWN, "NO MCP load counts      %d, %d, %d\n",
 		   load_count[0], load_count[1], load_count[2]);
 		load_count[0]--;
-		load_count[1 + BP_PORT(bp)]--;
+		load_count[1 + port]--;
 		DP(NETIF_MSG_IFDOWN, "NO MCP new load counts  %d, %d, %d\n",
 		   load_count[0], load_count[1], load_count[2]);
 		if (load_count[0] == 0)
 			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
-		else if (load_count[1 + BP_PORT(bp)] == 0)
+		else if (load_count[1 + port] == 0)
 			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
 		else
 			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
@@ -6681,8 +6769,7 @@
 	/* Free SKBs, SGEs, TPA pool and driver internals */
 	bnx2x_free_skbs(bp);
 	for_each_queue(bp, i)
-		bnx2x_free_rx_sge_range(bp, bp->fp + i,
-					RX_SGE_CNT*NUM_RX_SGE_PAGES);
+		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
 	bnx2x_free_mem(bp);
 
 	bp->state = BNX2X_STATE_CLOSED;
@@ -6733,56 +6820,93 @@
 		/* Check if it is the UNDI driver
 		 * UNDI driver initializes CID offset for normal bell to 0x7
 		 */
+		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
 		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
 		if (val == 0x7) {
 			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
-			/* save our func and fw_seq */
+			/* save our func */
 			int func = BP_FUNC(bp);
-			u16 fw_seq = bp->fw_seq;
+			u32 swap_en;
+			u32 swap_val;
 
 			BNX2X_DEV_INFO("UNDI is active! reset device\n");
 
 			/* try unload UNDI on port 0 */
 			bp->func = 0;
-			bp->fw_seq = (SHMEM_RD(bp,
-					     func_mb[bp->func].drv_mb_header) &
-				      DRV_MSG_SEQ_NUMBER_MASK);
-
+			bp->fw_seq =
+			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
+				DRV_MSG_SEQ_NUMBER_MASK);
 			reset_code = bnx2x_fw_command(bp, reset_code);
-			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
 
 			/* if UNDI is loaded on the other port */
 			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
 
+				/* send "DONE" for previous unload */
+				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
+
+				/* unload UNDI on port 1 */
 				bp->func = 1;
-				bp->fw_seq = (SHMEM_RD(bp,
-					     func_mb[bp->func].drv_mb_header) &
-					      DRV_MSG_SEQ_NUMBER_MASK);
+				bp->fw_seq =
+			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
+					DRV_MSG_SEQ_NUMBER_MASK);
+				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
 
-				bnx2x_fw_command(bp,
-					     DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS);
-				bnx2x_fw_command(bp,
-						 DRV_MSG_CODE_UNLOAD_DONE);
-
-				/* restore our func and fw_seq */
-				bp->func = func;
-				bp->fw_seq = fw_seq;
+				bnx2x_fw_command(bp, reset_code);
 			}
 
+			REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
+				    HC_REG_CONFIG_0), 0x1000);
+
+			/* close input traffic and wait for it */
+			/* Do not rcv packets to BRB */
+			REG_WR(bp,
+			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
+					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
+			/* Do not direct rcv packets that are not for MCP to
+			 * the BRB */
+			REG_WR(bp,
+			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
+					      NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
+			/* clear AEU */
+			REG_WR(bp,
+			     (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+					    MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
+			msleep(10);
+
+			/* save NIG port swap info */
+			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
 			/* reset device */
 			REG_WR(bp,
 			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
-			       0xd3ffff7f);
+			       0xd3ffffff);
 			REG_WR(bp,
 			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
 			       0x1403);
+			/* take the NIG out of reset and restore swap values */
+			REG_WR(bp,
+			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
+			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
+			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
+			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
+
+			/* send unload done to the MCP */
+			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
+
+			/* restore our func and fw_seq */
+			bp->func = func;
+			bp->fw_seq =
+			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
+				DRV_MSG_SEQ_NUMBER_MASK);
 		}
+		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
 	}
 }
 
 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
 {
 	u32 val, val2, val3, val4, id;
+	u16 pmc;
 
 	/* Get the chip revision id and number. */
 	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
@@ -6840,8 +6964,16 @@
 		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
 			  " please upgrade BC\n", BNX2X_BC_VER, val);
 	}
-	BNX2X_DEV_INFO("%sWoL Capable\n",
-		       (bp->flags & NO_WOL_FLAG)? "Not " : "");
+
+	if (BP_E1HVN(bp) == 0) {
+		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
+		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
+	} else {
+		/* no WOL capability for E1HVN != 0 */
+		bp->flags |= NO_WOL_FLAG;
+	}
+	BNX2X_DEV_INFO("%sWoL capable\n",
+		       (bp->flags & NO_WOL_FLAG) ? "Not " : "");
 
 	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
 	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
@@ -7274,9 +7406,8 @@
 		bp->mf_config =
 			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
 
-		val =
-		   (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
-		    FUNC_MF_CFG_E1HOV_TAG_MASK);
+		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
+		       FUNC_MF_CFG_E1HOV_TAG_MASK);
 		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
 
 			bp->e1hov = val;
@@ -7324,7 +7455,7 @@
 
 	if (BP_NOMCP(bp)) {
 		/* only supposed to happen on emulation/FPGA */
-		BNX2X_ERR("warning rendom MAC workaround active\n");
+		BNX2X_ERR("warning random MAC workaround active\n");
 		random_ether_addr(bp->dev->dev_addr);
 		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
 	}
@@ -7337,8 +7468,8 @@
 	int func = BP_FUNC(bp);
 	int rc;
 
-	if (nomcp)
-		bp->flags |= NO_MCP_FLAG;
+	/* Disable interrupt handling until HW is initialized */
+	atomic_set(&bp->intr_sem, 1);
 
 	mutex_init(&bp->port.phy_mutex);
 
@@ -7377,8 +7508,6 @@
 	bp->tx_ticks = 50;
 	bp->rx_ticks = 25;
 
-	bp->stats_ticks = 1000000 & 0xffff00;
-
 	bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
 	bp->current_interval = (poll ? poll : bp->timer_interval);
 
@@ -7628,25 +7757,25 @@
 			      struct ethtool_drvinfo *info)
 {
 	struct bnx2x *bp = netdev_priv(dev);
-	char phy_fw_ver[PHY_FW_VER_LEN];
+	u8 phy_fw_ver[PHY_FW_VER_LEN];
 
 	strcpy(info->driver, DRV_MODULE_NAME);
 	strcpy(info->version, DRV_MODULE_VERSION);
 
 	phy_fw_ver[0] = '\0';
 	if (bp->port.pmf) {
-		bnx2x_phy_hw_lock(bp);
+		bnx2x_acquire_phy_lock(bp);
 		bnx2x_get_ext_phy_fw_version(&bp->link_params,
 					     (bp->state != BNX2X_STATE_CLOSED),
 					     phy_fw_ver, PHY_FW_VER_LEN);
-		bnx2x_phy_hw_unlock(bp);
+		bnx2x_release_phy_lock(bp);
 	}
 
-	snprintf(info->fw_version, 32, "%d.%d.%d:%d BC:%x%s%s",
-		 BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
-		 BCM_5710_FW_REVISION_VERSION,
-		 BCM_5710_FW_COMPILE_FLAGS, bp->common.bc_ver,
-		 ((phy_fw_ver[0] != '\0')? " PHY:":""), phy_fw_ver);
+	snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
+		 (bp->common.bc_ver & 0xff0000) >> 16,
+		 (bp->common.bc_ver & 0xff00) >> 8,
+		 (bp->common.bc_ver & 0xff),
+		 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
 	strcpy(info->bus_info, pci_name(bp->pdev));
 	info->n_stats = BNX2X_NUM_STATS;
 	info->testinfo_len = BNX2X_NUM_TESTS;
@@ -8097,7 +8226,7 @@
 	if (eeprom->magic == 0x00504859)
 		if (bp->port.pmf) {
 
-			bnx2x_phy_hw_lock(bp);
+			bnx2x_acquire_phy_lock(bp);
 			rc = bnx2x_flash_download(bp, BP_PORT(bp),
 					     bp->link_params.ext_phy_config,
 					     (bp->state != BNX2X_STATE_CLOSED),
@@ -8109,7 +8238,7 @@
 				rc |= bnx2x_phy_init(&bp->link_params,
 						     &bp->link_vars);
 			}
-			bnx2x_phy_hw_unlock(bp);
+			bnx2x_release_phy_lock(bp);
 
 		} else /* Only the PMF can access the PHY */
 			return -EINVAL;
@@ -8128,7 +8257,6 @@
 
 	coal->rx_coalesce_usecs = bp->rx_ticks;
 	coal->tx_coalesce_usecs = bp->tx_ticks;
-	coal->stats_block_coalesce_usecs = bp->stats_ticks;
 
 	return 0;
 }
@@ -8146,44 +8274,12 @@
 	if (bp->tx_ticks > 0x3000)
 		bp->tx_ticks = 0x3000;
 
-	bp->stats_ticks = coal->stats_block_coalesce_usecs;
-	if (bp->stats_ticks > 0xffff00)
-		bp->stats_ticks = 0xffff00;
-	bp->stats_ticks &= 0xffff00;
-
 	if (netif_running(dev))
 		bnx2x_update_coalesce(bp);
 
 	return 0;
 }
 
-static int bnx2x_set_flags(struct net_device *dev, u32 data)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-	int changed = 0;
-	int rc = 0;
-
-	if (data & ETH_FLAG_LRO) {
-		if (!(dev->features & NETIF_F_LRO)) {
-			dev->features |= NETIF_F_LRO;
-			bp->flags |= TPA_ENABLE_FLAG;
-			changed = 1;
-		}
-
-	} else if (dev->features & NETIF_F_LRO) {
-		dev->features &= ~NETIF_F_LRO;
-		bp->flags &= ~TPA_ENABLE_FLAG;
-		changed = 1;
-	}
-
-	if (changed && netif_running(dev)) {
-		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
-		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
-	}
-
-	return rc;
-}
-
 static void bnx2x_get_ringparam(struct net_device *dev,
 				struct ethtool_ringparam *ering)
 {
@@ -8266,7 +8362,7 @@
 
 	if (epause->autoneg) {
 		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
-			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
+			DP(NETIF_MSG_LINK, "autoneg not supported\n");
 			return -EINVAL;
 		}
 
@@ -8285,6 +8381,34 @@
 	return 0;
 }
 
+static int bnx2x_set_flags(struct net_device *dev, u32 data)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	int changed = 0;
+	int rc = 0;
+
+	/* TPA requires Rx CSUM offloading */
+	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
+		if (!(dev->features & NETIF_F_LRO)) {
+			dev->features |= NETIF_F_LRO;
+			bp->flags |= TPA_ENABLE_FLAG;
+			changed = 1;
+		}
+
+	} else if (dev->features & NETIF_F_LRO) {
+		dev->features &= ~NETIF_F_LRO;
+		bp->flags &= ~TPA_ENABLE_FLAG;
+		changed = 1;
+	}
+
+	if (changed && netif_running(dev)) {
+		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
+	}
+
+	return rc;
+}
+
 static u32 bnx2x_get_rx_csum(struct net_device *dev)
 {
 	struct bnx2x *bp = netdev_priv(dev);
@@ -8295,9 +8419,19 @@
 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
 {
 	struct bnx2x *bp = netdev_priv(dev);
+	int rc = 0;
 
 	bp->rx_csum = data;
-	return 0;
+
+	/* Disable TPA when Rx CSUM is disabled; otherwise all
+	   TPA'ed packets will be discarded due to a wrong TCP CSUM */
+	if (!data) {
+		u32 flags = ethtool_op_get_flags(dev);
+
+		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
+	}
+
+	return rc;
 }
 
 static int bnx2x_set_tso(struct net_device *dev, u32 data)
@@ -8335,6 +8469,7 @@
 {
 	int idx, i, rc = -ENODEV;
 	u32 wr_val = 0;
+	int port = BP_PORT(bp);
 	static const struct {
 		u32  offset0;
 		u32  offset1;
@@ -8400,7 +8535,6 @@
 
 		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
 			u32 offset, mask, save_val, val;
-			int port = BP_PORT(bp);
 
 			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
 			mask = reg_tbl[i].mask;
@@ -8446,16 +8580,17 @@
 	static const struct {
 		char *name;
 		u32 offset;
-		u32 mask;
+		u32 e1_mask;
+		u32 e1h_mask;
 	} prty_tbl[] = {
-		{ "CCM_REG_CCM_PRTY_STS",     CCM_REG_CCM_PRTY_STS,     0 },
-		{ "CFC_REG_CFC_PRTY_STS",     CFC_REG_CFC_PRTY_STS,     0 },
-		{ "DMAE_REG_DMAE_PRTY_STS",   DMAE_REG_DMAE_PRTY_STS,   0 },
-		{ "TCM_REG_TCM_PRTY_STS",     TCM_REG_TCM_PRTY_STS,     0 },
-		{ "UCM_REG_UCM_PRTY_STS",     UCM_REG_UCM_PRTY_STS,     0 },
-		{ "XCM_REG_XCM_PRTY_STS",     XCM_REG_XCM_PRTY_STS,     0x1 },
+		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
+		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
+		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
+		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
+		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
+		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
 
-		{ NULL, 0xffffffff, 0 }
+		{ NULL, 0xffffffff, 0, 0 }
 	};
 
 	if (!netif_running(bp->dev))
@@ -8469,7 +8604,8 @@
 	/* Check the parity status */
 	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
 		val = REG_RD(bp, prty_tbl[i].offset);
-		if (val & ~(prty_tbl[i].mask)) {
+		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
+		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
 			DP(NETIF_MSG_HW,
 			   "%s is 0x%x\n", prty_tbl[i].name, val);
 			goto test_mem_exit;
@@ -8539,15 +8675,15 @@
 
 	if (loopback_mode == BNX2X_MAC_LOOPBACK) {
 		bp->link_params.loopback_mode = LOOPBACK_BMAC;
-		bnx2x_phy_hw_lock(bp);
+		bnx2x_acquire_phy_lock(bp);
 		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
-		bnx2x_phy_hw_unlock(bp);
+		bnx2x_release_phy_lock(bp);
 
 	} else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
 		bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
-		bnx2x_phy_hw_lock(bp);
+		bnx2x_acquire_phy_lock(bp);
 		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
-		bnx2x_phy_hw_unlock(bp);
+		bnx2x_release_phy_lock(bp);
 		/* wait until link state is restored */
 		bnx2x_wait_for_link(bp, link_up);
 
@@ -8771,7 +8907,7 @@
 	if (!netif_running(dev))
 		return;
 
-	/* offline tests are not suppoerted in MF mode */
+	/* offline tests are not supported in MF mode */
 	if (IS_E1HMF(bp))
 		etest->flags &= ~ETH_TEST_FL_OFFLINE;
 
@@ -8827,76 +8963,99 @@
 	long offset;
 	int size;
 	u32 flags;
-	char string[ETH_GSTRING_LEN];
+#define STATS_FLAGS_PORT		1
+#define STATS_FLAGS_FUNC		2
+	u8 string[ETH_GSTRING_LEN];
 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
-/* 1 */	{ STATS_OFFSET32(valid_bytes_received_hi),     8, 1, "rx_bytes" },
-	{ STATS_OFFSET32(error_bytes_received_hi),     8, 1, "rx_error_bytes" },
-	{ STATS_OFFSET32(total_bytes_transmitted_hi),  8, 1, "tx_bytes" },
-	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi), 8, 0, "tx_error_bytes" },
+/* 1 */	{ STATS_OFFSET32(valid_bytes_received_hi),
+				8, STATS_FLAGS_FUNC, "rx_bytes" },
+	{ STATS_OFFSET32(error_bytes_received_hi),
+				8, STATS_FLAGS_FUNC, "rx_error_bytes" },
+	{ STATS_OFFSET32(total_bytes_transmitted_hi),
+				8, STATS_FLAGS_FUNC, "tx_bytes" },
+	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
+				8, STATS_FLAGS_PORT, "tx_error_bytes" },
 	{ STATS_OFFSET32(total_unicast_packets_received_hi),
-						8, 1, "rx_ucast_packets" },
+				8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
 	{ STATS_OFFSET32(total_multicast_packets_received_hi),
-						8, 1, "rx_mcast_packets" },
+				8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
 	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
-						8, 1, "rx_bcast_packets" },
+				8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
 	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
-						8, 1, "tx_packets" },
+				8, STATS_FLAGS_FUNC, "tx_packets" },
 	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
-						8, 0, "tx_mac_errors" },
+				8, STATS_FLAGS_PORT, "tx_mac_errors" },
 /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
-						8, 0, "tx_carrier_errors" },
+				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
 	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
-						8, 0, "rx_crc_errors" },
+				8, STATS_FLAGS_PORT, "rx_crc_errors" },
 	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
-						8, 0, "rx_align_errors" },
+				8, STATS_FLAGS_PORT, "rx_align_errors" },
 	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
-						8, 0, "tx_single_collisions" },
+				8, STATS_FLAGS_PORT, "tx_single_collisions" },
 	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
-						8, 0, "tx_multi_collisions" },
+				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
 	{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
-						8, 0, "tx_deferred" },
+				8, STATS_FLAGS_PORT, "tx_deferred" },
 	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
-						8, 0, "tx_excess_collisions" },
+				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
 	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
-						8, 0, "tx_late_collisions" },
+				8, STATS_FLAGS_PORT, "tx_late_collisions" },
 	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
-						8, 0, "tx_total_collisions" },
+				8, STATS_FLAGS_PORT, "tx_total_collisions" },
 	{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
-						8, 0, "rx_fragments" },
-/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi), 8, 0, "rx_jabbers" },
+				8, STATS_FLAGS_PORT, "rx_fragments" },
+/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
+				8, STATS_FLAGS_PORT, "rx_jabbers" },
 	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
-						8, 0, "rx_undersize_packets" },
+				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
 	{ STATS_OFFSET32(jabber_packets_received),
-						4, 1, "rx_oversize_packets" },
+				4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
 	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
-						8, 0, "tx_64_byte_packets" },
+				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
 	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
-					8, 0, "tx_65_to_127_byte_packets" },
+			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
 	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
-					8, 0, "tx_128_to_255_byte_packets" },
+			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
 	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
-					8, 0, "tx_256_to_511_byte_packets" },
+			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
 	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
-					8, 0, "tx_512_to_1023_byte_packets" },
+			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
 	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
-					8, 0, "tx_1024_to_1522_byte_packets" },
+			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
 	{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
-					8, 0, "tx_1523_to_9022_byte_packets" },
+			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
 /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
-						8, 0, "rx_xon_frames" },
+				8, STATS_FLAGS_PORT, "rx_xon_frames" },
 	{ STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
-						8, 0, "rx_xoff_frames" },
-	{ STATS_OFFSET32(tx_stat_outxonsent_hi),  8, 0, "tx_xon_frames" },
-	{ STATS_OFFSET32(tx_stat_outxoffsent_hi), 8, 0, "tx_xoff_frames" },
+				8, STATS_FLAGS_PORT, "rx_xoff_frames" },
+	{ STATS_OFFSET32(tx_stat_outxonsent_hi),
+				8, STATS_FLAGS_PORT, "tx_xon_frames" },
+	{ STATS_OFFSET32(tx_stat_outxoffsent_hi),
+				8, STATS_FLAGS_PORT, "tx_xoff_frames" },
 	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
-						8, 0, "rx_mac_ctrl_frames" },
-	{ STATS_OFFSET32(mac_filter_discard),   4, 1, "rx_filtered_packets" },
-	{ STATS_OFFSET32(no_buff_discard),      4, 1, "rx_discards" },
-	{ STATS_OFFSET32(xxoverflow_discard),   4, 1, "rx_fw_discards" },
-	{ STATS_OFFSET32(brb_drop_hi),          8, 1, "brb_discard" },
-/* 39 */{ STATS_OFFSET32(brb_truncate_discard), 8, 1, "brb_truncate" }
+				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
+	{ STATS_OFFSET32(mac_filter_discard),
+				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
+	{ STATS_OFFSET32(no_buff_discard),
+				4, STATS_FLAGS_FUNC, "rx_discards" },
+	{ STATS_OFFSET32(xxoverflow_discard),
+				4, STATS_FLAGS_PORT, "rx_fw_discards" },
+	{ STATS_OFFSET32(brb_drop_hi),
+				8, STATS_FLAGS_PORT, "brb_discard" },
+	{ STATS_OFFSET32(brb_truncate_hi),
+				8, STATS_FLAGS_PORT, "brb_truncate" },
+/* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
+				4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
+	{ STATS_OFFSET32(rx_skb_alloc_failed),
+				4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
+/* 42 */{ STATS_OFFSET32(hw_csum_err),
+				4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
 };
 
+#define IS_NOT_E1HMF_STAT(bp, i) \
+		(IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
+
 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
 {
 	struct bnx2x *bp = netdev_priv(dev);
@@ -8905,7 +9064,7 @@
 	switch (stringset) {
 	case ETH_SS_STATS:
 		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
-			if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags))
+			if (IS_NOT_E1HMF_STAT(bp, i))
 				continue;
 			strcpy(buf + j*ETH_GSTRING_LEN,
 			       bnx2x_stats_arr[i].string);
@@ -8925,7 +9084,7 @@
 	int i, num_stats = 0;
 
 	for (i = 0; i < BNX2X_NUM_STATS; i++) {
-		if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags))
+		if (IS_NOT_E1HMF_STAT(bp, i))
 			continue;
 		num_stats++;
 	}
@@ -8940,7 +9099,7 @@
 	int i, j;
 
 	for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
-		if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags))
+		if (IS_NOT_E1HMF_STAT(bp, i))
 			continue;
 
 		if (bnx2x_stats_arr[i].size == 0) {
@@ -9057,7 +9216,7 @@
 				       PCI_PM_CTRL_PME_STATUS));
 
 		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
-		/* delay required during transition out of D3hot */
+			/* delay required during transition out of D3hot */
 			msleep(20);
 		break;
 
@@ -9104,17 +9263,16 @@
 
 	bnx2x_update_fpsb_idx(fp);
 
-	if ((fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) ||
-	    (fp->tx_pkt_prod != fp->tx_pkt_cons))
+	if (BNX2X_HAS_TX_WORK(fp))
 		bnx2x_tx_int(fp, budget);
 
-	if (le16_to_cpu(*fp->rx_cons_sb) != fp->rx_comp_cons)
+	if (BNX2X_HAS_RX_WORK(fp))
 		work_done = bnx2x_rx_int(fp, budget);
 
-	rmb(); /* bnx2x_has_work() reads the status block */
+	rmb(); /* BNX2X_HAS_WORK() reads the status block */
 
 	/* must not complete if we consumed full budget */
-	if ((work_done < budget) && !bnx2x_has_work(fp)) {
+	if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
 
 #ifdef BNX2X_STOP_ON_ERROR
 poll_panic:
@@ -9131,7 +9289,7 @@
 
 
 /* we split the first BD into headers and data BDs
- * to ease the pain of our fellow micocode engineers
+ * to ease the pain of our fellow microcode engineers
  * we use one mapping for both BDs
  * So far this has only been observed to happen
  * in Other Operating Systems(TM)
@@ -9238,7 +9396,7 @@
 			/* Check if LSO packet needs to be copied:
 			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
 			int wnd_size = MAX_FETCH_BD - 3;
-			/* Number of widnows to check */
+			/* Number of windows to check */
 			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
 			int wnd_idx = 0;
 			int frag_idx = 0;
@@ -9340,7 +9498,7 @@
 	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
 	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
 
-	/* First, check if we need to linearaize the skb
+	/* First, check if we need to linearize the skb
 	   (due to FW restrictions) */
 	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
 		/* Statistics of linearization */
@@ -9349,7 +9507,7 @@
 			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
 			   "silently dropping this SKB\n");
 			dev_kfree_skb_any(skb);
-			return 0;
+			return NETDEV_TX_OK;
 		}
 	}
 
@@ -9372,7 +9530,8 @@
 	tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
 	tx_bd->general_data = (UNICAST_ADDRESS <<
 			       ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
-	tx_bd->general_data |= 1; /* header nbd */
+	/* header nbd */
+	tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
 
 	/* remember the first BD of the packet */
 	tx_buf->first_bd = fp->tx_bd_prod;
@@ -9451,7 +9610,7 @@
 
 	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
-	nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL)? 1 : 2);
+	nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
 	tx_bd->nbd = cpu_to_le16(nbd);
 	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
 
@@ -9721,9 +9880,9 @@
 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
 	if (netif_running(dev)) {
 		if (CHIP_IS_E1(bp))
-			bnx2x_set_mac_addr_e1(bp);
+			bnx2x_set_mac_addr_e1(bp, 1);
 		else
-			bnx2x_set_mac_addr_e1h(bp);
+			bnx2x_set_mac_addr_e1h(bp, 1);
 	}
 
 	return 0;
@@ -9734,6 +9893,7 @@
 {
 	struct mii_ioctl_data *data = if_mii(ifr);
 	struct bnx2x *bp = netdev_priv(dev);
+	int port = BP_PORT(bp);
 	int err;
 
 	switch (cmd) {
@@ -9749,7 +9909,7 @@
 			return -EAGAIN;
 
 		mutex_lock(&bp->port.phy_mutex);
-		err = bnx2x_cl45_read(bp, BP_PORT(bp), 0, bp->port.phy_addr,
+		err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
 				      DEFAULT_PHY_DEV_ADDR,
 				      (data->reg_num & 0x1f), &mii_regval);
 		data->val_out = mii_regval;
@@ -9765,7 +9925,7 @@
 			return -EAGAIN;
 
 		mutex_lock(&bp->port.phy_mutex);
-		err = bnx2x_cl45_write(bp, BP_PORT(bp), 0, bp->port.phy_addr,
+		err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
 				       DEFAULT_PHY_DEV_ADDR,
 				       (data->reg_num & 0x1f), data->val_in);
 		mutex_unlock(&bp->port.phy_mutex);
@@ -10141,7 +10301,7 @@
 
 	netif_device_detach(dev);
 
-	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
 
 	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
 
@@ -10174,7 +10334,7 @@
 	bnx2x_set_power_state(bp, PCI_D0);
 	netif_device_attach(dev);
 
-	rc = bnx2x_nic_load(bp, LOAD_NORMAL);
+	rc = bnx2x_nic_load(bp, LOAD_OPEN);
 
 	rtnl_unlock();
 
diff --git a/drivers/net/bnx2x_reg.h b/drivers/net/bnx2x_reg.h
index 15c9a99..a67b0c3 100644
--- a/drivers/net/bnx2x_reg.h
+++ b/drivers/net/bnx2x_reg.h
@@ -6,7 +6,7 @@
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation.
  *
- * The registers description starts with the regsister Access type followed
+ * Each register description starts with the register Access type followed
  * by size in bits. For example [RW 32]. The access types are:
  * R  - Read only
  * RC - Clear on read
@@ -49,7 +49,7 @@
 /* [RW 10] Write client 0: Assert pause threshold. */
 #define BRB1_REG_PAUSE_LOW_THRESHOLD_0				 0x60068
 #define BRB1_REG_PAUSE_LOW_THRESHOLD_1				 0x6006c
-/* [R 24] The number of full blocks occpied by port. */
+/* [R 24] The number of full blocks occupied by port. */
 #define BRB1_REG_PORT_NUM_OCC_BLOCKS_0				 0x60094
 /* [RW 1] Reset the design by software. */
 #define BRB1_REG_SOFT_RESET					 0x600dc
@@ -740,6 +740,7 @@
 #define HC_REG_ATTN_MSG1_ADDR_L 				 0x108020
 #define HC_REG_ATTN_NUM_P0					 0x108038
 #define HC_REG_ATTN_NUM_P1					 0x10803c
+#define HC_REG_COMMAND_REG					 0x108180
 #define HC_REG_CONFIG_0 					 0x108000
 #define HC_REG_CONFIG_1 					 0x108004
 #define HC_REG_FUNC_NUM_P0					 0x1080ac
@@ -1372,6 +1373,23 @@
    be asserted). */
 #define MISC_REG_DRIVER_CONTROL_16				 0xa5f0
 #define MISC_REG_DRIVER_CONTROL_16_SIZE 			 2
+/* [RW 32] The following driver registers(1...16) represent 16 drivers and
+   32 clients. Each client can be controlled by one driver only. A one in
+   each bit means that this driver controls the appropriate client (Ex: bit
+   5 set means this driver controls client number 5). addr1 = set; addr0 =
+   clear; a read from either address gives the same result = status. A write
+   to address 1 sets a request to control all the clients whose appropriate
+   bit (in the write command) is set: if the client is free (the appropriate
+   bit in all the other drivers is clear) a one will be written to that
+   driver register; if the client isn't free the bit will remain zero. If
+   the appropriate bit is set (the driver requests to gain control of a
+   client it already controls) the ~MISC_REGISTERS_INT_STS.GENERIC_SW
+   interrupt will be asserted. A write to address 0 sets a request to free
+   all the clients whose appropriate bit (in the write command) is set: if
+   the appropriate bit is clear (the driver requests to free a client it
+   doesn't control) the ~MISC_REGISTERS_INT_STS.GENERIC_SW interrupt will
+   be asserted. */
+#define MISC_REG_DRIVER_CONTROL_7				 0xa3c8
 /* [RW 1] e1hmf for WOL. If clr WOL signal o the PXP will be send on bit 0
    only. */
 #define MISC_REG_E1HMF_MODE					 0xa5f8
@@ -1394,13 +1412,13 @@
 #define MISC_REG_GPIO						 0xa490
 /* [R 28] this field hold the last information that caused reserved
    attention. bits [19:0] - address; [22:20] function; [23] reserved;
-   [27:24] the master thatcaused the attention - according to the following
+   [27:24] the master that caused the attention - according to the following
    encodeing:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
    dbu; 8 = dmae */
 #define MISC_REG_GRC_RSV_ATTN					 0xa3c0
 /* [R 28] this field hold the last information that caused timeout
    attention. bits [19:0] - address; [22:20] function; [23] reserved;
-   [27:24] the master thatcaused the attention - according to the following
+   [27:24] the master that caused the attention - according to the following
    encodeing:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
    dbu; 8 = dmae */
 #define MISC_REG_GRC_TIMEOUT_ATTN				 0xa3c4
@@ -1677,6 +1695,7 @@
 /* [RW 8] init credit counter for port0 in LLH */
 #define NIG_REG_LLH0_XCM_INIT_CREDIT				 0x10554
 #define NIG_REG_LLH0_XCM_MASK					 0x10130
+#define NIG_REG_LLH1_BRB1_DRV_MASK				 0x10248
 /* [RW 1] send to BRB1 if no match on any of RMP rules. */
 #define NIG_REG_LLH1_BRB1_NOT_MCP				 0x102dc
 /* [RW 2] Determine the classification participants. 0: no classification.1:
@@ -1727,6 +1746,9 @@
 /* [R 32] Rx statistics : In user packets discarded due to BRB backpressure
    for port0 */
 #define NIG_REG_STAT0_BRB_DISCARD				 0x105f0
+/* [R 32] Rx statistics : In user packets truncated due to BRB backpressure
+   for port0 */
+#define NIG_REG_STAT0_BRB_TRUNCATE				 0x105f8
 /* [WB_R 36] Tx statistics : Number of packets from emac0 or bmac0 that
    between 1024 and 1522 bytes for port0 */
 #define NIG_REG_STAT0_EGRESS_MAC_PKT0				 0x10750
@@ -2298,7 +2320,7 @@
 /* [RW 3] page size in L2P table for QM module; -4k; -8k; -16k; -32k; -64k;
    -128k */
 #define PXP2_REG_RQ_QM_P_SIZE					 0x120050
-/* [RW 1] 1' indicates that the RBC has finished configurating the PSWRQ */
+/* [RW 1] 1' indicates that the RBC has finished configuring the PSWRQ */
 #define PXP2_REG_RQ_RBC_DONE					 0x1201b0
 /* [RW 3] Max burst size filed for read requests port 0; 000 - 128B;
    001:256B; 010: 512B; 11:1K:100:2K; 01:4K */
@@ -2406,7 +2428,7 @@
 /* [RW 2] 0 - 128B;  - 256B;  - 512B;  - 1024B; when the payload in the
    buffer reaches this number has_payload will be asserted */
 #define PXP2_REG_WR_DMAE_MPS					 0x1205ec
-/* [RW 10] if Number of entries in dmae fifo will be higer than this
+/* [RW 10] if Number of entries in dmae fifo will be higher than this
    threshold then has_payload indication will be asserted; the default value
    should be equal to &gt;  write MBS size! */
 #define PXP2_REG_WR_DMAE_TH					 0x120368
@@ -2427,7 +2449,7 @@
 /* [RW 2] 0 - 128B;  - 256B;  - 512B;  - 1024B; when the payload in the
    buffer reaches this number has_payload will be asserted */
 #define PXP2_REG_WR_TSDM_MPS					 0x1205d4
-/* [RW 10] if Number of entries in usdmdp fifo will be higer than this
+/* [RW 10] if Number of entries in usdmdp fifo will be higher than this
    threshold then has_payload indication will be asserted; the default value
    should be equal to &gt;  write MBS size! */
 #define PXP2_REG_WR_USDMDP_TH					 0x120348
@@ -3294,12 +3316,12 @@
 #define XSEM_XSEM_INT_MASK_0_REG_ADDRESS_ERROR_SIZE		 0
 #define CFC_DEBUG1_REG_WRITE_AC 				 (0x1<<4)
 #define CFC_DEBUG1_REG_WRITE_AC_SIZE				 4
-/* [R 1] debug only: This bit indicates wheter indicates that external
+/* [R 1] debug only: This bit indicates whether the external
    buffer was wrapped (oldest data was thrown); Relevant only when
    ~dbg_registers_debug_target=2 (PCI) & ~dbg_registers_full_mode=1 (wrap); */
 #define DBG_REG_WRAP_ON_EXT_BUFFER				 0xc124
 #define DBG_REG_WRAP_ON_EXT_BUFFER_SIZE 			 1
-/* [R 1] debug only: This bit indicates wheter the internal buffer was
+/* [R 1] debug only: This bit indicates whether the internal buffer was
    wrapped (oldest data was thrown) Relevant only when
    ~dbg_registers_debug_target=0 (internal buffer) */
 #define DBG_REG_WRAP_ON_INT_BUFFER				 0xc128
@@ -4944,6 +4966,7 @@
 #define EMAC_RX_MODE_PROMISCUOUS				 (1L<<8)
 #define EMAC_RX_MTU_SIZE_JUMBO_ENA				 (1L<<31)
 #define EMAC_TX_MODE_EXT_PAUSE_EN				 (1L<<3)
+#define EMAC_TX_MODE_FLOW_EN					 (1L<<4)
 #define MISC_REGISTERS_GPIO_0					 0
 #define MISC_REGISTERS_GPIO_1					 1
 #define MISC_REGISTERS_GPIO_2					 2
@@ -4959,6 +4982,7 @@
 #define MISC_REGISTERS_GPIO_PORT_SHIFT				 4
 #define MISC_REGISTERS_GPIO_SET_POS				 8
 #define MISC_REGISTERS_RESET_REG_1_CLEAR			 0x588
+#define MISC_REGISTERS_RESET_REG_1_RST_NIG			 (0x1<<7)
 #define MISC_REGISTERS_RESET_REG_1_SET				 0x584
 #define MISC_REGISTERS_RESET_REG_2_CLEAR			 0x598
 #define MISC_REGISTERS_RESET_REG_2_RST_BMAC0			 (0x1<<0)
@@ -4993,7 +5017,9 @@
 #define HW_LOCK_MAX_RESOURCE_VALUE				 31
 #define HW_LOCK_RESOURCE_8072_MDIO				 0
 #define HW_LOCK_RESOURCE_GPIO					 1
+#define HW_LOCK_RESOURCE_PORT0_ATT_MASK 			 3
 #define HW_LOCK_RESOURCE_SPIO					 2
+#define HW_LOCK_RESOURCE_UNDI					 5
 #define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR		      (1<<18)
 #define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT		      (1<<31)
 #define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT		      (1<<9)
@@ -5144,59 +5170,73 @@
 #define GRCBASE_MISC_AEU	GRCBASE_MISC
 
 
-/*the offset of the configuration space in the pci core register*/
+/* offset of configuration space in the pci core register */
 #define PCICFG_OFFSET					0x2000
 #define PCICFG_VENDOR_ID_OFFSET 			0x00
 #define PCICFG_DEVICE_ID_OFFSET 			0x02
 #define PCICFG_COMMAND_OFFSET				0x04
+#define PCICFG_COMMAND_IO_SPACE 		(1<<0)
+#define PCICFG_COMMAND_MEM_SPACE		(1<<1)
+#define PCICFG_COMMAND_BUS_MASTER		(1<<2)
+#define PCICFG_COMMAND_SPECIAL_CYCLES		(1<<3)
+#define PCICFG_COMMAND_MWI_CYCLES		(1<<4)
+#define PCICFG_COMMAND_VGA_SNOOP		(1<<5)
+#define PCICFG_COMMAND_PERR_ENA 		(1<<6)
+#define PCICFG_COMMAND_STEPPING 		(1<<7)
+#define PCICFG_COMMAND_SERR_ENA 		(1<<8)
+#define PCICFG_COMMAND_FAST_B2B 		(1<<9)
+#define PCICFG_COMMAND_INT_DISABLE		(1<<10)
+#define PCICFG_COMMAND_RESERVED 		(0x1f<<11)
 #define PCICFG_STATUS_OFFSET				0x06
-#define PCICFG_REVESION_ID				    0x08
+#define PCICFG_REVESION_ID				0x08
 #define PCICFG_CACHE_LINE_SIZE				0x0c
 #define PCICFG_LATENCY_TIMER				0x0d
-#define PCICFG_BAR_1_LOW				    0x10
-#define PCICFG_BAR_1_HIGH				    0x14
-#define PCICFG_BAR_2_LOW				    0x18
-#define PCICFG_BAR_2_HIGH				    0x1c
-#define PCICFG_SUBSYSTEM_VENDOR_ID_OFFSET	0x2c
+#define PCICFG_BAR_1_LOW				0x10
+#define PCICFG_BAR_1_HIGH				0x14
+#define PCICFG_BAR_2_LOW				0x18
+#define PCICFG_BAR_2_HIGH				0x1c
+#define PCICFG_SUBSYSTEM_VENDOR_ID_OFFSET		0x2c
 #define PCICFG_SUBSYSTEM_ID_OFFSET			0x2e
-#define PCICFG_INT_LINE 				    0x3c
-#define PCICFG_INT_PIN					    0x3d
-#define PCICFG_PM_CSR_OFFSET			0x4c
-#define PCICFG_GRC_ADDRESS				    0x78
-#define PCICFG_GRC_DATA 				    0x80
+#define PCICFG_INT_LINE 				0x3c
+#define PCICFG_INT_PIN					0x3d
+#define PCICFG_PM_CAPABILITY				0x48
+#define PCICFG_PM_CAPABILITY_VERSION		(0x3<<16)
+#define PCICFG_PM_CAPABILITY_CLOCK		(1<<19)
+#define PCICFG_PM_CAPABILITY_RESERVED		(1<<20)
+#define PCICFG_PM_CAPABILITY_DSI		(1<<21)
+#define PCICFG_PM_CAPABILITY_AUX_CURRENT	(0x7<<22)
+#define PCICFG_PM_CAPABILITY_D1_SUPPORT 	(1<<25)
+#define PCICFG_PM_CAPABILITY_D2_SUPPORT 	(1<<26)
+#define PCICFG_PM_CAPABILITY_PME_IN_D0		(1<<27)
+#define PCICFG_PM_CAPABILITY_PME_IN_D1		(1<<28)
+#define PCICFG_PM_CAPABILITY_PME_IN_D2		(1<<29)
+#define PCICFG_PM_CAPABILITY_PME_IN_D3_HOT	(1<<30)
+#define PCICFG_PM_CAPABILITY_PME_IN_D3_COLD	(1<<31)
+#define PCICFG_PM_CSR_OFFSET				0x4c
+#define PCICFG_PM_CSR_STATE			(0x3<<0)
+#define PCICFG_PM_CSR_PME_ENABLE		(1<<8)
+#define PCICFG_PM_CSR_PME_STATUS		(1<<15)
+#define PCICFG_GRC_ADDRESS				0x78
+#define PCICFG_GRC_DATA 				0x80
 #define PCICFG_DEVICE_CONTROL				0xb4
 #define PCICFG_LINK_CONTROL				0xbc
 
-#define PCICFG_COMMAND_IO_SPACE 		    (1<<0)
-#define PCICFG_COMMAND_MEM_SPACE		    (1<<1)
-#define PCICFG_COMMAND_BUS_MASTER		    (1<<2)
-#define PCICFG_COMMAND_SPECIAL_CYCLES		    (1<<3)
-#define PCICFG_COMMAND_MWI_CYCLES		    (1<<4)
-#define PCICFG_COMMAND_VGA_SNOOP		    (1<<5)
-#define PCICFG_COMMAND_PERR_ENA 		    (1<<6)
-#define PCICFG_COMMAND_STEPPING 		    (1<<7)
-#define PCICFG_COMMAND_SERR_ENA 		    (1<<8)
-#define PCICFG_COMMAND_FAST_B2B 		    (1<<9)
-#define PCICFG_COMMAND_INT_DISABLE		    (1<<10)
-#define PCICFG_COMMAND_RESERVED 		    (0x1f<<11)
-
-#define PCICFG_PM_CSR_STATE			    (0x3<<0)
-#define PCICFG_PM_CSR_PME_STATUS		    (1<<15)
 
 #define BAR_USTRORM_INTMEM				0x400000
 #define BAR_CSTRORM_INTMEM				0x410000
 #define BAR_XSTRORM_INTMEM				0x420000
 #define BAR_TSTRORM_INTMEM				0x430000
 
+/* for accessing the IGU in case of status block ACK */
 #define BAR_IGU_INTMEM					0x440000
 
 #define BAR_DOORBELL_OFFSET				0x800000
 
 #define BAR_ME_REGISTER 				0x450000
 
-
-#define GRC_CONFIG_2_SIZE_REG		    0x408 /* config_2 offset */
-#define PCI_CONFIG_2_BAR1_SIZE			    (0xfL<<0)
+/* config_2 offset */
+#define GRC_CONFIG_2_SIZE_REG				0x408
+#define PCI_CONFIG_2_BAR1_SIZE			(0xfL<<0)
 #define PCI_CONFIG_2_BAR1_SIZE_DISABLED 	(0L<<0)
 #define PCI_CONFIG_2_BAR1_SIZE_64K		(1L<<0)
 #define PCI_CONFIG_2_BAR1_SIZE_128K		(2L<<0)
@@ -5213,11 +5253,11 @@
 #define PCI_CONFIG_2_BAR1_SIZE_256M		(13L<<0)
 #define PCI_CONFIG_2_BAR1_SIZE_512M		(14L<<0)
 #define PCI_CONFIG_2_BAR1_SIZE_1G		(15L<<0)
-#define PCI_CONFIG_2_BAR1_64ENA 		    (1L<<4)
-#define PCI_CONFIG_2_EXP_ROM_RETRY		    (1L<<5)
-#define PCI_CONFIG_2_CFG_CYCLE_RETRY		    (1L<<6)
-#define PCI_CONFIG_2_FIRST_CFG_DONE		    (1L<<7)
-#define PCI_CONFIG_2_EXP_ROM_SIZE		    (0xffL<<8)
+#define PCI_CONFIG_2_BAR1_64ENA 		(1L<<4)
+#define PCI_CONFIG_2_EXP_ROM_RETRY		(1L<<5)
+#define PCI_CONFIG_2_CFG_CYCLE_RETRY		(1L<<6)
+#define PCI_CONFIG_2_FIRST_CFG_DONE		(1L<<7)
+#define PCI_CONFIG_2_EXP_ROM_SIZE		(0xffL<<8)
 #define PCI_CONFIG_2_EXP_ROM_SIZE_DISABLED	(0L<<8)
 #define PCI_CONFIG_2_EXP_ROM_SIZE_2K		(1L<<8)
 #define PCI_CONFIG_2_EXP_ROM_SIZE_4K		(2L<<8)
@@ -5234,46 +5274,44 @@
 #define PCI_CONFIG_2_EXP_ROM_SIZE_8M		(13L<<8)
 #define PCI_CONFIG_2_EXP_ROM_SIZE_16M		(14L<<8)
 #define PCI_CONFIG_2_EXP_ROM_SIZE_32M		(15L<<8)
-#define PCI_CONFIG_2_BAR_PREFETCH		    (1L<<16)
-#define PCI_CONFIG_2_RESERVED0			    (0x7fffL<<17)
+#define PCI_CONFIG_2_BAR_PREFETCH		(1L<<16)
+#define PCI_CONFIG_2_RESERVED0			(0x7fffL<<17)
 
 /* config_3 offset */
-#define GRC_CONFIG_3_SIZE_REG				(0x40c)
-#define PCI_CONFIG_3_STICKY_BYTE		    (0xffL<<0)
-#define PCI_CONFIG_3_FORCE_PME			    (1L<<24)
-#define PCI_CONFIG_3_PME_STATUS 		    (1L<<25)
-#define PCI_CONFIG_3_PME_ENABLE 		    (1L<<26)
-#define PCI_CONFIG_3_PM_STATE			    (0x3L<<27)
-#define PCI_CONFIG_3_VAUX_PRESET		    (1L<<30)
-#define PCI_CONFIG_3_PCI_POWER			    (1L<<31)
-
-/* config_2 offset */
-#define GRC_CONFIG_2_SIZE_REG		    0x408
+#define GRC_CONFIG_3_SIZE_REG				0x40c
+#define PCI_CONFIG_3_STICKY_BYTE		(0xffL<<0)
+#define PCI_CONFIG_3_FORCE_PME			(1L<<24)
+#define PCI_CONFIG_3_PME_STATUS 		(1L<<25)
+#define PCI_CONFIG_3_PME_ENABLE 		(1L<<26)
+#define PCI_CONFIG_3_PM_STATE			(0x3L<<27)
+#define PCI_CONFIG_3_VAUX_PRESET		(1L<<30)
+#define PCI_CONFIG_3_PCI_POWER			(1L<<31)
 
 #define GRC_BAR2_CONFIG 				0x4e0
-#define PCI_CONFIG_2_BAR2_SIZE			    (0xfL<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_DISABLED 	    (0L<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_64K		    (1L<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_128K		    (2L<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_256K		    (3L<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_512K		    (4L<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_1M		    (5L<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_2M		    (6L<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_4M		    (7L<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_8M		    (8L<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_16M		    (9L<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_32M		    (10L<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_64M		    (11L<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_128M		    (12L<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_256M		    (13L<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_512M		    (14L<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_1G		    (15L<<0)
-#define PCI_CONFIG_2_BAR2_64ENA 		    (1L<<4)
+#define PCI_CONFIG_2_BAR2_SIZE			(0xfL<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_DISABLED 	(0L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_64K		(1L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_128K		(2L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_256K		(3L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_512K		(4L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_1M		(5L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_2M		(6L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_4M		(7L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_8M		(8L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_16M		(9L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_32M		(10L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_64M		(11L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_128M		(12L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_256M		(13L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_512M		(14L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_1G		(15L<<0)
+#define PCI_CONFIG_2_BAR2_64ENA 		(1L<<4)
 
-#define PCI_PM_DATA_A					(0x410)
-#define PCI_PM_DATA_B					(0x414)
-#define PCI_ID_VAL1					(0x434)
-#define PCI_ID_VAL2					(0x438)
+#define PCI_PM_DATA_A					0x410
+#define PCI_PM_DATA_B					0x414
+#define PCI_ID_VAL1					0x434
+#define PCI_ID_VAL2					0x438
+
 
 #define MDIO_REG_BANK_CL73_IEEEB0			0x0
 #define MDIO_CL73_IEEEB0_CL73_AN_CONTROL		0x0
@@ -5522,6 +5560,8 @@
 #define MDIO_PMA_REG_GEN_CTRL		0xca10
 #define MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP	0x0188
 #define MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET		0x018a
+#define MDIO_PMA_REG_M8051_MSGIN_REG	0xca12
+#define MDIO_PMA_REG_M8051_MSGOUT_REG	0xca13
 #define MDIO_PMA_REG_ROM_VER1		0xca19
 #define MDIO_PMA_REG_ROM_VER2		0xca1a
 #define MDIO_PMA_REG_EDC_FFE_MAIN	0xca1b
@@ -5576,7 +5616,8 @@
 #define MDIO_AN_REG_LINK_STATUS 	0x8304
 #define MDIO_AN_REG_CL37_CL73		0x8370
 #define MDIO_AN_REG_CL37_AN		0xffe0
-#define MDIO_AN_REG_CL37_FD		0xffe4
+#define MDIO_AN_REG_CL37_FC_LD		0xffe4
+#define MDIO_AN_REG_CL37_FC_LP		0xffe5
 
 
 #define IGU_FUNC_BASE			0x0400
@@ -5600,4 +5641,13 @@
 #define IGU_INT_NOP				2
 #define IGU_INT_NOP2			3
 
+#define COMMAND_REG_INT_ACK	    0x0
+#define COMMAND_REG_PROD_UPD	    0x4
+#define COMMAND_REG_ATTN_BITS_UPD   0x8
+#define COMMAND_REG_ATTN_BITS_SET   0xc
+#define COMMAND_REG_ATTN_BITS_CLR   0x10
+#define COMMAND_REG_COALESCE_NOW    0x14
+#define COMMAND_REG_SIMD_MASK	    0x18
+#define COMMAND_REG_SIMD_NOMASK     0x1c
+
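
These offsets pick an operation inside the command block that starts at
HC_REG_COMMAND_REG.  A hedged sketch of acknowledging a status block
through it, assuming the driver's REG_WR() accessor, a 32-bit ack word
already packed by the caller, and 32-byte spacing between the two ports'
command blocks:

	/* sketch only: one 32-bit write issues the whole IGU command */
	static void ack_sb_sketch(struct bnx2x *bp, int port, u32 igu_ack)
	{
		u32 hc_addr = HC_REG_COMMAND_REG + port * 32 +
			      COMMAND_REG_INT_ACK;

		REG_WR(bp, hc_addr, igu_ack);
	}
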
 
diff --git a/drivers/sbus/sbus.c b/drivers/sbus/sbus.c
index 73a86d0..9c12924 100644
--- a/drivers/sbus/sbus.c
+++ b/drivers/sbus/sbus.c
@@ -7,13 +7,13 @@
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/device.h>
+#include <linux/of_device.h>
 
 #include <asm/system.h>
 #include <asm/sbus.h>
 #include <asm/dma.h>
 #include <asm/oplib.h>
 #include <asm/prom.h>
-#include <asm/of_device.h>
 #include <asm/bpp.h>
 #include <asm/irq.h>
 
diff --git a/drivers/serial/sunhv.c b/drivers/serial/sunhv.c
index aeeec55..e41766d 100644
--- a/drivers/serial/sunhv.c
+++ b/drivers/serial/sunhv.c
@@ -17,11 +17,11 @@
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/init.h>
+#include <linux/of_device.h>
 
 #include <asm/hypervisor.h>
 #include <asm/spitfire.h>
 #include <asm/prom.h>
-#include <asm/of_device.h>
 #include <asm/irq.h>
 
 #if defined(CONFIG_MAGIC_SYSRQ)
diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c
index 15ee497..29b4458 100644
--- a/drivers/serial/sunsab.c
+++ b/drivers/serial/sunsab.c
@@ -32,11 +32,11 @@
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/init.h>
+#include <linux/of_device.h>
 
 #include <asm/io.h>
 #include <asm/irq.h>
 #include <asm/prom.h>
-#include <asm/of_device.h>
 
 #if defined(CONFIG_SERIAL_SUNSAB_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
 #define SUPPORT_SYSRQ
diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c
index e24e682..a378464 100644
--- a/drivers/serial/sunsu.c
+++ b/drivers/serial/sunsu.c
@@ -35,11 +35,11 @@
 #include <linux/serial_reg.h>
 #include <linux/init.h>
 #include <linux/delay.h>
+#include <linux/of_device.h>
 
 #include <asm/io.h>
 #include <asm/irq.h>
 #include <asm/prom.h>
-#include <asm/of_device.h>
 
 #if defined(CONFIG_SERIAL_SUNSU_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
 #define SUPPORT_SYSRQ
diff --git a/drivers/serial/sunzilog.c b/drivers/serial/sunzilog.c
index 0f3d69b..3cb4c8a 100644
--- a/drivers/serial/sunzilog.c
+++ b/drivers/serial/sunzilog.c
@@ -32,11 +32,11 @@
 #include <linux/serio.h>
 #endif
 #include <linux/init.h>
+#include <linux/of_device.h>
 
 #include <asm/io.h>
 #include <asm/irq.h>
 #include <asm/prom.h>
-#include <asm/of_device.h>
 
 #if defined(CONFIG_SERIAL_SUNZILOG_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
 #define SUPPORT_SYSRQ
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 755823c..bcefbdd 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -95,16 +95,18 @@
 
 source "drivers/usb/core/Kconfig"
 
+source "drivers/usb/mon/Kconfig"
+
 source "drivers/usb/host/Kconfig"
 
+source "drivers/usb/musb/Kconfig"
+
 source "drivers/usb/class/Kconfig"
 
 source "drivers/usb/storage/Kconfig"
 
 source "drivers/usb/image/Kconfig"
 
-source "drivers/usb/mon/Kconfig"
-
 comment "USB port drivers"
 	depends on USB
 
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index 507a9bd..9aea43a 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -602,7 +602,7 @@
 			offd = le32_to_cpu(buf[offb++]);
 			if (offd >= size) {
 				if (printk_ratelimit())
-					usb_err(instance->usbatm, "wrong index #%x in response to cm #%x\n",
+					usb_err(instance->usbatm, "wrong index %#x in response to cm %#x\n",
 						offd, cm);
 				ret = -EIO;
 				goto cleanup;
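
The format-string fix above is not cosmetic: "#%x" prints a literal '#'
before the hex digits, while the '#' flag inside the conversion, "%#x",
makes printf-style formatters prepend the conventional "0x".  For
example:

	printk(KERN_DEBUG "index %#x\n", 0x1f);	/* "index 0x1f" */
	printk(KERN_DEBUG "index #%x\n", 0x1f);	/* "index #1f"  */
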
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 0725b18..efc4373 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -51,6 +51,7 @@
  */
 
 #undef DEBUG
+#undef VERBOSE_DEBUG
 
 #include <linux/kernel.h>
 #include <linux/errno.h>
@@ -70,6 +71,9 @@
 
 #include "cdc-acm.h"
 
+
+#define ACM_CLOSE_TIMEOUT	15	/* seconds to let writes drain */
+
 /*
  * Version Information
  */
@@ -85,6 +89,12 @@
 
 #define ACM_READY(acm)	(acm && acm->dev && acm->used)
 
+#ifdef VERBOSE_DEBUG
+#define verbose	1
+#else
+#define verbose	0
+#endif
+
 /*
  * Functions for ACM control messages.
  */
@@ -136,19 +146,17 @@
 static int acm_wb_is_avail(struct acm *acm)
 {
 	int i, n;
+	unsigned long flags;
 
 	n = ACM_NW;
+	spin_lock_irqsave(&acm->write_lock, flags);
 	for (i = 0; i < ACM_NW; i++) {
 		n -= acm->wb[i].use;
 	}
+	spin_unlock_irqrestore(&acm->write_lock, flags);
 	return n;
 }
 
-static inline int acm_wb_is_used(struct acm *acm, int wbn)
-{
-	return acm->wb[wbn].use;
-}
-
 /*
  * Finish write.
  */
@@ -157,7 +165,6 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&acm->write_lock, flags);
-	acm->write_ready = 1;
 	wb->use = 0;
 	acm->transmitting--;
 	spin_unlock_irqrestore(&acm->write_lock, flags);
@@ -190,40 +197,25 @@
 static int acm_write_start(struct acm *acm, int wbn)
 {
 	unsigned long flags;
-	struct acm_wb *wb;
+	struct acm_wb *wb = &acm->wb[wbn];
 	int rc;
 
 	spin_lock_irqsave(&acm->write_lock, flags);
 	if (!acm->dev) {
+		wb->use = 0;
 		spin_unlock_irqrestore(&acm->write_lock, flags);
 		return -ENODEV;
 	}
 
-	if (!acm->write_ready) {
-		spin_unlock_irqrestore(&acm->write_lock, flags);
-		return 0;	/* A white lie */
-	}
-
-	wb = &acm->wb[wbn];
-	if(acm_wb_is_avail(acm) <= 1)
-		acm->write_ready = 0;
-
 	dbg("%s susp_count: %d", __func__, acm->susp_count);
 	if (acm->susp_count) {
-		acm->old_ready = acm->write_ready;
 		acm->delayed_wb = wb;
-		acm->write_ready = 0;
 		schedule_work(&acm->waker);
 		spin_unlock_irqrestore(&acm->write_lock, flags);
 		return 0;	/* A white lie */
 	}
 	usb_mark_last_busy(acm->dev);
 
-	if (!acm_wb_is_used(acm, wbn)) {
-		spin_unlock_irqrestore(&acm->write_lock, flags);
-		return 0;
-	}
-
 	rc = acm_start_wb(acm, wb);
 	spin_unlock_irqrestore(&acm->write_lock, flags);
 
@@ -488,22 +480,28 @@
 /* data interface wrote those outgoing bytes */
 static void acm_write_bulk(struct urb *urb)
 {
-	struct acm *acm;
 	struct acm_wb *wb = urb->context;
+	struct acm *acm = wb->instance;
 
-	dbg("Entering acm_write_bulk with status %d", urb->status);
+	if (verbose || urb->status
+			|| (urb->actual_length != urb->transfer_buffer_length))
+		dev_dbg(&acm->data->dev, "tx %d/%d bytes --> %d\n",
+			urb->actual_length,
+			urb->transfer_buffer_length,
+			urb->status);
 
-	acm = wb->instance;
 	acm_write_done(acm, wb);
 	if (ACM_READY(acm))
 		schedule_work(&acm->work);
+	else
+		wake_up_interruptible(&acm->drain_wait);
 }
 
 static void acm_softint(struct work_struct *work)
 {
 	struct acm *acm = container_of(work, struct acm, work);
-	dbg("Entering acm_softint.");
-	
+
+	dev_vdbg(&acm->data->dev, "tx work\n");
 	if (!ACM_READY(acm))
 		return;
 	tty_wakeup(acm->tty);
@@ -512,7 +510,6 @@
 static void acm_waker(struct work_struct *waker)
 {
 	struct acm *acm = container_of(waker, struct acm, waker);
-	long flags;
 	int rv;
 
 	rv = usb_autopm_get_interface(acm->control);
@@ -524,9 +521,6 @@
 		acm_start_wb(acm, acm->delayed_wb);
 		acm->delayed_wb = NULL;
 	}
-	spin_lock_irqsave(&acm->write_lock, flags);
-	acm->write_ready = acm->old_ready;
-	spin_unlock_irqrestore(&acm->write_lock, flags);
 	usb_autopm_put_interface(acm->control);
 }
 
@@ -628,6 +622,8 @@
 	kfree(acm);
 }
 
+static int acm_tty_chars_in_buffer(struct tty_struct *tty);
+
 static void acm_tty_close(struct tty_struct *tty, struct file *filp)
 {
 	struct acm *acm = tty->driver_data;
@@ -642,6 +638,13 @@
 		if (acm->dev) {
 			usb_autopm_get_interface(acm->control);
 			acm_set_control(acm, acm->ctrlout = 0);
+
+			/* try letting the last writes drain naturally */
+			wait_event_interruptible_timeout(acm->drain_wait,
+					(ACM_NW == acm_wb_is_avail(acm))
+						|| !acm->dev,
+					ACM_CLOSE_TIMEOUT * HZ);
+
 			usb_kill_urb(acm->ctrlurb);
 			for (i = 0; i < ACM_NW; i++)
 				usb_kill_urb(acm->wb[i].urb);
@@ -697,7 +700,7 @@
 	 * Do not let the line discipline to know that we have a reserve,
 	 * or it might get too enthusiastic.
 	 */
-	return (acm->write_ready && acm_wb_is_avail(acm)) ? acm->writesize : 0;
+	return acm_wb_is_avail(acm) ? acm->writesize : 0;
 }
 
 static int acm_tty_chars_in_buffer(struct tty_struct *tty)
@@ -1072,11 +1075,11 @@
 	acm->urb_task.data = (unsigned long) acm;
 	INIT_WORK(&acm->work, acm_softint);
 	INIT_WORK(&acm->waker, acm_waker);
+	init_waitqueue_head(&acm->drain_wait);
 	spin_lock_init(&acm->throttle_lock);
 	spin_lock_init(&acm->write_lock);
 	spin_lock_init(&acm->read_lock);
 	mutex_init(&acm->mutex);
-	acm->write_ready = 1;
 	acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress);
 
 	buf = usb_buffer_alloc(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma);
@@ -1108,9 +1111,11 @@
 		rcv->instance = acm;
 	}
 	for (i = 0; i < num_rx_buf; i++) {
-		struct acm_rb *buf = &(acm->rb[i]);
+		struct acm_rb *rb = &(acm->rb[i]);
 
-		if (!(buf->base = usb_buffer_alloc(acm->dev, readsize, GFP_KERNEL, &buf->dma))) {
+		rb->base = usb_buffer_alloc(acm->dev, readsize,
+				GFP_KERNEL, &rb->dma);
+		if (!rb->base) {
 			dev_dbg(&intf->dev, "out of memory (read bufs usb_buffer_alloc)\n");
 			goto alloc_fail7;
 		}
@@ -1172,6 +1177,7 @@
 	acm_set_line(acm, &acm->line);
 
 	usb_driver_claim_interface(&acm_driver, data_interface, acm);
+	usb_set_intfdata(data_interface, acm);
 
 	usb_get_intf(control_interface);
 	tty_register_device(acm_tty_driver, minor, &control_interface->dev);
@@ -1221,11 +1227,11 @@
 	struct acm *acm = usb_get_intfdata(intf);
 	struct usb_device *usb_dev = interface_to_usbdev(intf);
 
-	mutex_lock(&open_mutex);
-	if (!acm || !acm->dev) {
-		mutex_unlock(&open_mutex);
+	/* sibling interface is already cleaning up */
+	if (!acm)
 		return;
-	}
+
+	mutex_lock(&open_mutex);
 	if (acm->country_codes){
 		device_remove_file(&acm->control->dev,
 				&dev_attr_wCountryCodes);
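
The new close path pairs wait_event_interruptible_timeout() on
acm->drain_wait with the wake_up_interruptible() call added to
acm_write_bulk(): close() now sleeps until every write buffer is back in
the pool, the device disappears, or ACM_CLOSE_TIMEOUT expires, and only
then kills the remaining URBs.  A stripped-down sketch of the idiom;
all_tx_free() and device_gone() are hypothetical stand-ins for the
driver's real checks:

	#include <linux/wait.h>

	static DECLARE_WAIT_QUEUE_HEAD(drain_wait);

	static void tx_complete_sketch(void)	/* URB completion side */
	{
		/* ... return one write buffer to the pool ... */
		wake_up_interruptible(&drain_wait);
	}

	static void close_sketch(void)		/* tty close side */
	{
		/* let pending writes drain, but never wait forever */
		wait_event_interruptible_timeout(drain_wait,
				all_tx_free() || device_gone(),
				15 * HZ);
		/* anything still in flight gets killed after this */
	}
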
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index 85c3aaa..1f95e7a 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -106,8 +106,6 @@
 	struct list_head spare_read_bufs;
 	struct list_head filled_read_bufs;
 	int write_used;					/* number of non-empty write buffers */
-	int write_ready;				/* write urb is not running */
-	int old_ready;
 	int processing;
 	int transmitting;
 	spinlock_t write_lock;
@@ -115,6 +113,7 @@
 	struct usb_cdc_line_coding line;		/* bits, stop, parity */
 	struct work_struct work;			/* work queue entry for line discipline waking up */
 	struct work_struct waker;
+	wait_queue_head_t drain_wait;			/* close processing */
 	struct tasklet_struct urb_task;                 /* rx processing */
 	spinlock_t throttle_lock;			/* synchronize throtteling and read callback */
 	unsigned int ctrlin;				/* input control lines (DCD, DSR, RI, break, overruns) */
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index ddb54e1..2be37fe 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -774,7 +774,6 @@
 }
 EXPORT_SYMBOL_GPL(usb_deregister);
 
-
 /* Forced unbinding of a USB interface driver, either because
  * it doesn't support pre_reset/post_reset/reset_resume or
  * because it doesn't support suspend/resume.
@@ -821,6 +820,8 @@
 		dev_warn(&intf->dev, "rebind failed: %d\n", rc);
 }
 
+#ifdef CONFIG_PM
+
 #define DO_UNBIND	0
 #define DO_REBIND	1
 
@@ -872,8 +873,6 @@
 	}
 }
 
-#ifdef CONFIG_PM
-
 /* Caller has locked udev's pm_mutex */
 static int usb_suspend_device(struct usb_device *udev, pm_message_t msg)
 {
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 586d6f1..286b443 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1091,8 +1091,8 @@
 				continue;
 			dev_dbg(&dev->dev, "unregistering interface %s\n",
 				dev_name(&interface->dev));
-			device_del(&interface->dev);
 			usb_remove_sysfs_intf_files(interface);
+			device_del(&interface->dev);
 		}
 
 		/* Now that the interfaces are unbound, nobody should
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index c6a8c6b..acc95b2 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -284,6 +284,16 @@
 	default USB_GADGET
 	select USB_GADGET_SELECTED
 
+# built in ../musb along with host support
+config USB_GADGET_MUSB_HDRC
+	boolean "Inventra HDRC USB Peripheral (TI, ...)"
+	depends on USB_MUSB_HDRC && (USB_MUSB_PERIPHERAL || USB_MUSB_OTG)
+	select USB_GADGET_DUALSPEED
+	select USB_GADGET_SELECTED
+	help
+	  This OTG-capable silicon IP is used in dual-role designs including
+	  the TI DaVinci, OMAP 243x, OMAP 343x, and TUSB 6010.
+
 config USB_GADGET_OMAP
 	boolean "OMAP USB Device Controller"
 	depends on ARCH_OMAP
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
index 21d1406..7600a0c 100644
--- a/drivers/usb/gadget/dummy_hcd.c
+++ b/drivers/usb/gadget/dummy_hcd.c
@@ -542,13 +542,14 @@
 		req->req.context = dum;
 		req->req.complete = fifo_complete;
 
+		list_add_tail(&req->queue, &ep->queue);
 		spin_unlock (&dum->lock);
 		_req->actual = _req->length;
 		_req->status = 0;
 		_req->complete (_ep, _req);
 		spin_lock (&dum->lock);
-	}
-	list_add_tail (&req->queue, &ep->queue);
+	} else
+		list_add_tail(&req->queue, &ep->queue);
 	spin_unlock_irqrestore (&dum->lock, flags);
 
 	/* real hardware would likely enable transfers here, in case
diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c
index d8faccf..5ee1590 100644
--- a/drivers/usb/gadget/f_acm.c
+++ b/drivers/usb/gadget/f_acm.c
@@ -47,18 +47,37 @@
 	u8				ctrl_id, data_id;
 	u8				port_num;
 
-	struct usb_descriptor_header	**fs_function;
+	u8				pending;
+
+	/* lock is mostly for pending and notify_req ... they get accessed
+	 * by callbacks both from tty (open/close/break) under its spinlock,
+	 * and notify_req.complete() which can't use that lock.
+	 */
+	spinlock_t			lock;
+
 	struct acm_ep_descs		fs;
-	struct usb_descriptor_header	**hs_function;
 	struct acm_ep_descs		hs;
 
 	struct usb_ep			*notify;
 	struct usb_endpoint_descriptor	*notify_desc;
+	struct usb_request		*notify_req;
 
 	struct usb_cdc_line_coding	port_line_coding;	/* 8-N-1 etc */
+
+	/* SetControlLineState request -- CDC 1.1 section 6.2.14 (INPUT) */
 	u16				port_handshake_bits;
-#define RS232_RTS	(1 << 1)	/* unused with full duplex */
-#define RS232_DTR	(1 << 0)	/* host is ready for data r/w */
+#define ACM_CTRL_RTS	(1 << 1)	/* unused with full duplex */
+#define ACM_CTRL_DTR	(1 << 0)	/* host is ready for data r/w */
+
+	/* SerialState notification -- CDC 1.1 section 6.3.5 (OUTPUT) */
+	u16				serial_state;
+#define ACM_CTRL_OVERRUN	(1 << 6)
+#define ACM_CTRL_PARITY		(1 << 5)
+#define ACM_CTRL_FRAMING	(1 << 4)
+#define ACM_CTRL_RI		(1 << 3)
+#define ACM_CTRL_BRK		(1 << 2)
+#define ACM_CTRL_DSR		(1 << 1)
+#define ACM_CTRL_DCD		(1 << 0)
 };
 
 static inline struct f_acm *func_to_acm(struct usb_function *f)
@@ -66,12 +85,17 @@
 	return container_of(f, struct f_acm, port.func);
 }
 
+static inline struct f_acm *port_to_acm(struct gserial *p)
+{
+	return container_of(p, struct f_acm, port);
+}
+
 /*-------------------------------------------------------------------------*/
 
 /* notification endpoint uses smallish and infrequent fixed-size messages */
 
 #define GS_LOG2_NOTIFY_INTERVAL		5	/* 1 << 5 == 32 msec */
-#define GS_NOTIFY_MAXPACKET		8
+#define GS_NOTIFY_MAXPACKET		10	/* notification + 2 bytes */
 
 /* interface and class descriptors: */
 
@@ -117,7 +141,7 @@
 	.bLength =		sizeof(acm_descriptor),
 	.bDescriptorType =	USB_DT_CS_INTERFACE,
 	.bDescriptorSubType =	USB_CDC_ACM_TYPE,
-	.bmCapabilities =	(1 << 1),
+	.bmCapabilities =	USB_CDC_CAP_LINE,
 };
 
 static struct usb_cdc_union_desc acm_union_desc __initdata = {
@@ -277,6 +301,11 @@
 
 	/* composite driver infrastructure handles everything except
 	 * CDC class messages; interface activation uses set_alt().
+	 *
+	 * Note CDC spec table 4 lists the ACM request profile.  It requires
+	 * encapsulated command support ... we don't handle any, and respond
+	 * to them by stalling.  Options include get/set/clear comm features
+	 * (not that useful) and SEND_BREAK.
 	 */
 	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
 
@@ -312,7 +341,7 @@
 		value = 0;
 
 		/* FIXME we should not allow data to flow until the
-		 * host sets the RS232_DTR bit; and when it clears
+		 * host sets the ACM_CTRL_DTR bit; and when it clears
 		 * that bit, we should return to that no-flow state.
 		 */
 		acm->port_handshake_bits = w_value;
@@ -350,9 +379,6 @@
 	/* we know alt == 0, so this is an activation or a reset */
 
 	if (intf == acm->ctrl_id) {
-		/* REVISIT this may need more work when we start to
-		 * send notifications ...
-		 */
 		if (acm->notify->driver_data) {
 			VDBG(cdev, "reset acm control interface %d\n", intf);
 			usb_ep_disable(acm->notify);
@@ -397,6 +423,128 @@
 
 /*-------------------------------------------------------------------------*/
 
+/**
+ * acm_cdc_notify - issue CDC notification to host
+ * @acm: wraps host to be notified
+ * @type: notification type
+ * @value: Refer to cdc specs, wValue field.
+ * @data: data to be sent
+ * @length: size of data
+ * Context: irqs blocked, acm->lock held, acm->notify_req non-null
+ *
+ * Returns zero on success or a negative errno.
+ *
+ * See section 6.3.5 of the CDC 1.1 specification for information
+ * about the only notification we issue:  SerialState change.
+ */
+static int acm_cdc_notify(struct f_acm *acm, u8 type, u16 value,
+		void *data, unsigned length)
+{
+	struct usb_ep			*ep = acm->notify;
+	struct usb_request		*req;
+	struct usb_cdc_notification	*notify;
+	const unsigned			len = sizeof(*notify) + length;
+	void				*buf;
+	int				status;
+
+	req = acm->notify_req;
+	acm->notify_req = NULL;
+	acm->pending = false;
+
+	req->length = len;
+	notify = req->buf;
+	buf = notify + 1;
+
+	notify->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+			| USB_RECIP_INTERFACE;
+	notify->bNotificationType = type;
+	notify->wValue = cpu_to_le16(value);
+	notify->wIndex = cpu_to_le16(acm->ctrl_id);
+	notify->wLength = cpu_to_le16(length);
+	memcpy(buf, data, length);
+
+	status = usb_ep_queue(ep, req, GFP_ATOMIC);
+	if (status < 0) {
+		ERROR(acm->port.func.config->cdev,
+				"acm ttyGS%d can't notify serial state, %d\n",
+				acm->port_num, status);
+		acm->notify_req = req;
+	}
+
+	return status;
+}
+
+static int acm_notify_serial_state(struct f_acm *acm)
+{
+	struct usb_composite_dev *cdev = acm->port.func.config->cdev;
+	int			status;
+
+	spin_lock(&acm->lock);
+	if (acm->notify_req) {
+		DBG(cdev, "acm ttyGS%d serial state %04x\n",
+				acm->port_num, acm->serial_state);
+		status = acm_cdc_notify(acm, USB_CDC_NOTIFY_SERIAL_STATE,
+				0, &acm->serial_state, sizeof(acm->serial_state));
+	} else {
+		acm->pending = true;
+		status = 0;
+	}
+	spin_unlock(&acm->lock);
+	return status;
+}
+
+static void acm_cdc_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_acm		*acm = req->context;
+	u8			doit = false;
+
+	/* on this call path we do NOT hold the port spinlock,
+	 * which is why ACM needs its own spinlock
+	 */
+	spin_lock(&acm->lock);
+	if (req->status != -ESHUTDOWN)
+		doit = acm->pending;
+	acm->notify_req = req;
+	spin_unlock(&acm->lock);
+
+	if (doit)
+		acm_notify_serial_state(acm);
+}
+
+/* connect == the TTY link is open */
+
+static void acm_connect(struct gserial *port)
+{
+	struct f_acm		*acm = port_to_acm(port);
+
+	acm->serial_state |= ACM_CTRL_DSR | ACM_CTRL_DCD;
+	acm_notify_serial_state(acm);
+}
+
+static void acm_disconnect(struct gserial *port)
+{
+	struct f_acm		*acm = port_to_acm(port);
+
+	acm->serial_state &= ~(ACM_CTRL_DSR | ACM_CTRL_DCD);
+	acm_notify_serial_state(acm);
+}
+
+static int acm_send_break(struct gserial *port, int duration)
+{
+	struct f_acm		*acm = port_to_acm(port);
+	u16			state;
+
+	state = acm->serial_state;
+	state &= ~ACM_CTRL_BRK;
+	if (duration)
+		state |= ACM_CTRL_BRK;
+
+	acm->serial_state = state;
+	return acm_notify_serial_state(acm);
+}
+
+/*-------------------------------------------------------------------------*/
+
 /* ACM function driver setup/binding */
 static int __init
 acm_bind(struct usb_configuration *c, struct usb_function *f)
@@ -445,8 +593,20 @@
 	acm->notify = ep;
 	ep->driver_data = cdev;	/* claim */
 
+	/* allocate notification */
+	acm->notify_req = gs_alloc_req(ep,
+			sizeof(struct usb_cdc_notification) + 2,
+			GFP_KERNEL);
+	if (!acm->notify_req)
+		goto fail;
+
+	acm->notify_req->complete = acm_cdc_notify_complete;
+	acm->notify_req->context = acm;
+
 	/* copy descriptors, and track endpoint copies */
 	f->descriptors = usb_copy_descriptors(acm_fs_function);
+	if (!f->descriptors)
+		goto fail;
 
 	acm->fs.in = usb_find_endpoint(acm_fs_function,
 			f->descriptors, &acm_fs_in_desc);
@@ -478,8 +638,6 @@
 				f->hs_descriptors, &acm_hs_notify_desc);
 	}
 
-	/* FIXME provide a callback for triggering notifications */
-
 	DBG(cdev, "acm ttyGS%d: %s speed IN/%s OUT/%s NOTIFY/%s\n",
 			acm->port_num,
 			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
@@ -488,6 +646,9 @@
 	return 0;
 
 fail:
+	if (acm->notify_req)
+		gs_free_req(acm->notify, acm->notify_req);
+
 	/* we might as well release our claims on endpoints */
 	if (acm->notify)
 		acm->notify->driver_data = NULL;
@@ -504,10 +665,13 @@
 static void
 acm_unbind(struct usb_configuration *c, struct usb_function *f)
 {
+	struct f_acm		*acm = func_to_acm(f);
+
 	if (gadget_is_dualspeed(c->cdev->gadget))
 		usb_free_descriptors(f->hs_descriptors);
 	usb_free_descriptors(f->descriptors);
-	kfree(func_to_acm(f));
+	gs_free_req(acm->notify, acm->notify_req);
+	kfree(acm);
 }
 
 /* Some controllers can't support CDC ACM ... */
@@ -571,8 +735,14 @@
 	if (!acm)
 		return -ENOMEM;
 
+	spin_lock_init(&acm->lock);
+
 	acm->port_num = port_num;
 
+	acm->port.connect = acm_connect;
+	acm->port.disconnect = acm_disconnect;
+	acm->port.send_break = acm_send_break;
+
 	acm->port.func.name = "acm";
 	acm->port.func.strings = acm_strings;
 	/* descriptors are per-instance copies */
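
For reference, the SerialState message queued by acm_cdc_notify() is the
standard 8-byte CDC notification header followed by the two-byte state
bitmap, which is why the request buffer is allocated as
sizeof(struct usb_cdc_notification) + 2 and GS_NOTIFY_MAXPACKET grew to
10.  The header, as declared in <linux/usb/cdc.h>:

	struct usb_cdc_notification {
		__u8	bmRequestType;		/* 0xa1: IN|CLASS|INTERFACE */
		__u8	bNotificationType;	/* USB_CDC_NOTIFY_SERIAL_STATE */
		__le16	wValue;			/* 0 */
		__le16	wIndex;			/* control interface id */
		__le16	wLength;		/* 2 */
	} __attribute__ ((packed));
	/* ... immediately followed on the wire by a __le16 carrying the
	 * ACM_CTRL_DCD/DSR/BRK/... bits: 10 bytes total */
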
diff --git a/drivers/usb/gadget/f_ecm.c b/drivers/usb/gadget/f_ecm.c
index 0822e9d..a2b5c09 100644
--- a/drivers/usb/gadget/f_ecm.c
+++ b/drivers/usb/gadget/f_ecm.c
@@ -63,9 +63,7 @@
 
 	char				ethaddr[14];
 
-	struct usb_descriptor_header	**fs_function;
 	struct ecm_ep_descs		fs;
-	struct usb_descriptor_header	**hs_function;
 	struct ecm_ep_descs		hs;
 
 	struct usb_ep			*notify;
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index 61652f0..659b3d9 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -85,9 +85,7 @@
 	u8				ethaddr[ETH_ALEN];
 	int				config;
 
-	struct usb_descriptor_header	**fs_function;
 	struct rndis_ep_descs		fs;
-	struct usb_descriptor_header	**hs_function;
 	struct rndis_ep_descs		hs;
 
 	struct usb_ep			*notify;
diff --git a/drivers/usb/gadget/f_serial.c b/drivers/usb/gadget/f_serial.c
index 1b6bde9..fe5674db 100644
--- a/drivers/usb/gadget/f_serial.c
+++ b/drivers/usb/gadget/f_serial.c
@@ -36,9 +36,7 @@
 	u8				data_id;
 	u8				port_num;
 
-	struct usb_descriptor_header	**fs_function;
 	struct gser_descs		fs;
-	struct usb_descriptor_header	**hs_function;
 	struct gser_descs		hs;
 };
 
diff --git a/drivers/usb/gadget/f_subset.c b/drivers/usb/gadget/f_subset.c
index afeab9a..acb8d23 100644
--- a/drivers/usb/gadget/f_subset.c
+++ b/drivers/usb/gadget/f_subset.c
@@ -66,9 +66,7 @@
 
 	char				ethaddr[14];
 
-	struct usb_descriptor_header	**fs_function;
 	struct geth_descs		fs;
-	struct usb_descriptor_header	**hs_function;
 	struct geth_descs		hs;
 };
 
diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h
index 5246e8f..17d9905 100644
--- a/drivers/usb/gadget/gadget_chips.h
+++ b/drivers/usb/gadget/gadget_chips.h
@@ -11,6 +11,10 @@
  * Some are available on 2.4 kernels; several are available, but not
  * yet pushed in the 2.6 mainline tree.
  */
+
+#ifndef __GADGET_CHIPS_H
+#define __GADGET_CHIPS_H
+
 #ifdef CONFIG_USB_GADGET_NET2280
 #define	gadget_is_net2280(g)	!strcmp("net2280", (g)->name)
 #else
@@ -237,3 +241,5 @@
 	/* Everything else is *presumably* fine ... */
 	return true;
 }
+
+#endif /* __GADGET_CHIPS_H */
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
index 376e80c..574c538 100644
--- a/drivers/usb/gadget/omap_udc.c
+++ b/drivers/usb/gadget/omap_udc.c
@@ -54,6 +54,7 @@
 
 #include <mach/dma.h>
 #include <mach/usb.h>
+#include <mach/control.h>
 
 #include "omap_udc.h"
 
@@ -2310,10 +2311,10 @@
 	u32		trans;
 	char		*ctrl_name;
 
-	tmp = OTG_REV_REG;
+	tmp = omap_readl(OTG_REV);
 	if (cpu_is_omap24xx()) {
 		ctrl_name = "control_devconf";
-		trans = CONTROL_DEVCONF_REG;
+		trans = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0);
 	} else {
 		ctrl_name = "tranceiver_ctrl";
 		trans = omap_readw(USB_TRANSCEIVER_CTRL);
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
index abf9505..53d5928 100644
--- a/drivers/usb/gadget/u_serial.c
+++ b/drivers/usb/gadget/u_serial.c
@@ -52,13 +52,16 @@
  * is managed in userspace ... OBEX, PTP, and MTP have been mentioned.
  */
 
+#define PREFIX	"ttyGS"
+
 /*
  * gserial is the lifecycle interface, used by USB functions
  * gs_port is the I/O nexus, used by the tty driver
  * tty_struct links to the tty/filesystem framework
  *
  * gserial <---> gs_port ... links will be null when the USB link is
- * inactive; managed by gserial_{connect,disconnect}().
+ * inactive; managed by gserial_{connect,disconnect}().  Each gserial
+ * instance can wrap its own USB control protocol.
  *	gserial->ioport == usb_ep->driver_data ... gs_port
  *	gs_port->port_usb ... gserial
  *
@@ -100,6 +103,8 @@
 	wait_queue_head_t	close_wait;	/* wait for last close */
 
 	struct list_head	read_pool;
+	struct list_head	read_queue;
+	unsigned		n_read;
 	struct tasklet_struct	push;
 
 	struct list_head	write_pool;
@@ -177,7 +182,7 @@
 /*
  * gs_buf_data_avail
  *
- * Return the number of bytes of data available in the circular
+ * Return the number of bytes of data written into the circular
  * buffer.
  */
 static unsigned gs_buf_data_avail(struct gs_buf *gb)
@@ -278,7 +283,7 @@
  * Allocate a usb_request and its buffer.  Returns a pointer to the
  * usb_request or NULL if there is an error.
  */
-static struct usb_request *
+struct usb_request *
 gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
 {
 	struct usb_request *req;
@@ -302,7 +307,7 @@
  *
  * Free a usb_request and its buffer.
  */
-static void gs_free_req(struct usb_ep *ep, struct usb_request *req)
+void gs_free_req(struct usb_ep *ep, struct usb_request *req)
 {
 	kfree(req->buf);
 	usb_ep_free_request(ep, req);
@@ -367,11 +372,9 @@
 		req->length = len;
 		list_del(&req->list);
 
-#ifdef VERBOSE_DEBUG
-		pr_debug("%s: %s, len=%d, 0x%02x 0x%02x 0x%02x ...\n",
-				__func__, in->name, len, *((u8 *)req->buf),
+		pr_vdebug(PREFIX "%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n",
+				port->port_num, len, *((u8 *)req->buf),
 				*((u8 *)req->buf+1), *((u8 *)req->buf+2));
-#endif
 
 		/* Drop lock while we call out of driver; completions
 		 * could be issued while we do so.  Disconnection may
@@ -401,56 +404,6 @@
 	return status;
 }
 
-static void gs_rx_push(unsigned long _port)
-{
-	struct gs_port		*port = (void *)_port;
-	struct tty_struct	*tty = port->port_tty;
-
-	/* With low_latency, tty_flip_buffer_push() doesn't put its
-	 * real work through a workqueue, so the ldisc has a better
-	 * chance to keep up with peak USB data rates.
-	 */
-	if (tty) {
-		tty_flip_buffer_push(tty);
-		wake_up_interruptible(&tty->read_wait);
-	}
-}
-
-/*
- * gs_recv_packet
- *
- * Called for each USB packet received.  Reads the packet
- * header and stuffs the data in the appropriate tty buffer.
- * Returns 0 if successful, or a negative error number.
- *
- * Called during USB completion routine, on interrupt time.
- * With port_lock.
- */
-static int gs_recv_packet(struct gs_port *port, char *packet, unsigned size)
-{
-	unsigned		len;
-	struct tty_struct	*tty;
-
-	/* I/O completions can continue for a while after close(), until the
-	 * request queue empties.  Just discard any data we receive, until
-	 * something reopens this TTY ... as if there were no HW flow control.
-	 */
-	tty = port->port_tty;
-	if (tty == NULL) {
-		pr_vdebug("%s: ttyGS%d, after close\n",
-				__func__, port->port_num);
-		return -EIO;
-	}
-
-	len = tty_insert_flip_string(tty, packet, size);
-	if (len > 0)
-		tasklet_schedule(&port->push);
-	if (len < size)
-		pr_debug("%s: ttyGS%d, drop %d bytes\n",
-				__func__, port->port_num, size - len);
-	return 0;
-}
-
 /*
  * Context: caller owns port_lock, and port_usb is set
  */
@@ -469,9 +422,9 @@
 		int			status;
 		struct tty_struct	*tty;
 
-		/* no more rx if closed or throttled */
+		/* no more rx if closed */
 		tty = port->port_tty;
-		if (!tty || test_bit(TTY_THROTTLED, &tty->flags))
+		if (!tty)
 			break;
 
 		req = list_entry(pool->next, struct usb_request, list);
@@ -500,36 +453,134 @@
 	return started;
 }
 
+/*
+ * RX tasklet takes data out of the RX queue and hands it up to the TTY
+ * layer until it refuses to take any more data (or is throttled back).
+ * Then it issues reads for any further data.
+ *
+ * If the RX queue becomes full enough that no usb_request is queued,
+ * the OUT endpoint may begin NAKing as soon as its FIFO fills up.
+ * So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
+ * can be buffered ahead of the TTY layer's buffers (currently 64 KB).
+ */
+static void gs_rx_push(unsigned long _port)
+{
+	struct gs_port		*port = (void *)_port;
+	struct tty_struct	*tty;
+	struct list_head	*queue = &port->read_queue;
+	bool			disconnect = false;
+	bool			do_push = false;
+
+	/* hand any queued data to the tty */
+	spin_lock_irq(&port->port_lock);
+	tty = port->port_tty;
+	while (!list_empty(queue)) {
+		struct usb_request	*req;
+
+		req = list_first_entry(queue, struct usb_request, list);
+
+		/* discard data if tty was closed */
+		if (!tty)
+			goto recycle;
+
+		/* leave data queued if tty was rx throttled */
+		if (test_bit(TTY_THROTTLED, &tty->flags))
+			break;
+
+		switch (req->status) {
+		case -ESHUTDOWN:
+			disconnect = true;
+			pr_vdebug(PREFIX "%d: shutdown\n", port->port_num);
+			break;
+
+		default:
+			/* presumably a transient fault */
+			pr_warning(PREFIX "%d: unexpected RX status %d\n",
+					port->port_num, req->status);
+			/* FALLTHROUGH */
+		case 0:
+			/* normal completion */
+			break;
+		}
+
+		/* push data to (open) tty */
+		if (req->actual) {
+			char		*packet = req->buf;
+			unsigned	size = req->actual;
+			unsigned	n;
+			int		count;
+
+			/* we may have pushed part of this packet already... */
+			n = port->n_read;
+			if (n) {
+				packet += n;
+				size -= n;
+			}
+
+			count = tty_insert_flip_string(tty, packet, size);
+			if (count)
+				do_push = true;
+			if (count != size) {
+				/* stop pushing; TTY layer can't handle more */
+				port->n_read += count;
+				pr_vdebug(PREFIX "%d: rx block %d/%d\n",
+						port->port_num,
+						count, req->actual);
+				break;
+			}
+			port->n_read = 0;
+		}
+recycle:
+		list_move(&req->list, &port->read_pool);
+	}
+
+	/* Push from tty to ldisc; this is immediate with low_latency, and
+	 * may trigger callbacks to this driver ... so drop the spinlock.
+	 */
+	if (tty && do_push) {
+		spin_unlock_irq(&port->port_lock);
+		tty_flip_buffer_push(tty);
+		wake_up_interruptible(&tty->read_wait);
+		spin_lock_irq(&port->port_lock);
+
+		/* tty may have been closed */
+		tty = port->port_tty;
+	}
+
+
+	/* We want our data queue to become empty ASAP, keeping data
+	 * in the tty and ldisc (not here).  If we couldn't push any
+	 * this time around, there may be trouble unless there's an
+	 * implicit tty_unthrottle() call on its way...
+	 *
+	 * REVISIT we should probably add a timer to keep the tasklet
+	 * from starving ... but it's not clear that case ever happens.
+	 */
+	if (!list_empty(queue) && tty) {
+		if (!test_bit(TTY_THROTTLED, &tty->flags)) {
+			if (do_push)
+				tasklet_schedule(&port->push);
+			else
+				pr_warning(PREFIX "%d: RX not scheduled?\n",
+					port->port_num);
+		}
+	}
+
+	/* If we're still connected, refill the USB RX queue. */
+	if (!disconnect && port->port_usb)
+		gs_start_rx(port);
+
+	spin_unlock_irq(&port->port_lock);
+}
+
 static void gs_read_complete(struct usb_ep *ep, struct usb_request *req)
 {
-	int		status;
 	struct gs_port	*port = ep->driver_data;
 
+	/* Queue all received data until the tty layer is ready for it. */
 	spin_lock(&port->port_lock);
-	list_add(&req->list, &port->read_pool);
-
-	switch (req->status) {
-	case 0:
-		/* normal completion */
-		status = gs_recv_packet(port, req->buf, req->actual);
-		if (status && status != -EIO)
-			pr_debug("%s: %s %s err %d\n",
-				__func__, "recv", ep->name, status);
-		gs_start_rx(port);
-		break;
-
-	case -ESHUTDOWN:
-		/* disconnect */
-		pr_vdebug("%s: %s shutdown\n", __func__, ep->name);
-		break;
-
-	default:
-		/* presumably a transient fault */
-		pr_warning("%s: unexpected %s status %d\n",
-				__func__, ep->name, req->status);
-		gs_start_rx(port);
-		break;
-	}
+	list_add_tail(&req->list, &port->read_queue);
+	tasklet_schedule(&port->push);
 	spin_unlock(&port->port_lock);
 }
 
@@ -625,6 +676,7 @@
 	}
 
 	/* queue read requests */
+	port->n_read = 0;
 	started = gs_start_rx(port);
 
 	/* unblock any pending writes into our circular buffer */
@@ -633,9 +685,10 @@
 	} else {
 		gs_free_requests(ep, head);
 		gs_free_requests(port->port_usb->in, &port->write_pool);
+		status = -EIO;
 	}
 
-	return started ? 0 : status;
+	return status;
 }
 
 /*-------------------------------------------------------------------------*/
@@ -736,10 +789,13 @@
 
 	/* if connected, start the I/O stream */
 	if (port->port_usb) {
+		struct gserial	*gser = port->port_usb;
+
 		pr_debug("gs_open: start ttyGS%d\n", port->port_num);
 		gs_start_io(port);
 
-		/* REVISIT for ACM, issue "network connected" event */
+		if (gser->connect)
+			gser->connect(gser);
 	}
 
 	pr_debug("gs_open: ttyGS%d (%p,%p)\n", port->port_num, tty, file);
@@ -766,6 +822,7 @@
 static void gs_close(struct tty_struct *tty, struct file *file)
 {
 	struct gs_port *port = tty->driver_data;
+	struct gserial	*gser;
 
 	spin_lock_irq(&port->port_lock);
 
@@ -785,32 +842,31 @@
 	port->openclose = true;
 	port->open_count = 0;
 
-	if (port->port_usb)
-		/* REVISIT for ACM, issue "network disconnected" event */;
+	gser = port->port_usb;
+	if (gser && gser->disconnect)
+		gser->disconnect(gser);
 
 	/* wait for circular write buffer to drain, disconnect, or at
 	 * most GS_CLOSE_TIMEOUT seconds; then discard the rest
 	 */
-	if (gs_buf_data_avail(&port->port_write_buf) > 0
-			&& port->port_usb) {
+	if (gs_buf_data_avail(&port->port_write_buf) > 0 && gser) {
 		spin_unlock_irq(&port->port_lock);
 		wait_event_interruptible_timeout(port->drain_wait,
 					gs_writes_finished(port),
 					GS_CLOSE_TIMEOUT * HZ);
 		spin_lock_irq(&port->port_lock);
+		gser = port->port_usb;
 	}
 
 	/* Iff we're disconnected, there can be no I/O in flight so it's
 	 * ok to free the circular buffer; else just scrub it.  And don't
 	 * let the push tasklet fire again until we're re-opened.
 	 */
-	if (port->port_usb == NULL)
+	if (gser == NULL)
 		gs_buf_free(&port->port_write_buf);
 	else
 		gs_buf_clear(&port->port_write_buf);
 
-	tasklet_kill(&port->push);
-
 	tty->driver_data = NULL;
 	port->port_tty = NULL;
 
@@ -911,15 +967,35 @@
 {
 	struct gs_port		*port = tty->driver_data;
 	unsigned long		flags;
-	unsigned		started = 0;
 
 	spin_lock_irqsave(&port->port_lock, flags);
-	if (port->port_usb)
-		started = gs_start_rx(port);
+	if (port->port_usb) {
+		/* Kickstart read queue processing.  We don't do xon/xoff,
+		 * rts/cts, or other handshaking with the host, but if the
+		 * read queue backs up enough we'll be NAKing OUT packets.
+		 */
+		tasklet_schedule(&port->push);
+		pr_vdebug(PREFIX "%d: unthrottle\n", port->port_num);
+	}
 	spin_unlock_irqrestore(&port->port_lock, flags);
+}
 
-	pr_vdebug("gs_unthrottle: ttyGS%d, %d packets\n",
-			port->port_num, started);
+static int gs_break_ctl(struct tty_struct *tty, int duration)
+{
+	struct gs_port	*port = tty->driver_data;
+	int		status = 0;
+	struct gserial	*gser;
+
+	pr_vdebug("gs_break_ctl: ttyGS%d, send break (%d) \n",
+			port->port_num, duration);
+
+	spin_lock_irq(&port->port_lock);
+	gser = port->port_usb;
+	if (gser && gser->send_break)
+		status = gser->send_break(gser, duration);
+	spin_unlock_irq(&port->port_lock);
+
+	return status;
 }
 
 static const struct tty_operations gs_tty_ops = {
@@ -931,6 +1007,7 @@
 	.write_room =		gs_write_room,
 	.chars_in_buffer =	gs_chars_in_buffer,
 	.unthrottle =		gs_unthrottle,
+	.break_ctl =		gs_break_ctl,
 };
 
 /*-------------------------------------------------------------------------*/
@@ -953,6 +1030,7 @@
 	tasklet_init(&port->push, gs_rx_push, (unsigned long) port);
 
 	INIT_LIST_HEAD(&port->read_pool);
+	INIT_LIST_HEAD(&port->read_queue);
 	INIT_LIST_HEAD(&port->write_pool);
 
 	port->port_num = port_num;
@@ -997,7 +1075,7 @@
 
 	gs_tty_driver->owner = THIS_MODULE;
 	gs_tty_driver->driver_name = "g_serial";
-	gs_tty_driver->name = "ttyGS";
+	gs_tty_driver->name = PREFIX;
 	/* uses dynamically assigned dev_t values */
 
 	gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
@@ -1104,6 +1182,8 @@
 		ports[i].port = NULL;
 		mutex_unlock(&ports[i].lock);
 
+		tasklet_kill(&port->push);
+
 		/* wait for old opens to finish */
 		wait_event(port->close_wait, gs_closed(port));
 
@@ -1175,14 +1255,17 @@
 
 	/* REVISIT if waiting on "carrier detect", signal. */
 
-	/* REVISIT for ACM, issue "network connection" status notification:
-	 * connected if open_count, else disconnected.
+	/* if it's already open, start I/O ... and notify the serial
+	 * protocol about open/close status (connect/disconnect).
 	 */
-
-	/* if it's already open, start I/O */
 	if (port->open_count) {
 		pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
 		gs_start_io(port);
+		if (gser->connect)
+			gser->connect(gser);
+	} else {
+		if (gser->disconnect)
+			gser->disconnect(gser);
 	}
 
 	spin_unlock_irqrestore(&port->port_lock, flags);
@@ -1241,6 +1324,7 @@
 	if (port->open_count == 0 && !port->openclose)
 		gs_buf_free(&port->port_write_buf);
 	gs_free_requests(gser->out, &port->read_pool);
+	gs_free_requests(gser->out, &port->read_queue);
 	gs_free_requests(gser->in, &port->write_pool);
 	spin_unlock_irqrestore(&port->port_lock, flags);
 }
diff --git a/drivers/usb/gadget/u_serial.h b/drivers/usb/gadget/u_serial.h
index 7b56113..af3910d 100644
--- a/drivers/usb/gadget/u_serial.h
+++ b/drivers/usb/gadget/u_serial.h
@@ -23,8 +23,7 @@
  * style I/O using the USB peripheral endpoints listed here, including
  * hookups to sysfs and /dev for each logical "tty" device.
  *
- * REVISIT need TTY --> USB event flow too, so ACM can report open/close
- * as carrier detect events.  Model after ECM.  There's more ACM state too.
+ * REVISIT at least ACM could support tiocmget() if needed.
  *
  * REVISIT someday, allow multiplexing several TTYs over these endpoints.
  */
@@ -41,8 +40,17 @@
 
 	/* REVISIT avoid this CDC-ACM support harder ... */
 	struct usb_cdc_line_coding port_line_coding;	/* 9600-8-N-1 etc */
+
+	/* notification callbacks */
+	void (*connect)(struct gserial *p);
+	void (*disconnect)(struct gserial *p);
+	int (*send_break)(struct gserial *p, int duration);
 };
 
+/* utilities to allocate/free request and buffer */
+struct usb_request *gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags);
+void gs_free_req(struct usb_ep *, struct usb_request *req);
+
 /* port setup/teardown is handled by gadget driver */
 int gserial_setup(struct usb_gadget *g, unsigned n_ports);
 void gserial_cleanup(void);
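
With gs_alloc_req() and gs_free_req() exported here, a function driver
can allocate its private requests without duplicating the kmalloc() plus
usb_ep_alloc_request() boilerplate.  A minimal usage sketch mirroring
f_acm above; the buffer length, callback, and context names are
hypothetical:

	struct usb_request *req;

	req = gs_alloc_req(ep, MY_BUF_LEN, GFP_KERNEL);	/* buffer included */
	if (!req)
		return -ENOMEM;
	req->complete = my_complete;	/* hypothetical completion handler */
	req->context = my_dev;		/* hypothetical back-pointer */
	/* ... usb_ep_queue(ep, req, GFP_ATOMIC) as events occur ... */

	gs_free_req(ep, req);		/* frees the buffer and the request */
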
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index c858f2a..d22a84f 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -126,9 +126,8 @@
  * doesn't quite work because some people have to enforce 32-bit access
  */
 static void priv_read_copy(struct isp1760_hcd *priv, u32 *src,
-		__u32 __iomem *dst, u32 offset, u32 len)
+		__u32 __iomem *dst, u32 len)
 {
-	struct usb_hcd *hcd = priv_to_hcd(priv);
 	u32 val;
 	u8 *buff8;
 
@@ -136,11 +135,6 @@
 		printk(KERN_ERR "ERROR: buffer: %p len: %d\n", src, len);
 		return;
 	}
-	isp1760_writel(offset,  hcd->regs + HC_MEMORY_REG);
-	/* XXX
-	 * 90nsec delay, the spec says something how this could be avoided.
-	 */
-	mdelay(1);
 
 	while (len >= 4) {
 		*src = __raw_readl(dst);
@@ -987,8 +981,20 @@
 			printk(KERN_ERR "qh is 0\n");
 			continue;
 		}
-		priv_read_copy(priv, (u32 *)&ptd, usb_hcd->regs + atl_regs,
-				atl_regs, sizeof(ptd));
+		isp1760_writel(atl_regs + ISP_BANK(0), usb_hcd->regs +
+				HC_MEMORY_REG);
+		isp1760_writel(payload  + ISP_BANK(1), usb_hcd->regs +
+				HC_MEMORY_REG);
+		/*
+		 * write bank1 address twice to ensure the 90ns delay (time
+		 * between BANK0 write and the priv_read_copy() call is at
+		 * least 3*t_WHWL + t_w11 = 3*25ns + 17ns = 92ns)
+		 */
+		isp1760_writel(payload  + ISP_BANK(1), usb_hcd->regs +
+				HC_MEMORY_REG);
+
+		priv_read_copy(priv, (u32 *)&ptd, usb_hcd->regs + atl_regs +
+				ISP_BANK(0), sizeof(ptd));
 
 		dw1 = le32_to_cpu(ptd.dw1);
 		dw2 = le32_to_cpu(ptd.dw2);
@@ -1091,7 +1097,7 @@
 			case IN_PID:
 				priv_read_copy(priv,
 					priv->atl_ints[queue_entry].data_buffer,
-					usb_hcd->regs + payload, payload,
+					usb_hcd->regs + payload + ISP_BANK(1),
 					length);
 
 			case OUT_PID:
@@ -1122,11 +1128,11 @@
 		} else if (usb_pipebulk(urb->pipe) && (length < qtd->length)) {
 			/* short BULK received */
 
-			printk(KERN_ERR "short bulk, %d instead %zu\n", length,
-					qtd->length);
 			if (urb->transfer_flags & URB_SHORT_NOT_OK) {
 				urb->status = -EREMOTEIO;
-				printk(KERN_ERR "not okey\n");
+				isp1760_dbg(priv, "short bulk, %d instead %zu "
+					"with URB_SHORT_NOT_OK flag.\n",
+					length, qtd->length);
 			}
 
 			if (urb->status == -EINPROGRESS)
@@ -1206,8 +1212,20 @@
 			continue;
 		}
 
-		priv_read_copy(priv, (u32 *)&ptd, usb_hcd->regs + int_regs,
-				int_regs, sizeof(ptd));
+		isp1760_writel(int_regs + ISP_BANK(0), usb_hcd->regs +
+				HC_MEMORY_REG);
+		isp1760_writel(payload  + ISP_BANK(1), usb_hcd->regs +
+				HC_MEMORY_REG);
+		/*
+		 * write bank1 address twice to ensure the 90ns delay (time
+		 * between BANK0 write and the priv_read_copy() call is at
+		 * least 3*t_WHWL + t_w11 = 3*25ns + 17ns = 92ns)
+		 */
+		isp1760_writel(payload  + ISP_BANK(1), usb_hcd->regs +
+				HC_MEMORY_REG);
+
+		priv_read_copy(priv, (u32 *)&ptd, usb_hcd->regs + int_regs +
+				ISP_BANK(0), sizeof(ptd));
 		dw1 = le32_to_cpu(ptd.dw1);
 		dw3 = le32_to_cpu(ptd.dw3);
 		check_int_err_status(le32_to_cpu(ptd.dw4));
@@ -1242,7 +1260,7 @@
 			case IN_PID:
 				priv_read_copy(priv,
 					priv->int_ints[queue_entry].data_buffer,
-					usb_hcd->regs + payload , payload,
+					usb_hcd->regs + payload + ISP_BANK(1),
 					length);
 			case OUT_PID:
 
@@ -1615,8 +1633,7 @@
 		return -EPIPE;
 	}
 
-	isp1760_prepare_enqueue(priv, urb, &qtd_list, mem_flags, pe);
-	return 0;
+	return isp1760_prepare_enqueue(priv, urb, &qtd_list, mem_flags, pe);
 }
 
 static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
diff --git a/drivers/usb/host/isp1760-hcd.h b/drivers/usb/host/isp1760-hcd.h
index 6473dd8..4377277 100644
--- a/drivers/usb/host/isp1760-hcd.h
+++ b/drivers/usb/host/isp1760-hcd.h
@@ -54,6 +54,8 @@
 #define BUFFER_MAP		0x7
 
 #define HC_MEMORY_REG		0x33c
+#define ISP_BANK(x)		((x) << 16)
+
 #define HC_PORT1_CTRL		0x374
 #define PORT1_POWER		(3 << 3)
 #define PORT1_INIT1		(1 << 7)
@@ -119,6 +121,9 @@
 typedef void (packet_enqueue)(struct usb_hcd *hcd, struct isp1760_qh *qh,
 		struct isp1760_qtd *qtd);
 
+#define isp1760_dbg(priv, fmt, args...) \
+	dev_dbg(priv_to_hcd(priv)->self.controller, fmt, ##args)
+
 #define isp1760_info(priv, fmt, args...) \
 	dev_info(priv_to_hcd(priv)->self.controller, fmt, ##args)
 
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 26bc479..8990196 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -86,6 +86,21 @@
 static int ohci_restart (struct ohci_hcd *ohci);
 #endif
 
+#ifdef CONFIG_PCI
+static void quirk_amd_pll(int state);
+static void amd_iso_dev_put(void);
+#else
+static inline void quirk_amd_pll(int state)
+{
+	return;
+}
+static inline void amd_iso_dev_put(void)
+{
+	return;
+}
+#endif
+
 #include "ohci-hub.c"
 #include "ohci-dbg.c"
 #include "ohci-mem.c"
@@ -483,6 +498,9 @@
 	int ret;
 	struct usb_hcd *hcd = ohci_to_hcd(ohci);
 
+	if (distrust_firmware)
+		ohci->flags |= OHCI_QUIRK_HUB_POWER;
+
 	disable (ohci);
 	ohci->regs = hcd->regs;
 
@@ -689,7 +707,8 @@
 		temp |= RH_A_NOCP;
 		temp &= ~(RH_A_POTPGT | RH_A_NPS);
 		ohci_writel (ohci, temp, &ohci->regs->roothub.a);
-	} else if ((ohci->flags & OHCI_QUIRK_AMD756) || distrust_firmware) {
+	} else if ((ohci->flags & OHCI_QUIRK_AMD756) ||
+			(ohci->flags & OHCI_QUIRK_HUB_POWER)) {
 		/* hub power always on; required for AMD-756 and some
 		 * Mac platforms.  ganged overcurrent reporting, if any.
 		 */
@@ -882,6 +901,8 @@
 
 	if (quirk_zfmicro(ohci))
 		del_timer(&ohci->unlink_watchdog);
+	if (quirk_amdiso(ohci))
+		amd_iso_dev_put();
 
 	remove_debug_files (ohci);
 	ohci_mem_cleanup (ohci);
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
index b567392..439beb7 100644
--- a/drivers/usb/host/ohci-hub.c
+++ b/drivers/usb/host/ohci-hub.c
@@ -483,6 +483,13 @@
 		length++;
 	}
 
+	/* Some broken controllers never turn off RHSC in the interrupt
+	 * status register.  For their sake we won't re-enable RHSC
+	 * interrupts if the flag is already set.
+	 */
+	if (ohci_readl(ohci, &ohci->regs->intrstatus) & OHCI_INTR_RHSC)
+		changed = 1;
+
 	/* look at each port */
 	for (i = 0; i < ohci->num_ports; i++) {
 		u32	status = roothub_portstatus (ohci, i);
@@ -572,8 +579,6 @@
 	return 0;
 }
 
-static void start_hnp(struct ohci_hcd *ohci);
-
 #else
 
 #define	ohci_start_port_reset		NULL
@@ -760,7 +765,7 @@
 #ifdef	CONFIG_USB_OTG
 			if (hcd->self.otg_port == (wIndex + 1)
 					&& hcd->self.b_hnp_enable)
-				start_hnp(ohci);
+				ohci->start_hnp(ohci);
 			else
 #endif
 			ohci_writel (ohci, RH_PS_PSS,
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index 94dfca0..3d532b7 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -225,6 +225,7 @@
 			dev_err(hcd->self.controller, "can't find transceiver\n");
 			return -ENODEV;
 		}
+		ohci->start_hnp = start_hnp;
 	}
 #endif
 
@@ -260,7 +261,7 @@
 			omap_cfg_reg(W4_USB_HIGHZ);
 		}
 		ohci_writel(ohci, rh, &ohci->regs->roothub.a);
-		distrust_firmware = 0;
+		ohci->flags &= ~OHCI_QUIRK_HUB_POWER;
 	} else if (machine_is_nokia770()) {
 		/* We require a self-powered hub, which should have
 		 * plenty of power. */
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index 4696cc9..083e8df 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -18,6 +18,28 @@
 #error "This file is PCI bus glue.  CONFIG_PCI must be defined."
 #endif
 
+#include <linux/pci.h>
+#include <linux/io.h>
+
+
+/* constants used to work around PM-related transfer
+ * glitches in some AMD 700 series southbridges
+ */
+#define AB_REG_BAR	0xf0
+#define AB_INDX(addr)	((addr) + 0x00)
+#define AB_DATA(addr)	((addr) + 0x04)
+#define AX_INDXC	0x30
+#define AX_DATAC	0x34
+
+#define NB_PCIE_INDX_ADDR	0xe0
+#define NB_PCIE_INDX_DATA	0xe4
+#define PCIE_P_CNTL		0x10040
+#define BIF_NB			0x10002
+
+static struct pci_dev *amd_smbus_dev;
+static struct pci_dev *amd_hb_dev;
+static int amd_ohci_iso_count;
+
 /*-------------------------------------------------------------------------*/
 
 static int broken_suspend(struct usb_hcd *hcd)
@@ -143,6 +165,103 @@
 	return 0;
 }
 
+static int ohci_quirk_amd700(struct usb_hcd *hcd)
+{
+	struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+	u8 rev = 0;
+
+	if (!amd_smbus_dev)
+		amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI,
+				PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL);
+	if (!amd_smbus_dev)
+		return 0;
+
+	pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev);
+	if ((rev > 0x3b) || (rev < 0x30)) {
+		pci_dev_put(amd_smbus_dev);
+		amd_smbus_dev = NULL;
+		return 0;
+	}
+
+	amd_ohci_iso_count++;
+
+	if (!amd_hb_dev)
+		amd_hb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9600, NULL);
+
+	ohci->flags |= OHCI_QUIRK_AMD_ISO;
+	ohci_dbg(ohci, "enabled AMD ISO transfer quirk\n");
+
+	return 0;
+}
+
+/*
+ * The hardware normally enables the A-link power management feature, which
+ * lets the system lower the power consumption in idle states.
+ *
+ * Assume the system is configured to have USB 1.1 ISO transfers going
+ * to or from a USB device.  Without this quirk, that stream may stutter
+ * or have breaks occasionally.  For transfers going to speakers, this
+ * makes a very audible mess...
+ *
+ * That audio playback corruption happens because the audio stream is
+ * interrupted occasionally when the link goes into a lower power state.
+ * This USB quirk prevents the link from entering that lower power state
+ * during audio playback or other ISO operations.
+ */
+static void quirk_amd_pll(int on)
+{
+	u32 addr;
+	u32 val;
+	u32 bit = (on > 0) ? 1 : 0;
+
+	pci_read_config_dword(amd_smbus_dev, AB_REG_BAR, &addr);
+
+	/* BIT names/meanings are NDA-protected, sorry ... */
+
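+	/* Indexed access: a selector written through AB_INDX picks the
+	 * internal word that subsequent AB_DATA accesses hit; the sequence
+	 * below is a read-modify-write of one such word.
+	 */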
+	outl(AX_INDXC, AB_INDX(addr));
+	outl(0x40, AB_DATA(addr));
+	outl(AX_DATAC, AB_INDX(addr));
+	val = inl(AB_DATA(addr));
+	val &= ~((1 << 3) | (1 << 4) | (1 << 9));
+	val |= (bit << 3) | ((!bit) << 4) | ((!bit) << 9);
+	outl(val, AB_DATA(addr));
+
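+	/* the host bridge exposes a similar index/data pair in config space */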
+	if (amd_hb_dev) {
+		addr = PCIE_P_CNTL;
+		pci_write_config_dword(amd_hb_dev, NB_PCIE_INDX_ADDR, addr);
+
+		pci_read_config_dword(amd_hb_dev, NB_PCIE_INDX_DATA, &val);
+		val &= ~(1 | (1 << 3) | (1 << 4) | (1 << 9) | (1 << 12));
+		val |= bit | (bit << 3) | (bit << 12);
+		val |= ((!bit) << 4) | ((!bit) << 9);
+		pci_write_config_dword(amd_hb_dev, NB_PCIE_INDX_DATA, val);
+
+		addr = BIF_NB;
+		pci_write_config_dword(amd_hb_dev, NB_PCIE_INDX_ADDR, addr);
+
+		pci_read_config_dword(amd_hb_dev, NB_PCIE_INDX_DATA, &val);
+		val &= ~(1 << 8);
+		val |= bit << 8;
+		pci_write_config_dword(amd_hb_dev, NB_PCIE_INDX_DATA, val);
+	}
+}
+
+static void amd_iso_dev_put(void)
+{
+	amd_ohci_iso_count--;
+	if (amd_ohci_iso_count == 0) {
+		if (amd_smbus_dev) {
+			pci_dev_put(amd_smbus_dev);
+			amd_smbus_dev = NULL;
+		}
+		if (amd_hb_dev) {
+			pci_dev_put(amd_hb_dev);
+			amd_hb_dev = NULL;
+		}
+	}
+}
+
 /* List of quirks for OHCI */
 static const struct pci_device_id ohci_pci_quirks[] = {
 	{
@@ -181,6 +300,19 @@
 		PCI_DEVICE(PCI_VENDOR_ID_ITE, 0x8152),
 		.driver_data = (unsigned long) broken_suspend,
 	},
+	{
+		PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4397),
+		.driver_data = (unsigned long)ohci_quirk_amd700,
+	},
+	{
+		PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4398),
+		.driver_data = (unsigned long)ohci_quirk_amd700,
+	},
+	{
+		PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4399),
+		.driver_data = (unsigned long)ohci_quirk_amd700,
+	},
+
 	/* FIXME for some of the early AMD 760 southbridges, OHCI
 	 * won't work at all.  blacklist them.
 	 */
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index 6a9b4c5..c2d80f8 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -49,6 +49,9 @@
 	switch (usb_pipetype (urb->pipe)) {
 	case PIPE_ISOCHRONOUS:
 		ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs--;
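+		/* last ISO urb gone: re-enable A-link power management */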
+		if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0
+				&& quirk_amdiso(ohci))
+			quirk_amd_pll(1);
 		break;
 	case PIPE_INTERRUPT:
 		ohci_to_hcd(ohci)->self.bandwidth_int_reqs--;
@@ -677,6 +680,9 @@
 				data + urb->iso_frame_desc [cnt].offset,
 				urb->iso_frame_desc [cnt].length, urb, cnt);
 		}
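+		/* first ISO urb on this HC: keep the A-link out of low power */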
+		if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0
+				&& quirk_amdiso(ohci))
+			quirk_amd_pll(0);
 		periodic = ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs++ == 0
 			&& ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0;
 		break;
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
index dc544dd..faf622e 100644
--- a/drivers/usb/host/ohci.h
+++ b/drivers/usb/host/ohci.h
@@ -371,6 +371,7 @@
 	 * other external transceivers should be software-transparent
 	 */
 	struct otg_transceiver	*transceiver;
+	void (*start_hnp)(struct ohci_hcd *ohci);
 
 	/*
 	 * memory management for queue data structures
@@ -399,6 +400,8 @@
 #define	OHCI_QUIRK_ZFMICRO	0x20			/* Compaq ZFMicro chipset*/
 #define	OHCI_QUIRK_NEC		0x40			/* lost interrupts */
 #define	OHCI_QUIRK_FRAME_NO	0x80			/* no big endian frame_no shift */
+#define	OHCI_QUIRK_HUB_POWER	0x100			/* distrust firmware power/oc setup */
+#define	OHCI_QUIRK_AMD_ISO	0x200			/* ISO transfers need A-link PLL quirk */
 	// there are also chip quirks/bugs in init logic
 
 	struct work_struct	nec_work;	/* Worker for NEC quirk */
@@ -426,6 +429,10 @@
 {
 	return ohci->flags & OHCI_QUIRK_ZFMICRO;
 }
+static inline int quirk_amdiso(struct ohci_hcd *ohci)
+{
+	return ohci->flags & OHCI_QUIRK_AMD_ISO;
+}
 #else
 static inline int quirk_nec(struct ohci_hcd *ohci)
 {
@@ -435,6 +442,10 @@
 {
 	return 0;
 }
+static inline int quirk_amdiso(struct ohci_hcd *ohci)
+{
+	return 0;
+}
 #endif
 
 /* convert between an hcd pointer and the corresponding ohci_hcd */
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index d5f02dd..ea7126f 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -964,11 +964,34 @@
 	disable_irq_nrdy(r8a66597, pipenum);
 }
 
+static void r8a66597_root_hub_start_polling(struct r8a66597 *r8a66597)
+{
+	mod_timer(&r8a66597->rh_timer,
+			jiffies + msecs_to_jiffies(R8A66597_RH_POLL_TIME));
+}
+
+static void start_root_hub_sampling(struct r8a66597 *r8a66597, int port,
+					int connect)
+{
+	struct r8a66597_root_hub *rh = &r8a66597->root_hub[port];
+
+	rh->old_syssts = r8a66597_read(r8a66597, get_syssts_reg(port)) & LNST;
+	rh->scount = R8A66597_MAX_SAMPLING;
+	if (connect)
+		rh->port |= 1 << USB_PORT_FEAT_CONNECTION;
+	else
+		rh->port &= ~(1 << USB_PORT_FEAT_CONNECTION);
+	rh->port |= 1 << USB_PORT_FEAT_C_CONNECTION;
+
+	r8a66597_root_hub_start_polling(r8a66597);
+}
+
 /* this function must be called with interrupt disabled */
 static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port,
 					u16 syssts)
 {
 	if (syssts == SE0) {
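+		/* clear any stale ATTCH status before re-enabling the interrupt */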
+		r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port));
 		r8a66597_bset(r8a66597, ATTCHE, get_intenb_reg(port));
 		return;
 	}
@@ -1002,13 +1025,10 @@
 {
 	struct r8a66597_device *dev = r8a66597->root_hub[port].dev;
 
-	r8a66597->root_hub[port].port &= ~(1 << USB_PORT_FEAT_CONNECTION);
-	r8a66597->root_hub[port].port |= (1 << USB_PORT_FEAT_C_CONNECTION);
-
 	disable_r8a66597_pipe_all(r8a66597, dev);
 	free_usb_address(r8a66597, dev);
 
-	r8a66597_bset(r8a66597, ATTCHE, get_intenb_reg(port));
+	start_root_hub_sampling(r8a66597, port, 0);
 }
 
 /* this function must be called with interrupt disabled */
@@ -1551,23 +1571,6 @@
 	}
 }
 
-static void r8a66597_root_hub_start_polling(struct r8a66597 *r8a66597)
-{
-	mod_timer(&r8a66597->rh_timer,
-			jiffies + msecs_to_jiffies(R8A66597_RH_POLL_TIME));
-}
-
-static void start_root_hub_sampling(struct r8a66597 *r8a66597, int port)
-{
-	struct r8a66597_root_hub *rh = &r8a66597->root_hub[port];
-
-	rh->old_syssts = r8a66597_read(r8a66597, get_syssts_reg(port)) & LNST;
-	rh->scount = R8A66597_MAX_SAMPLING;
-	r8a66597->root_hub[port].port |= (1 << USB_PORT_FEAT_CONNECTION)
-					 | (1 << USB_PORT_FEAT_C_CONNECTION);
-	r8a66597_root_hub_start_polling(r8a66597);
-}
-
 static irqreturn_t r8a66597_irq(struct usb_hcd *hcd)
 {
 	struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
@@ -1594,7 +1597,7 @@
 			r8a66597_bclr(r8a66597, ATTCHE, INTENB2);
 
 			/* start usb bus sampling */
-			start_root_hub_sampling(r8a66597, 1);
+			start_root_hub_sampling(r8a66597, 1, 1);
 		}
 		if (mask2 & DTCH) {
 			r8a66597_write(r8a66597, ~DTCH, INTSTS2);
@@ -1609,7 +1612,7 @@
 			r8a66597_bclr(r8a66597, ATTCHE, INTENB1);
 
 			/* start usb bus sampling */
-			start_root_hub_sampling(r8a66597, 0);
+			start_root_hub_sampling(r8a66597, 0, 1);
 		}
 		if (mask1 & DTCH) {
 			r8a66597_write(r8a66597, ~DTCH, INTSTS1);
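With the added "connect" parameter, the attach and detach paths now share
one sampling helper; the call convention, mirroring the hunks above, is:

	start_root_hub_sampling(r8a66597, port, 1);	/* ATTCH: connect */
	start_root_hub_sampling(r8a66597, port, 0);	/* DTCH: disconnect */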
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index 001789c..4ea50e0 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -42,16 +42,6 @@
 	  To compile this driver as a module, choose M here.  The module
 	  will be called adutux.
 
-config USB_AUERSWALD
-	tristate "USB Auerswald ISDN support"
-	depends on USB
-	help
-	  Say Y here if you want to connect an Auerswald USB ISDN Device
-	  to your computer's USB port.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called auerswald.
-
 config USB_RIO500
 	tristate "USB Diamond Rio500 support"
 	depends on USB
diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile
index aba091c..45b4e12 100644
--- a/drivers/usb/misc/Makefile
+++ b/drivers/usb/misc/Makefile
@@ -5,7 +5,6 @@
 
 obj-$(CONFIG_USB_ADUTUX)	+= adutux.o
 obj-$(CONFIG_USB_APPLEDISPLAY)	+= appledisplay.o
-obj-$(CONFIG_USB_AUERSWALD)	+= auerswald.o
 obj-$(CONFIG_USB_BERRY_CHARGE)	+= berry_charge.o
 obj-$(CONFIG_USB_CYPRESS_CY7C63)+= cypress_cy7c63.o
 obj-$(CONFIG_USB_CYTHERM)	+= cytherm.o
diff --git a/drivers/usb/misc/auerswald.c b/drivers/usb/misc/auerswald.c
deleted file mode 100644
index d2f61d5..0000000
--- a/drivers/usb/misc/auerswald.c
+++ /dev/null
@@ -1,2152 +0,0 @@
-/*****************************************************************************/
-/*
- *      auerswald.c  --  Auerswald PBX/System Telephone usb driver.
- *
- *      Copyright (C) 2001  Wolfgang Mües (wolfgang@iksw-muees.de)
- *
- *      Very much code of this driver is borrowed from dabusb.c (Deti Fliegl)
- *      and from the USB Skeleton driver (Greg Kroah-Hartman). Thank you.
- *
- *      This program is free software; you can redistribute it and/or modify
- *      it under the terms of the GNU General Public License as published by
- *      the Free Software Foundation; either version 2 of the License, or
- *      (at your option) any later version.
- *
- *      This program is distributed in the hope that it will be useful,
- *      but WITHOUT ANY WARRANTY; without even the implied warranty of
- *      MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *      GNU General Public License for more details.
- *
- *      You should have received a copy of the GNU General Public License
- *      along with this program; if not, write to the Free Software
- *      Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
- /*****************************************************************************/
-
-/* Standard Linux module include files */
-#include <asm/uaccess.h>
-#include <asm/byteorder.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/wait.h>
-#include <linux/usb.h>
-#include <linux/mutex.h>
-
-/*-------------------------------------------------------------------*/
-/* Debug support 						     */
-#ifdef DEBUG
-#define dump( adr, len) \
-do {			\
-	unsigned int u;	\
-	printk (KERN_DEBUG); \
-	for (u = 0; u < len; u++) \
-		printk (" %02X", adr[u] & 0xFF); \
-	printk ("\n"); \
-} while (0)
-#else
-#define dump( adr, len)
-#endif
-
-/*-------------------------------------------------------------------*/
-/* Version Information */
-#define DRIVER_VERSION "0.9.11"
-#define DRIVER_AUTHOR  "Wolfgang Mües <wolfgang@iksw-muees.de>"
-#define DRIVER_DESC    "Auerswald PBX/System Telephone usb driver"
-
-/*-------------------------------------------------------------------*/
-/* Private declarations for Auerswald USB driver                     */
-
-/* Auerswald Vendor ID */
-#define ID_AUERSWALD  	0x09BF
-
-#define AUER_MINOR_BASE	112	/* auerswald driver minor number */
-
-/* we can have up to this number of device plugged in at once */
-#define AUER_MAX_DEVICES 16
-
-
-/* Number of read buffers for each device */
-#define AU_RBUFFERS     10
-
-/* Number of chain elements for each control chain */
-#define AUCH_ELEMENTS   20
-
-/* Number of retries in communication */
-#define AU_RETRIES	10
-
-/*-------------------------------------------------------------------*/
-/* vendor specific protocol                                          */
-/* Header Byte */
-#define AUH_INDIRMASK   0x80    /* mask for direct/indirect bit */
-#define AUH_DIRECT      0x00    /* data is for USB device */
-#define AUH_INDIRECT    0x80    /* USB device is relay */
-
-#define AUH_SPLITMASK   0x40    /* mask for split bit */
-#define AUH_UNSPLIT     0x00    /* data block is full-size */
-#define AUH_SPLIT       0x40    /* data block is part of a larger one,
-                                   split-byte follows */
-
-#define AUH_TYPEMASK    0x3F    /* mask for type of data transfer */
-#define AUH_TYPESIZE    0x40    /* different types */
-#define AUH_DCHANNEL    0x00    /* D channel data */
-#define AUH_B1CHANNEL   0x01    /* B1 channel transparent */
-#define AUH_B2CHANNEL   0x02    /* B2 channel transparent */
-/*                0x03..0x0F       reserved for driver internal use */
-#define AUH_COMMAND     0x10    /* Command channel */
-#define AUH_BPROT       0x11    /* Configuration block protocol */
-#define AUH_DPROTANA    0x12    /* D channel protocol analyzer */
-#define AUH_TAPI        0x13    /* telephone api data (ATD) */
-/*                0x14..0x3F       reserved for other protocols */
-#define AUH_UNASSIGNED  0xFF    /* if char device has no assigned service */
-#define AUH_FIRSTUSERCH 0x11    /* first channel which is available for driver users */
-
-#define AUH_SIZE	1 	/* Size of Header Byte */
-
-/* Split Byte. Only present if split bit in header byte set.*/
-#define AUS_STARTMASK   0x80    /* mask for first block of splitted frame */
-#define AUS_FIRST       0x80    /* first block */
-#define AUS_FOLLOW      0x00    /* following block */
-
-#define AUS_ENDMASK     0x40    /* mask for last block of splitted frame */
-#define AUS_END         0x40    /* last block */
-#define AUS_NOEND       0x00    /* not the last block */
-
-#define AUS_LENMASK     0x3F    /* mask for block length information */
-
-/* Request types */
-#define AUT_RREQ        (USB_DIR_IN  | USB_TYPE_VENDOR | USB_RECIP_OTHER)   /* Read Request */
-#define AUT_WREQ        (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER)   /* Write Request */
-
-/* Vendor Requests */
-#define AUV_GETINFO     0x00    /* GetDeviceInfo */
-#define AUV_WBLOCK      0x01    /* Write Block */
-#define AUV_RBLOCK      0x02    /* Read Block */
-#define AUV_CHANNELCTL  0x03    /* Channel Control */
-#define AUV_DUMMY	0x04	/* Dummy Out for retry */
-
-/* Device Info Types */
-#define AUDI_NUMBCH     0x0000  /* Number of supported B channels */
-#define AUDI_OUTFSIZE   0x0001  /* Size of OUT B channel fifos */
-#define AUDI_MBCTRANS   0x0002  /* max. Blocklength of control transfer */
-
-/* Interrupt endpoint definitions */
-#define AU_IRQENDP      1       /* Endpoint number */
-#define AU_IRQCMDID     16      /* Command-block ID */
-#define AU_BLOCKRDY     0       /* Command: Block data ready on ctl endpoint */
-#define AU_IRQMINSIZE	5	/* Nr. of bytes decoded in this driver */
-
-/* Device String Descriptors */
-#define AUSI_VENDOR   	1	/* "Auerswald GmbH & Co. KG" */
-#define AUSI_DEVICE   	2	/* Name of the Device */
-#define AUSI_SERIALNR 	3	/* Serial Number */
-#define AUSI_MSN      	4	/* "MSN ..." (first) Multiple Subscriber Number */
-
-#define AUSI_DLEN	100	/* Max. Length of Device Description */
-
-#define AUV_RETRY	0x101	/* First Firmware version which can do control retries */
-
-/*-------------------------------------------------------------------*/
-/* External data structures / Interface                              */
-typedef struct
-{
-	char __user *buf;	/* return buffer for string contents */
-	unsigned int bsize;	/* size of return buffer */
-} audevinfo_t,*paudevinfo_t;
-
-/* IO controls */
-#define IOCTL_AU_SLEN	  _IOR( 'U', 0xF0, int)         /* return the max. string descriptor length */
-#define IOCTL_AU_DEVINFO  _IOWR('U', 0xF1, audevinfo_t) /* get name of a specific device */
-#define IOCTL_AU_SERVREQ  _IOW( 'U', 0xF2, int) 	/* request a service channel */
-#define IOCTL_AU_BUFLEN	  _IOR( 'U', 0xF3, int)         /* return the max. buffer length for the device */
-#define IOCTL_AU_RXAVAIL  _IOR( 'U', 0xF4, int)         /* return != 0 if Receive Data available */
-#define IOCTL_AU_CONNECT  _IOR( 'U', 0xF5, int)         /* return != 0 if connected to a service channel */
-#define IOCTL_AU_TXREADY  _IOR( 'U', 0xF6, int)         /* return != 0 if Transmitt channel ready to send */
-/*                              'U'  0xF7..0xFF reseved */
-
-/*-------------------------------------------------------------------*/
-/* Internal data structures                                          */
-
-/* ..................................................................*/
-/* urb chain element */
-struct  auerchain;                      /* forward for circular reference */
-typedef struct
-{
-        struct auerchain *chain;        /* pointer to the chain to which this element belongs */
-        struct urb * urbp;                   /* pointer to attached urb */
-        void *context;                  /* saved URB context */
-        usb_complete_t complete;        /* saved URB completion function */
-        struct list_head list;          /* to include element into a list */
-} auerchainelement_t,*pauerchainelement_t;
-
-/* urb chain */
-typedef struct auerchain
-{
-        pauerchainelement_t active;     /* element which is submitted to urb */
-	spinlock_t lock;                /* protection agains interrupts */
-        struct list_head waiting_list;  /* list of waiting elements */
-        struct list_head free_list;     /* list of available elements */
-} auerchain_t,*pauerchain_t;
-
-/* urb blocking completion helper struct */
-typedef struct
-{
-	wait_queue_head_t wqh;    	/* wait for completion */
-	unsigned int done;		/* completion flag */
-} auerchain_chs_t,*pauerchain_chs_t;
-
-/* ...................................................................*/
-/* buffer element */
-struct  auerbufctl;                     /* forward */
-typedef struct
-{
-        char *bufp;                     /* reference to allocated data buffer */
-        unsigned int len;               /* number of characters in data buffer */
-	unsigned int retries;		/* for urb retries */
-        struct usb_ctrlrequest *dr;	/* for setup data in control messages */
-        struct urb * urbp;                   /* USB urb */
-        struct auerbufctl *list;        /* pointer to list */
-        struct list_head buff_list;     /* reference to next buffer in list */
-} auerbuf_t,*pauerbuf_t;
-
-/* buffer list control block */
-typedef struct auerbufctl
-{
-        spinlock_t lock;                /* protection in interrupt */
-        struct list_head free_buff_list;/* free buffers */
-        struct list_head rec_buff_list; /* buffers with receive data */
-} auerbufctl_t,*pauerbufctl_t;
-
-/* ...................................................................*/
-/* service context */
-struct  auerscon;                       /* forward */
-typedef void (*auer_dispatch_t)(struct auerscon*, pauerbuf_t);
-typedef void (*auer_disconn_t) (struct auerscon*);
-typedef struct auerscon
-{
-        unsigned int id;                /* protocol service id AUH_xxxx */
-        auer_dispatch_t dispatch;       /* dispatch read buffer */
-	auer_disconn_t disconnect;	/* disconnect from device, wake up all char readers */
-} auerscon_t,*pauerscon_t;
-
-/* ...................................................................*/
-/* USB device context */
-typedef struct
-{
-	struct mutex 	mutex;         	    /* protection in user context */
-	char 			name[20];	    /* name of the /dev/usb entry */
-	unsigned int		dtindex;	    /* index in the device table */
-	struct usb_device *	usbdev;      	    /* USB device handle */
-	int			open_count;	    /* count the number of open character channels */
-        char 			dev_desc[AUSI_DLEN];/* for storing a textual description */
-        unsigned int 		maxControlLength;   /* max. Length of control paket (without header) */
-        struct urb * 		inturbp;            /* interrupt urb */
-        char *			intbufp;            /* data buffer for interrupt urb */
-	unsigned int 		irqsize;	    /* size of interrupt endpoint 1 */
-        struct auerchain 	controlchain;  	    /* for chaining of control messages */
-	auerbufctl_t 		bufctl;             /* Buffer control for control transfers */
-        pauerscon_t 	     	services[AUH_TYPESIZE];/* context pointers for each service */
-	unsigned int		version;	    /* Version of the device */
-	wait_queue_head_t 	bufferwait;         /* wait for a control buffer */
-} auerswald_t,*pauerswald_t;
-
-/* ................................................................... */
-/* character device context */
-typedef struct
-{
-	struct mutex mutex;		/* protection in user context */
-	pauerswald_t auerdev;           /* context pointer of assigned device */
-        auerbufctl_t bufctl;            /* controls the buffer chain */
-        auerscon_t scontext;            /* service context */
-	wait_queue_head_t readwait;     /* for synchronous reading */
-	struct mutex readmutex;		/* protection against multiple reads */
-	pauerbuf_t readbuf;		/* buffer held for partial reading */
-	unsigned int readoffset;	/* current offset in readbuf */
-	unsigned int removed;		/* is != 0 if device is removed */
-} auerchar_t,*pauerchar_t;
-
-
-/*-------------------------------------------------------------------*/
-/* Forwards */
-static void auerswald_ctrlread_complete (struct urb * urb);
-static void auerswald_removeservice (pauerswald_t cp, pauerscon_t scp);
-static struct usb_driver auerswald_driver;
-
-
-/*-------------------------------------------------------------------*/
-/* USB chain helper functions                                        */
-/* --------------------------                                        */
-
-/* completion function for chained urbs */
-static void auerchain_complete (struct urb * urb)
-{
-	unsigned long flags;
-        int result;
-
-        /* get pointer to element and to chain */
-	pauerchainelement_t acep = urb->context;
-        pauerchain_t         acp = acep->chain;
-
-        /* restore original entries in urb */
-        urb->context  = acep->context;
-        urb->complete = acep->complete;
-
-        dbg ("auerchain_complete called");
-
-        /* call original completion function
-           NOTE: this function may lead to more urbs submitted into the chain.
-                 (no chain lock at calling complete()!)
-                 acp->active != NULL is protecting us against recursion.*/
-        urb->complete (urb);
-
-        /* detach element from chain data structure */
-	spin_lock_irqsave (&acp->lock, flags);
-        if (acp->active != acep) /* paranoia debug check */
-	        dbg ("auerchain_complete: completion on non-active element called!");
-        else
-                acp->active = NULL;
-
-        /* add the used chain element to the list of free elements */
-	list_add_tail (&acep->list, &acp->free_list);
-        acep = NULL;
-
-        /* is there a new element waiting in the chain? */
-        if (!acp->active && !list_empty (&acp->waiting_list)) {
-                /* yes: get the entry */
-                struct list_head *tmp = acp->waiting_list.next;
-                list_del (tmp);
-                acep = list_entry (tmp, auerchainelement_t, list);
-                acp->active = acep;
-        }
-        spin_unlock_irqrestore (&acp->lock, flags);
-
-        /* submit the new urb */
-        if (acep) {
-                urb    = acep->urbp;
-                dbg ("auerchain_complete: submitting next urb from chain");
-		urb->status = 0;	/* needed! */
-		result = usb_submit_urb(urb, GFP_ATOMIC);
-
-                /* check for submit errors */
-                if (result) {
-                        urb->status = result;
-                        dbg("auerchain_complete: usb_submit_urb with error code %d", result);
-                        /* and do error handling via *this* completion function (recursive) */
-                        auerchain_complete( urb);
-                }
-        } else {
-                /* simple return without submitting a new urb.
-                   The empty chain is detected with acp->active == NULL. */
-        };
-}
-
-
-/* submit function for chained urbs
-   this function may be called from completion context or from user space!
-   early = 1 -> submit in front of chain
-*/
-static int auerchain_submit_urb_list (pauerchain_t acp, struct urb * urb, int early)
-{
-        int result;
-        unsigned long flags;
-        pauerchainelement_t acep = NULL;
-
-        dbg ("auerchain_submit_urb called");
-
-        /* try to get a chain element */
-        spin_lock_irqsave (&acp->lock, flags);
-        if (!list_empty (&acp->free_list)) {
-                /* yes: get the entry */
-                struct list_head *tmp = acp->free_list.next;
-                list_del (tmp);
-                acep = list_entry (tmp, auerchainelement_t, list);
-        }
-        spin_unlock_irqrestore (&acp->lock, flags);
-
-        /* if no chain element available: return with error */
-        if (!acep) {
-                return -ENOMEM;
-        }
-
-        /* fill in the new chain element values */
-        acep->chain    = acp;
-        acep->context  = urb->context;
-        acep->complete = urb->complete;
-        acep->urbp     = urb;
-        INIT_LIST_HEAD (&acep->list);
-
-        /* modify urb */
-        urb->context   = acep;
-        urb->complete  = auerchain_complete;
-        urb->status    = -EINPROGRESS;    /* usb_submit_urb does this, too */
-
-        /* add element to chain - or start it immediately */
-        spin_lock_irqsave (&acp->lock, flags);
-        if (acp->active) {
-                /* there is traffic in the chain, simple add element to chain */
-		if (early) {
-			dbg ("adding new urb to head of chain");
-			list_add (&acep->list, &acp->waiting_list);
-		} else {
-			dbg ("adding new urb to end of chain");
-			list_add_tail (&acep->list, &acp->waiting_list);
-		}
-		acep = NULL;
-        } else {
-                /* the chain is empty. Prepare restart */
-                acp->active = acep;
-        }
-        /* Spin has to be removed before usb_submit_urb! */
-        spin_unlock_irqrestore (&acp->lock, flags);
-
-        /* Submit urb if immediate restart */
-        if (acep) {
-                dbg("submitting urb immediate");
-		urb->status = 0;	/* needed! */
-                result = usb_submit_urb(urb, GFP_ATOMIC);
-                /* check for submit errors */
-                if (result) {
-                        urb->status = result;
-                        dbg("auerchain_submit_urb: usb_submit_urb with error code %d", result);
-                        /* and do error handling via completion function */
-                        auerchain_complete( urb);
-                }
-        }
-
-        return 0;
-}
-
-/* submit function for chained urbs
-   this function may be called from completion context or from user space!
-*/
-static int auerchain_submit_urb (pauerchain_t acp, struct urb * urb)
-{
-	return auerchain_submit_urb_list (acp, urb, 0);
-}
-
-/* cancel an urb which is submitted to the chain
-   the result is 0 if the urb is cancelled, or -EINPROGRESS if
-   the function is successfully started.
-*/
-static int auerchain_unlink_urb (pauerchain_t acp, struct urb * urb)
-{
-	unsigned long flags;
-        struct urb * urbp;
-        pauerchainelement_t acep;
-        struct list_head *tmp;
-
-        dbg ("auerchain_unlink_urb called");
-
-        /* search the chain of waiting elements */
-        spin_lock_irqsave (&acp->lock, flags);
-        list_for_each (tmp, &acp->waiting_list) {
-                acep = list_entry (tmp, auerchainelement_t, list);
-                if (acep->urbp == urb) {
-                        list_del (tmp);
-                        urb->context = acep->context;
-                        urb->complete = acep->complete;
-                        list_add_tail (&acep->list, &acp->free_list);
-                        spin_unlock_irqrestore (&acp->lock, flags);
-                        dbg ("unlink waiting urb");
-                        urb->status = -ENOENT;
-                        urb->complete (urb);
-                        return 0;
-                }
-        }
-        /* not found. */
-        spin_unlock_irqrestore (&acp->lock, flags);
-
-        /* get the active urb */
-        acep = acp->active;
-        if (acep) {
-                urbp = acep->urbp;
-
-                /* check if we have to cancel the active urb */
-                if (urbp == urb) {
-                        /* note that there is a race condition between the check above
-                           and the unlink() call because of no lock. This race is harmless,
-                           because the usb module will detect the unlink() after completion.
-                           We can't use the acp->lock here because the completion function
-                           wants to grab it.
-			*/
-                        dbg ("unlink active urb");
-                        return usb_unlink_urb (urbp);
-                }
-        }
-
-        /* not found anyway
-           ... is some kind of success
-	*/
-        dbg ("urb to unlink not found in chain");
-        return 0;
-}
-
-/* cancel all urbs which are in the chain.
-   this function must not be called from interrupt or completion handler.
-*/
-static void auerchain_unlink_all (pauerchain_t acp)
-{
-	unsigned long flags;
-        struct urb * urbp;
-        pauerchainelement_t acep;
-
-        dbg ("auerchain_unlink_all called");
-
-        /* clear the chain of waiting elements */
-        spin_lock_irqsave (&acp->lock, flags);
-        while (!list_empty (&acp->waiting_list)) {
-                /* get the next entry */
-                struct list_head *tmp = acp->waiting_list.next;
-                list_del (tmp);
-                acep = list_entry (tmp, auerchainelement_t, list);
-                urbp = acep->urbp;
-                urbp->context = acep->context;
-                urbp->complete = acep->complete;
-                list_add_tail (&acep->list, &acp->free_list);
-                spin_unlock_irqrestore (&acp->lock, flags);
-                dbg ("unlink waiting urb");
-                urbp->status = -ENOENT;
-                urbp->complete (urbp);
-                spin_lock_irqsave (&acp->lock, flags);
-        }
-        spin_unlock_irqrestore (&acp->lock, flags);
-
-        /* clear the active urb */
-        acep = acp->active;
-        if (acep) {
-                urbp = acep->urbp;
-                dbg ("unlink active urb");
-                usb_kill_urb (urbp);
-        }
-}
-
-
-/* free the chain.
-   this function must not be called from interrupt or completion handler.
-*/
-static void auerchain_free (pauerchain_t acp)
-{
-	unsigned long flags;
-        pauerchainelement_t acep;
-
-        dbg ("auerchain_free called");
-
-        /* first, cancel all pending urbs */
-        auerchain_unlink_all (acp);
-
-        /* free the elements */
-        spin_lock_irqsave (&acp->lock, flags);
-        while (!list_empty (&acp->free_list)) {
-                /* get the next entry */
-                struct list_head *tmp = acp->free_list.next;
-                list_del (tmp);
-                spin_unlock_irqrestore (&acp->lock, flags);
-		acep = list_entry (tmp, auerchainelement_t, list);
-                kfree (acep);
-	        spin_lock_irqsave (&acp->lock, flags);
-	}
-        spin_unlock_irqrestore (&acp->lock, flags);
-}
-
-
-/* Init the chain control structure */
-static void auerchain_init (pauerchain_t acp)
-{
-        /* init the chain data structure */
-        acp->active = NULL;
-	spin_lock_init (&acp->lock);
-        INIT_LIST_HEAD (&acp->waiting_list);
-        INIT_LIST_HEAD (&acp->free_list);
-}
-
-/* setup a chain.
-   It is assumed that there is no concurrency while setting up the chain
-   requirement: auerchain_init()
-*/
-static int auerchain_setup (pauerchain_t acp, unsigned int numElements)
-{
-        pauerchainelement_t acep;
-
-        dbg ("auerchain_setup called with %d elements", numElements);
-
-        /* fill the list of free elements */
-        for (;numElements; numElements--) {
-                acep = kzalloc(sizeof(auerchainelement_t), GFP_KERNEL);
-                if (!acep)
-			goto ac_fail;
-                INIT_LIST_HEAD (&acep->list);
-                list_add_tail (&acep->list, &acp->free_list);
-        }
-        return 0;
-
-ac_fail:/* free the elements */
-        while (!list_empty (&acp->free_list)) {
-                /* get the next entry */
-                struct list_head *tmp = acp->free_list.next;
-                list_del (tmp);
-                acep = list_entry (tmp, auerchainelement_t, list);
-                kfree (acep);
-        }
-        return -ENOMEM;
-}
-
-
-/* completion handler for synchronous chained URBs */
-static void auerchain_blocking_completion (struct urb *urb)
-{
-	pauerchain_chs_t pchs = urb->context;
-	pchs->done = 1;
-	wmb();
-	wake_up (&pchs->wqh);
-}
-
-
-/* Starts chained urb and waits for completion or timeout */
-static int auerchain_start_wait_urb (pauerchain_t acp, struct urb *urb, int timeout, int* actual_length)
-{
-	auerchain_chs_t chs;
-	int status;
-
-	dbg ("auerchain_start_wait_urb called");
-	init_waitqueue_head (&chs.wqh);
-	chs.done = 0;
-
-	urb->context = &chs;
-	status = auerchain_submit_urb (acp, urb);
-	if (status)
-		/* something went wrong */
-		return status;
-
-	timeout = wait_event_timeout(chs.wqh, chs.done, timeout);
-
-	if (!timeout && !chs.done) {
-		if (urb->status != -EINPROGRESS) {	/* No callback?!! */
-			dbg ("auerchain_start_wait_urb: raced timeout");
-			status = urb->status;
-		} else {
-			dbg ("auerchain_start_wait_urb: timeout");
-			auerchain_unlink_urb (acp, urb);  /* remove urb safely */
-			status = -ETIMEDOUT;
-		}
-	} else
-		status = urb->status;
-
-	if (status >= 0)
-		*actual_length = urb->actual_length;
-
-  	return status;
-}
-
-
-/* auerchain_control_msg - Builds a control urb, sends it off and waits for completion
-   acp: pointer to the auerchain
-   dev: pointer to the usb device to send the message to
-   pipe: endpoint "pipe" to send the message to
-   request: USB message request value
-   requesttype: USB message request type value
-   value: USB message value
-   index: USB message index value
-   data: pointer to the data to send
-   size: length in bytes of the data to send
-   timeout: time to wait for the message to complete before timing out (if 0 the wait is forever)
-
-   This function sends a simple control message to a specified endpoint
-   and waits for the message to complete, or timeout.
-
-   If successful, it returns the transferred length, otherwise a negative error number.
-
-   Don't use this function from within an interrupt context, like a
-   bottom half handler.  If you need an asynchronous message, or need to send
-   a message from within interrupt context, use auerchain_submit_urb()
-*/
-static int auerchain_control_msg (pauerchain_t acp, struct usb_device *dev, unsigned int pipe, __u8 request, __u8 requesttype,
-			          __u16 value, __u16 index, void *data, __u16 size, int timeout)
-{
-	int ret;
-	struct usb_ctrlrequest *dr;
-	struct urb *urb;
-        int uninitialized_var(length);
-
-        dbg ("auerchain_control_msg");
-        dr = kmalloc (sizeof (struct usb_ctrlrequest), GFP_KERNEL);
-	if (!dr)
-		return -ENOMEM;
-	urb = usb_alloc_urb (0, GFP_KERNEL);
-	if (!urb) {
-        	kfree (dr);
-		return -ENOMEM;
-        }
-
-	dr->bRequestType = requesttype;
-	dr->bRequest = request;
-	dr->wValue  = cpu_to_le16 (value);
-	dr->wIndex  = cpu_to_le16 (index);
-	dr->wLength = cpu_to_le16 (size);
-
-	usb_fill_control_urb (urb, dev, pipe, (unsigned char*)dr, data, size,    /* build urb */
-		          auerchain_blocking_completion, NULL);
-	ret = auerchain_start_wait_urb (acp, urb, timeout, &length);
-
-	usb_free_urb (urb);
-	kfree (dr);
-
-        if (ret < 0)
-		return ret;
-	else
-		return length;
-}
-
-
-/*-------------------------------------------------------------------*/
-/* Buffer List helper functions                                      */
-
-/* free a single auerbuf */
-static void auerbuf_free (pauerbuf_t bp)
-{
-	kfree(bp->bufp);
-	kfree(bp->dr);
-	usb_free_urb(bp->urbp);
-	kfree(bp);
-}
-
-/* free the buffers from an auerbuf list */
-static void auerbuf_free_list (struct list_head *q)
-{
-        struct list_head *tmp;
-	struct list_head *p;
-	pauerbuf_t bp;
-
-	dbg ("auerbuf_free_list");
-	for (p = q->next; p != q;) {
-		bp = list_entry (p, auerbuf_t, buff_list);
-		tmp = p->next;
-		list_del (p);
-		p = tmp;
-		auerbuf_free (bp);
-	}
-}
-
-/* init the members of a list control block */
-static void auerbuf_init (pauerbufctl_t bcp)
-{
-	dbg ("auerbuf_init");
-	spin_lock_init (&bcp->lock);
-        INIT_LIST_HEAD (&bcp->free_buff_list);
-        INIT_LIST_HEAD (&bcp->rec_buff_list);
-}
-
-/* free all buffers from an auerbuf chain */
-static void auerbuf_free_buffers (pauerbufctl_t bcp)
-{
-	unsigned long flags;
-	dbg ("auerbuf_free_buffers");
-
-        spin_lock_irqsave (&bcp->lock, flags);
-
-	auerbuf_free_list (&bcp->free_buff_list);
-	auerbuf_free_list (&bcp->rec_buff_list);
-
-        spin_unlock_irqrestore (&bcp->lock, flags);
-}
-
-/* setup a list of buffers */
-/* requirement: auerbuf_init() */
-static int auerbuf_setup (pauerbufctl_t bcp, unsigned int numElements, unsigned int bufsize)
-{
-        pauerbuf_t bep = NULL;
-
-        dbg ("auerbuf_setup called with %d elements of %d bytes", numElements, bufsize);
-
-        /* fill the list of free elements */
-        for (;numElements; numElements--) {
-                bep = kzalloc(sizeof(auerbuf_t), GFP_KERNEL);
-                if (!bep)
-			goto bl_fail;
-                bep->list = bcp;
-                INIT_LIST_HEAD (&bep->buff_list);
-                bep->bufp = kmalloc (bufsize, GFP_KERNEL);
-                if (!bep->bufp)
-			goto bl_fail;
-                bep->dr = kmalloc(sizeof (struct usb_ctrlrequest), GFP_KERNEL);
-                if (!bep->dr)
-			goto bl_fail;
-                bep->urbp = usb_alloc_urb (0, GFP_KERNEL);
-                if (!bep->urbp)
-			goto bl_fail;
-                list_add_tail (&bep->buff_list, &bcp->free_buff_list);
-        }
-        return 0;
-
-bl_fail:/* not enough memory. Free allocated elements */
-        dbg ("auerbuf_setup: no more memory");
-	auerbuf_free(bep);
-        auerbuf_free_buffers (bcp);
-        return -ENOMEM;
-}
-
-/* insert a used buffer into the free list */
-static void auerbuf_releasebuf( pauerbuf_t bp)
-{
-        unsigned long flags;
-        pauerbufctl_t bcp = bp->list;
-	bp->retries = 0;
-
-        dbg ("auerbuf_releasebuf called");
-        spin_lock_irqsave (&bcp->lock, flags);
-	list_add_tail (&bp->buff_list, &bcp->free_buff_list);
-        spin_unlock_irqrestore (&bcp->lock, flags);
-}
-
-
-/*-------------------------------------------------------------------*/
-/* Completion handlers */
-
-/* Values of urb->status or results of usb_submit_urb():
-0		Initial, OK
--EINPROGRESS	during submission until end
--ENOENT		if urb is unlinked
--ETIME		Device did not respond
--ENOMEM		Memory Overflow
--ENODEV		Specified USB-device or bus doesn't exist
--ENXIO		URB already queued
--EINVAL		a) Invalid transfer type specified (or not supported)
-		b) Invalid interrupt interval (0n256)
--EAGAIN		a) Specified ISO start frame too early
-		b) (using ISO-ASAP) Too much scheduled for the future wait some time and try again.
--EFBIG		Too much ISO frames requested (currently uhci900)
--EPIPE		Specified pipe-handle/Endpoint is already stalled
--EMSGSIZE	Endpoint message size is zero, do interface/alternate setting
--EPROTO		a) Bitstuff error
-		b) Unknown USB error
--EILSEQ		CRC mismatch
--ENOSR		Buffer error
--EREMOTEIO	Short packet detected
--EXDEV		ISO transfer only partially completed look at individual frame status for details
--EINVAL		ISO madness, if this happens: Log off and go home
--EOVERFLOW	babble
-*/
-
-/* check if a status code allows a retry */
-static int auerswald_status_retry (int status)
-{
-	switch (status) {
-	case 0:
-	case -ETIME:
-	case -EOVERFLOW:
-	case -EAGAIN:
-	case -EPIPE:
-	case -EPROTO:
-	case -EILSEQ:
-	case -ENOSR:
-	case -EREMOTEIO:
-		return 1; /* do a retry */
-	}
-	return 0;	/* no retry possible */
-}
-
-/* Completion of asynchronous write block */
-static void auerchar_ctrlwrite_complete (struct urb * urb)
-{
-	pauerbuf_t bp =  urb->context;
-	pauerswald_t cp = ((pauerswald_t)((char *)(bp->list)-(unsigned long)(&((pauerswald_t)0)->bufctl)));
-	dbg ("auerchar_ctrlwrite_complete called");
-
-	/* reuse the buffer */
-	auerbuf_releasebuf (bp);
-	/* Wake up all processes waiting for a buffer */
-	wake_up (&cp->bufferwait);
-}
-
-/* Completion handler for dummy retry packet */
-static void auerswald_ctrlread_wretcomplete (struct urb * urb)
-{
-	pauerbuf_t bp = urb->context;
-        pauerswald_t cp;
-	int ret;
-	int status = urb->status;
-
-        dbg ("auerswald_ctrlread_wretcomplete called");
-        dbg ("complete with status: %d", status);
-	cp = ((pauerswald_t)((char *)(bp->list)-(unsigned long)(&((pauerswald_t)0)->bufctl)));
-
-	/* check if it is possible to advance */
-	if (!auerswald_status_retry(status) || !cp->usbdev) {
-		/* reuse the buffer */
-		err ("control dummy: transmission error %d, can not retry", status);
-		auerbuf_releasebuf (bp);
-		/* Wake up all processes waiting for a buffer */
-		wake_up (&cp->bufferwait);
-		return;
-	}
-
-	/* fill the control message */
-	bp->dr->bRequestType = AUT_RREQ;
-	bp->dr->bRequest     = AUV_RBLOCK;
-	bp->dr->wLength      = bp->dr->wValue;	/* temporary stored */
-	bp->dr->wValue       = cpu_to_le16 (1);	/* Retry Flag */
-	/* bp->dr->index    = channel id;          remains */
-	usb_fill_control_urb (bp->urbp, cp->usbdev, usb_rcvctrlpipe (cp->usbdev, 0),
-                          (unsigned char*)bp->dr, bp->bufp, le16_to_cpu (bp->dr->wLength),
-		          auerswald_ctrlread_complete,bp);
-
-	/* submit the control msg as next paket */
-	ret = auerchain_submit_urb_list (&cp->controlchain, bp->urbp, 1);
-        if (ret) {
-        	dbg ("auerswald_ctrlread_complete: nonzero result of auerchain_submit_urb_list %d", ret);
-        	bp->urbp->status = ret;
-        	auerswald_ctrlread_complete (bp->urbp);
-    	}
-}
-
-/* completion handler for receiving of control messages */
-static void auerswald_ctrlread_complete (struct urb * urb)
-{
-        unsigned int  serviceid;
-        pauerswald_t  cp;
-        pauerscon_t   scp;
-	pauerbuf_t bp = urb->context;
-	int status = urb->status;
-	int ret;
-
-        dbg ("auerswald_ctrlread_complete called");
-
-	cp = ((pauerswald_t)((char *)(bp->list)-(unsigned long)(&((pauerswald_t)0)->bufctl)));
-
-	/* check if there is valid data in this urb */
-        if (status) {
-		dbg ("complete with non-zero status: %d", status);
-		/* should we do a retry? */
-		if (!auerswald_status_retry(status)
-		 || !cp->usbdev
-		 || (cp->version < AUV_RETRY)
-                 || (bp->retries >= AU_RETRIES)) {
-			/* reuse the buffer */
-			err ("control read: transmission error %d, can not retry", status);
-			auerbuf_releasebuf (bp);
-			/* Wake up all processes waiting for a buffer */
-			wake_up (&cp->bufferwait);
-			return;
-		}
-		bp->retries++;
-		dbg ("Retry count = %d", bp->retries);
-		/* send a long dummy control-write-message to allow device firmware to react */
-		bp->dr->bRequestType = AUT_WREQ;
-		bp->dr->bRequest     = AUV_DUMMY;
-		bp->dr->wValue       = bp->dr->wLength; /* temporary storage */
-		// bp->dr->wIndex    channel ID remains
-		bp->dr->wLength      = cpu_to_le16 (32); /* >= 8 bytes */
-		usb_fill_control_urb (bp->urbp, cp->usbdev, usb_sndctrlpipe (cp->usbdev, 0),
-  			(unsigned char*)bp->dr, bp->bufp, 32,
-	   		auerswald_ctrlread_wretcomplete,bp);
-
-		/* submit the control msg as next paket */
-       		ret = auerchain_submit_urb_list (&cp->controlchain, bp->urbp, 1);
-       		if (ret) {
-               		dbg ("auerswald_ctrlread_complete: nonzero result of auerchain_submit_urb_list %d", ret);
-               		bp->urbp->status = ret;
-               		auerswald_ctrlread_wretcomplete (bp->urbp);
-		}
-                return;
-        }
-
-        /* get the actual bytecount (incl. headerbyte) */
-        bp->len = urb->actual_length;
-        serviceid = bp->bufp[0] & AUH_TYPEMASK;
-        dbg ("Paket with serviceid %d and %d bytes received", serviceid, bp->len);
-
-        /* dispatch the paket */
-        scp = cp->services[serviceid];
-        if (scp) {
-                /* look, Ma, a listener! */
-                scp->dispatch (scp, bp);
-        }
-
-        /* release the paket */
-        auerbuf_releasebuf (bp);
-	/* Wake up all processes waiting for a buffer */
-	wake_up (&cp->bufferwait);
-}
-
-/*-------------------------------------------------------------------*/
-/* Handling of Interrupt Endpoint                                    */
-/* This interrupt Endpoint is used to inform the host about waiting
-   messages from the USB device.
-*/
-/* int completion handler. */
-static void auerswald_int_complete (struct urb * urb)
-{
-        unsigned long flags;
-        unsigned  int channelid;
-        unsigned  int bytecount;
-        int ret;
-	int status = urb->status;
-        pauerbuf_t   bp = NULL;
-	pauerswald_t cp = urb->context;
-
-        dbg ("%s called", __func__);
-
-	switch (status) {
-	case 0:
-		/* success */
-		break;
-	case -ECONNRESET:
-	case -ENOENT:
-	case -ESHUTDOWN:
-		/* this urb is terminated, clean up */
-		dbg("%s - urb shutting down with status: %d", __func__, status);
-		return;
-	default:
-		dbg("%s - nonzero urb status received: %d", __func__, status);
-		goto exit;
-	}
-
-        /* check if all needed data was received */
-	if (urb->actual_length < AU_IRQMINSIZE) {
-                dbg ("invalid data length received: %d bytes", urb->actual_length);
-		goto exit;
-        }
-
-        /* check the command code */
-        if (cp->intbufp[0] != AU_IRQCMDID) {
-                dbg ("invalid command received: %d", cp->intbufp[0]);
-		goto exit;
-        }
-
-        /* check the command type */
-        if (cp->intbufp[1] != AU_BLOCKRDY) {
-                dbg ("invalid command type received: %d", cp->intbufp[1]);
-		goto exit;
-        }
-
-        /* now extract the information */
-        channelid = cp->intbufp[2];
-        bytecount = (unsigned char)cp->intbufp[3];
-        bytecount |= (unsigned char)cp->intbufp[4] << 8;
-
-        /* check the channel id */
-        if (channelid >= AUH_TYPESIZE) {
-                dbg ("invalid channel id received: %d", channelid);
-		goto exit;
-        }
-
-        /* check the byte count */
-        if (bytecount > (cp->maxControlLength+AUH_SIZE)) {
-                dbg ("invalid byte count received: %d", bytecount);
-		goto exit;
-        }
-        dbg ("Service Channel = %d", channelid);
-        dbg ("Byte Count = %d", bytecount);
-
-        /* get a buffer for the next data paket */
-        spin_lock_irqsave (&cp->bufctl.lock, flags);
-        if (!list_empty (&cp->bufctl.free_buff_list)) {
-                /* yes: get the entry */
-                struct list_head *tmp = cp->bufctl.free_buff_list.next;
-                list_del (tmp);
-                bp = list_entry (tmp, auerbuf_t, buff_list);
-        }
-        spin_unlock_irqrestore (&cp->bufctl.lock, flags);
-
-        /* if no buffer available: skip it */
-        if (!bp) {
-                dbg ("auerswald_int_complete: no data buffer available");
-                /* can we do something more?
-		   This is a big problem: if this int packet is ignored, the
-		   device will wait forever and not signal any more data.
-		   The only real solution is: having enough buffers!
-		   Or perhaps temporary disabling the int endpoint?
-		*/
-		goto exit;
-        }
-
-	/* fill the control message */
-        bp->dr->bRequestType = AUT_RREQ;
-	bp->dr->bRequest     = AUV_RBLOCK;
-	bp->dr->wValue       = cpu_to_le16 (0);
-	bp->dr->wIndex       = cpu_to_le16 (channelid | AUH_DIRECT | AUH_UNSPLIT);
-	bp->dr->wLength      = cpu_to_le16 (bytecount);
-	usb_fill_control_urb (bp->urbp, cp->usbdev, usb_rcvctrlpipe (cp->usbdev, 0),
-                          (unsigned char*)bp->dr, bp->bufp, bytecount,
-		          auerswald_ctrlread_complete,bp);
-
-        /* submit the control msg */
-        ret = auerchain_submit_urb (&cp->controlchain, bp->urbp);
-        if (ret) {
-                dbg ("auerswald_int_complete: nonzero result of auerchain_submit_urb %d", ret);
-                bp->urbp->status = ret;
-                auerswald_ctrlread_complete( bp->urbp);
-		/* here applies the same problem as above: device locking! */
-        }
-exit:
-	ret = usb_submit_urb (urb, GFP_ATOMIC);
-	if (ret)
-		err ("%s - usb_submit_urb failed with result %d",
-		     __func__, ret);
-}
-
-/* int memory deallocation
-   NOTE: no mutex please!
-*/
-static void auerswald_int_free (pauerswald_t cp)
-{
-	if (cp->inturbp) {
-		usb_free_urb(cp->inturbp);
-		cp->inturbp = NULL;
-	}
-	kfree(cp->intbufp);
-	cp->intbufp = NULL;
-}
-
-/* This function is called to activate the interrupt
-   endpoint. This function returns 0 if successful or an error code.
-   NOTE: no mutex please!
-*/
-static int auerswald_int_open (pauerswald_t cp)
-{
-        int ret;
-	struct usb_host_endpoint *ep;
-	int irqsize;
-	dbg ("auerswald_int_open");
-
-	ep = cp->usbdev->ep_in[AU_IRQENDP];
-	if (!ep) {
-		ret = -EFAULT;
-  		goto intoend;
-    	}
-	irqsize = le16_to_cpu(ep->desc.wMaxPacketSize);
-	cp->irqsize = irqsize;
-
-	/* allocate the urb and data buffer */
-        if (!cp->inturbp) {
-                cp->inturbp = usb_alloc_urb (0, GFP_KERNEL);
-                if (!cp->inturbp) {
-                        ret = -ENOMEM;
-                        goto intoend;
-                }
-        }
-        if (!cp->intbufp) {
-                cp->intbufp = kmalloc (irqsize, GFP_KERNEL);
-                if (!cp->intbufp) {
-                        ret = -ENOMEM;
-                        goto intoend;
-                }
-        }
-        /* setup urb */
-        usb_fill_int_urb (cp->inturbp, cp->usbdev,
-			usb_rcvintpipe (cp->usbdev,AU_IRQENDP), cp->intbufp,
-			irqsize, auerswald_int_complete, cp, ep->desc.bInterval);
-        /* start the urb */
-	cp->inturbp->status = 0;	/* needed! */
-	ret = usb_submit_urb (cp->inturbp, GFP_KERNEL);
-
-intoend:
-        if (ret < 0) {
-                /* activation of interrupt endpoint has failed. Now clean up. */
-                dbg ("auerswald_int_open: activation of int endpoint failed");
-
-                /* deallocate memory */
-                auerswald_int_free (cp);
-        }
-        return ret;
-}
-
-/* This function is called to deactivate the interrupt
-   endpoint. This function returns 0 if successful or an error code.
-   NOTE: no mutex please!
-*/
-static void auerswald_int_release (pauerswald_t cp)
-{
-        dbg ("auerswald_int_release");
-
-        /* stop the int endpoint */
-	usb_kill_urb (cp->inturbp);
-
-        /* deallocate memory */
-        auerswald_int_free (cp);
-}
-
-/* --------------------------------------------------------------------- */
-/* Helper functions                                                      */
-
-/* wake up waiting readers */
-static void auerchar_disconnect (pauerscon_t scp)
-{
-        pauerchar_t ccp = ((pauerchar_t)((char *)(scp)-(unsigned long)(&((pauerchar_t)0)->scontext)));
-	dbg ("auerchar_disconnect called");
-	ccp->removed = 1;
-	wake_up (&ccp->readwait);
-}
-
-
-/* dispatch a read paket to a waiting character device */
-static void auerchar_ctrlread_dispatch (pauerscon_t scp, pauerbuf_t bp)
-{
-	unsigned long flags;
-        pauerchar_t ccp;
-        pauerbuf_t newbp = NULL;
-        char * charp;
-        dbg ("auerchar_ctrlread_dispatch called");
-        ccp = ((pauerchar_t)((char *)(scp)-(unsigned long)(&((pauerchar_t)0)->scontext)));
-
-        /* get a read buffer from character device context */
-        spin_lock_irqsave (&ccp->bufctl.lock, flags);
-        if (!list_empty (&ccp->bufctl.free_buff_list)) {
-                /* yes: get the entry */
-                struct list_head *tmp = ccp->bufctl.free_buff_list.next;
-                list_del (tmp);
-                newbp = list_entry (tmp, auerbuf_t, buff_list);
-        }
-        spin_unlock_irqrestore (&ccp->bufctl.lock, flags);
-
-        if (!newbp) {
-                dbg ("No read buffer available, discard packet!");
-                return;     /* no buffer, no dispatch */
-        }
-
-        /* copy information to new buffer element
-           (all buffers have the same length) */
-        charp = newbp->bufp;
-        newbp->bufp = bp->bufp;
-        bp->bufp = charp;
-        newbp->len = bp->len;
-
-        /* insert new buffer in read list */
-        spin_lock_irqsave (&ccp->bufctl.lock, flags);
-	list_add_tail (&newbp->buff_list, &ccp->bufctl.rec_buff_list);
-        spin_unlock_irqrestore (&ccp->bufctl.lock, flags);
-        dbg ("read buffer appended to rec_list");
-
-        /* wake up pending synchronous reads */
-	wake_up (&ccp->readwait);
-}
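Editor's note: the locked take-first-buffer sequence above reappears almost
verbatim in auerchar_read() and auerchar_write() below. A hedged sketch of a
helper that could factor it out; auerbuf_take() and its pauerbufctl_t
parameter type are illustrative assumptions, not names from the original
driver:

	/* editor's sketch: detach and return the first buffer queued on
	 * the given list, or NULL if the list is empty */
	static pauerbuf_t auerbuf_take(pauerbufctl_t bcp, struct list_head *q)
	{
		unsigned long flags;
		pauerbuf_t bp = NULL;

		spin_lock_irqsave(&bcp->lock, flags);
		if (!list_empty(q)) {
			bp = list_entry(q->next, auerbuf_t, buff_list);
			list_del(&bp->buff_list);
		}
		spin_unlock_irqrestore(&bcp->lock, flags);
		return bp;
	}

With such a helper the block above would reduce to
newbp = auerbuf_take(&ccp->bufctl, &ccp->bufctl.free_buff_list).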
-
-
-/* Delete an auerswald driver context */
-static void auerswald_delete( pauerswald_t cp)
-{
-	dbg( "auerswald_delete");
-	if (cp == NULL)
-		return;
-
-	/* Wake up all processes waiting for a buffer */
-	wake_up (&cp->bufferwait);
-
-	/* Cleaning up */
-	auerswald_int_release (cp);
-	auerchain_free (&cp->controlchain);
-	auerbuf_free_buffers (&cp->bufctl);
-
-	/* release the memory */
-	kfree( cp);
-}
-
-
-/* Delete an auerswald character context */
-static void auerchar_delete( pauerchar_t ccp)
-{
-	dbg ("auerchar_delete");
-	if (ccp == NULL)
-		return;
-
-        /* wake up pending synchronous reads */
-	ccp->removed = 1;
-	wake_up (&ccp->readwait);
-
-	/* remove the read buffer */
-	if (ccp->readbuf) {
-		auerbuf_releasebuf (ccp->readbuf);
-		ccp->readbuf = NULL;
-	}
-
-	/* remove the character buffers */
-	auerbuf_free_buffers (&ccp->bufctl);
-
-	/* release the memory */
-	kfree( ccp);
-}
-
-
-/* add a new service to the device
-   scp->id must be set!
-   return: 0 if OK, else error code
-*/
-static int auerswald_addservice (pauerswald_t cp, pauerscon_t scp)
-{
-	int ret;
-
-	/* is the device available? */
-	if (!cp->usbdev) {
-		dbg ("usbdev == NULL");
-		return -EIO;	/*no: can not add a service, sorry*/
-	}
-
-	/* is the service available? */
-	if (cp->services[scp->id]) {
-		dbg ("service is busy");
-                return -EBUSY;
-	}
-
-	/* device is available, service is free */
-	cp->services[scp->id] = scp;
-
-	/* register service in device */
-	ret = auerchain_control_msg(
-		&cp->controlchain,                      /* pointer to control chain */
-		cp->usbdev,                             /* pointer to device */
-		usb_sndctrlpipe (cp->usbdev, 0),        /* pipe to control endpoint */
-		AUV_CHANNELCTL,                         /* USB message request value */
-		AUT_WREQ,                               /* USB message request type value */
-		0x01,              /* open                 USB message value */
-		scp->id,            		        /* USB message index value */
-		NULL,                                   /* pointer to the data to send */
-		0,                                      /* length in bytes of the data to send */
-		HZ * 2);                                /* time to wait for the message to complete before timing out */
-	if (ret < 0) {
-		dbg ("auerswald_addservice: auerchain_control_msg returned error code %d", ret);
-		/* undo above actions */
-		cp->services[scp->id] = NULL;
-		return ret;
-	}
-
-	dbg ("auerswald_addservice: channel open OK");
-	return 0;
-}
-
-
-/* remove a service from the device
-   scp->id must be set! */
-static void auerswald_removeservice (pauerswald_t cp, pauerscon_t scp)
-{
-	dbg ("auerswald_removeservice called");
-
-	/* check if we have a service allocated */
-	if (scp->id == AUH_UNASSIGNED)
-		return;
-
-	/* If there is a device: close the channel */
-	if (cp->usbdev) {
-		/* Close the service channel inside the device */
-		int ret = auerchain_control_msg(
-		&cp->controlchain,            		/* pointer to control chain */
-		cp->usbdev,         		        /* pointer to device */
-		usb_sndctrlpipe (cp->usbdev, 0),	/* pipe to control endpoint */
-		AUV_CHANNELCTL,                         /* USB message request value */
-		AUT_WREQ,                               /* USB message request type value */
-		0x00,                                   /* close: USB message value */
-		scp->id,            		        /* USB message index value */
-		NULL,                                   /* pointer to the data to send */
-		0,                                      /* length in bytes of the data to send */
-		HZ * 2);                                /* time to wait for the message to complete before timing out */
-		if (ret < 0) {
-			dbg ("auerswald_removeservice: auerchain_control_msg returned error code %d", ret);
-		}
-		else {
-			dbg ("auerswald_removeservice: channel close OK");
-		}
-	}
-
-	/* remove the service from the device */
-	cp->services[scp->id] = NULL;
-	scp->id = AUH_UNASSIGNED;
-}
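Editor's note: auerswald_addservice() and auerswald_removeservice() send the
same control message and differ only in the wValue (0x01 opens the channel,
0x00 closes it). A sketch of a shared helper built from the two calls above;
the auerswald_channelctl() name is the editor's, not the driver's:

	/* editor's sketch: common open/close channel control message */
	static int auerswald_channelctl(pauerswald_t cp, unsigned int id,
					int open)
	{
		return auerchain_control_msg(
			&cp->controlchain,		/* control chain */
			cp->usbdev,			/* device */
			usb_sndctrlpipe(cp->usbdev, 0),	/* control pipe */
			AUV_CHANNELCTL,			/* request value */
			AUT_WREQ,			/* request type */
			open ? 0x01 : 0x00,		/* value: open/close */
			id,				/* index: channel id */
			NULL, 0,			/* no data stage */
			HZ * 2);			/* timeout */
	}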
-
-
-/* --------------------------------------------------------------------- */
-/* Char device functions                                                 */
-
-/* Open a new character device */
-static int auerchar_open (struct inode *inode, struct file *file)
-{
-	int dtindex = iminor(inode);
-	pauerswald_t cp = NULL;
-	pauerchar_t ccp = NULL;
-	struct usb_interface *intf;
-        int ret;
-
-        /* minor number in range? */
-	if (dtindex < 0) {
-		return -ENODEV;
-        }
-	intf = usb_find_interface(&auerswald_driver, dtindex);
-	if (!intf) {
-		return -ENODEV;
-	}
-
-	/* usb device available? */
-	cp = usb_get_intfdata (intf);
-	if (cp == NULL) {
-		return -ENODEV;
-	}
-	if (mutex_lock_interruptible(&cp->mutex)) {
-		return -ERESTARTSYS;
-	}
-
-	/* we have access to the device. Now lets allocate memory */
-	ccp = kzalloc(sizeof(auerchar_t), GFP_KERNEL);
-	if (ccp == NULL) {
-		err ("out of memory");
-		ret = -ENOMEM;
-		goto ofail;
-	}
-
-	/* Initialize device descriptor */
-	mutex_init(&ccp->mutex);
-	mutex_init(&ccp->readmutex);
-        auerbuf_init (&ccp->bufctl);
-        ccp->scontext.id = AUH_UNASSIGNED;
-        ccp->scontext.dispatch = auerchar_ctrlread_dispatch;
-	ccp->scontext.disconnect = auerchar_disconnect;
-	init_waitqueue_head (&ccp->readwait);
-
-	ret = auerbuf_setup (&ccp->bufctl, AU_RBUFFERS, cp->maxControlLength+AUH_SIZE);
-       	if (ret) {
-		goto ofail;
-	}
-
-	cp->open_count++;
-	ccp->auerdev = cp;
-	dbg("open %s as /dev/%s", cp->dev_desc, cp->name);
-	mutex_unlock(&cp->mutex);
-
-	/* file IO stuff */
-	file->f_pos = 0;
-	file->private_data = ccp;
-	return nonseekable_open(inode, file);
-
-	/* Error exit */
-ofail:	mutex_unlock(&cp->mutex);
-	auerchar_delete (ccp);
-	return ret;
-}
-
-
-/* IOCTL functions */
-static long auerchar_ioctl(struct file *file, unsigned int cmd,
-							unsigned long arg)
-{
-	pauerchar_t ccp = (pauerchar_t) file->private_data;
-	int ret = 0;
-        audevinfo_t devinfo;
-        pauerswald_t cp = NULL;
-	unsigned int u;
-	unsigned int __user *user_arg = (unsigned int __user *)arg;
-
-        dbg ("ioctl");
-
-	/* get the mutexes */
-	if (mutex_lock_interruptible(&ccp->mutex)) {
-		return -ERESTARTSYS;
-	}
-	cp = ccp->auerdev;
-	if (!cp) {
-		mutex_unlock(&ccp->mutex);
-                return -ENODEV;
-	}
-	if (mutex_lock_interruptible(&cp->mutex)) {
-		mutex_unlock(&ccp->mutex);
-		return -ERESTARTSYS;
-	}
-
-	/* Check for removal */
-	if (!cp->usbdev) {
-		mutex_unlock(&cp->mutex);
-		mutex_unlock(&ccp->mutex);
-                return -ENODEV;
-	}
-	lock_kernel();
-	switch (cmd) {
-
-	/* return != 0 if Transmit channel ready to send */
-	case IOCTL_AU_TXREADY:
-		dbg ("IOCTL_AU_TXREADY");
-		u   = ccp->auerdev
-		   && (ccp->scontext.id != AUH_UNASSIGNED)
-		   && !list_empty (&cp->bufctl.free_buff_list);
-	        ret = put_user (u, user_arg);
-		break;
-
-	/* return != 0 if connected to a service channel */
-	case IOCTL_AU_CONNECT:
-		dbg ("IOCTL_AU_CONNECT");
-		u = (ccp->scontext.id != AUH_UNASSIGNED);
-	        ret = put_user (u, user_arg);
-		break;
-
-	/* return != 0 if Receive Data available */
-	case IOCTL_AU_RXAVAIL:
-		dbg ("IOCTL_AU_RXAVAIL");
-		if (ccp->scontext.id == AUH_UNASSIGNED) {
-                        ret = -EIO;
-                        break;
-                }
-		u = 0;	/* no data */
-		if (ccp->readbuf) {
-			int restlen = ccp->readbuf->len - ccp->readoffset;
-			if (restlen > 0)
-				u = 1;
-		}
-		if (!u) {
-        		if (!list_empty (&ccp->bufctl.rec_buff_list)) {
-				u = 1;
-			}
-		}
-	        ret = put_user (u, user_arg);
-		break;
-
-	/* return the max. buffer length for the device */
-	case IOCTL_AU_BUFLEN:
-		dbg ("IOCTL_AU_BUFLEN");
-		u = cp->maxControlLength;
-	        ret = put_user (u, user_arg);
-		break;
-
-	/* requesting a service channel */
-        case IOCTL_AU_SERVREQ:
-		dbg ("IOCTL_AU_SERVREQ");
-                /* requesting a service means: release the previous one first */
-		auerswald_removeservice (cp, &ccp->scontext);
-		/* get the channel number */
-		ret = get_user (u, user_arg);
-		if (ret) {
-			break;
-		}
-		if ((u < AUH_FIRSTUSERCH) || (u >= AUH_TYPESIZE)) {
-                        ret = -EIO;
-                        break;
-                }
-                dbg ("auerchar service request parameters are ok");
-		ccp->scontext.id = u;
-
-		/* request the service now */
-		ret = auerswald_addservice (cp, &ccp->scontext);
-		if (ret) {
-			/* no: revert service entry */
-                	ccp->scontext.id = AUH_UNASSIGNED;
-		}
-		break;
-
-	/* get a string descriptor for the device */
-	case IOCTL_AU_DEVINFO:
-		dbg ("IOCTL_AU_DEVINFO");
-                if (copy_from_user (&devinfo, (void __user *) arg, sizeof (audevinfo_t))) {
-        		ret = -EFAULT;
-	        	break;
-                }
-		u = strlen(cp->dev_desc)+1;
-		if (u > devinfo.bsize) {
-			u = devinfo.bsize;
-		}
-		ret = copy_to_user(devinfo.buf, cp->dev_desc, u) ? -EFAULT : 0;
-		break;
-
-	/* get the max. string descriptor length */
-        case IOCTL_AU_SLEN:
-		dbg ("IOCTL_AU_SLEN");
-		u = AUSI_DLEN;
-	        ret = put_user (u, user_arg);
-		break;
-
-	default:
-		dbg ("IOCTL_AU_UNKNOWN");
-		ret = -ENOTTY;
-		break;
-        }
-        unlock_kernel();
-	/* release the mutexes */
-	mutex_unlock(&cp->mutex);
-	mutex_unlock(&ccp->mutex);
-	return ret;
-}
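Editor's note: a hypothetical userspace sketch exercising the ioctls handled
above. It assumes the driver's ioctl header for the IOCTL_AU_* and
AUH_FIRSTUSERCH constants, and a /dev/usb/auer0 node matching the
"usb/auer%d" naming set up in auerswald_probe() below:

	/* editor's sketch: request a service channel, then poll for data */
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	/* plus the driver's header for IOCTL_AU_* and AUH_FIRSTUSERCH */

	int main(void)
	{
		unsigned int chan = AUH_FIRSTUSERCH, avail = 0;
		int fd = open("/dev/usb/auer0", O_RDWR);

		if (fd < 0)
			return 1;
		if (ioctl(fd, IOCTL_AU_SERVREQ, &chan) == 0 &&
		    ioctl(fd, IOCTL_AU_RXAVAIL, &avail) == 0 && avail)
			printf("data waiting on channel %u\n", chan);
		close(fd);
		return 0;
	}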
-
-/* Read data from the device */
-static ssize_t auerchar_read (struct file *file, char __user *buf, size_t count, loff_t * ppos)
-{
-        unsigned long flags;
-	pauerchar_t ccp = (pauerchar_t) file->private_data;
-        pauerbuf_t   bp = NULL;
-	wait_queue_t wait;
-
-        dbg ("auerchar_read");
-
-	/* Error checking */
-	if (!ccp)
-		return -EIO;
-	if (*ppos)
- 		return -ESPIPE;
-        if (count == 0)
-		return 0;
-
-	/* get the mutex */
-	if (mutex_lock_interruptible(&ccp->mutex))
-		return -ERESTARTSYS;
-
-	/* Can we expect to read something? */
-	if (ccp->scontext.id == AUH_UNASSIGNED) {
-		mutex_unlock(&ccp->mutex);
-                return -EIO;
-	}
-
-	/* only one reader per device allowed */
-	if (mutex_lock_interruptible(&ccp->readmutex)) {
-		mutex_unlock(&ccp->mutex);
-		return -ERESTARTSYS;
-	}
-
-	/* read data from readbuf, if available */
-doreadbuf:
-	bp = ccp->readbuf;
-	if (bp) {
-		/* read the maximum bytes */
-		int restlen = bp->len - ccp->readoffset;
-		if (restlen < 0)
-			restlen = 0;
-		if (count > restlen)
-			count = restlen;
-		if (count) {
-			if (copy_to_user (buf, bp->bufp+ccp->readoffset, count)) {
-				dbg ("auerswald_read: copy_to_user failed");
-				mutex_unlock(&ccp->readmutex);
-				mutex_unlock(&ccp->mutex);
-				return -EFAULT;
-			}
-		}
-		/* advance the read offset */
-		ccp->readoffset += count;
-		restlen -= count;
-		/* reuse the read buffer */
-		if (restlen <= 0) {
-			auerbuf_releasebuf (bp);
-			ccp->readbuf = NULL;
-		}
-		/* return with number of bytes read */
-		if (count) {
-			mutex_unlock(&ccp->readmutex);
-			mutex_unlock(&ccp->mutex);
-			return count;
-		}
-	}
-
-	/* a read buffer is not available. Try to get the next data block. */
-doreadlist:
-	/* Preparing for sleep */
-	init_waitqueue_entry (&wait, current);
-	set_current_state (TASK_INTERRUPTIBLE);
-	add_wait_queue (&ccp->readwait, &wait);
-
-	bp = NULL;
-	spin_lock_irqsave (&ccp->bufctl.lock, flags);
-        if (!list_empty (&ccp->bufctl.rec_buff_list)) {
-                /* yes: get the entry */
-                struct list_head *tmp = ccp->bufctl.rec_buff_list.next;
-                list_del (tmp);
-                bp = list_entry (tmp, auerbuf_t, buff_list);
-        }
-        spin_unlock_irqrestore (&ccp->bufctl.lock, flags);
-
-	/* have we got data? */
-	if (bp) {
-		ccp->readbuf = bp;
-		ccp->readoffset = AUH_SIZE; /* for headerbyte */
-		set_current_state (TASK_RUNNING);
-		remove_wait_queue (&ccp->readwait, &wait);
-		goto doreadbuf;		  /* now we can read! */
-	}
-
-	/* no data available. Should we wait? */
-	if (file->f_flags & O_NONBLOCK) {
-                dbg ("No read buffer available, returning -EAGAIN");
-		set_current_state (TASK_RUNNING);
-		remove_wait_queue (&ccp->readwait, &wait);
-		mutex_unlock(&ccp->readmutex);
-		mutex_unlock(&ccp->mutex);
-		return -EAGAIN;  /* nonblocking, no data available */
-        }
-
-	/* yes, we should wait! */
-	mutex_unlock(&ccp->mutex); /* allow other operations while we wait */
-	schedule();
-	remove_wait_queue (&ccp->readwait, &wait);
-	if (signal_pending (current)) {
-		/* woken up by a signal */
-		mutex_unlock(&ccp->readmutex);
-		return -ERESTARTSYS;
-	}
-
-	/* Anything left to read? */
-	if ((ccp->scontext.id == AUH_UNASSIGNED) || ccp->removed) {
-		mutex_unlock(&ccp->readmutex);
-		return -EIO;
-	}
-
-	if (mutex_lock_interruptible(&ccp->mutex)) {
-		mutex_unlock(&ccp->readmutex);
-		return -ERESTARTSYS;
-	}
-
-	/* try to read the incoming data again */
-	goto doreadlist;
-}
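Editor's note: the sleep above is open-coded (init_waitqueue_entry,
add_wait_queue, schedule) because ccp->mutex has to be dropped between
queueing and sleeping. Where the wakeup condition can be tested without that
mutex, the same blocking step can be written with the standard helper from
<linux/wait.h>. A minimal sketch, assuming a rec_buff_ready() predicate that
takes bufctl.lock and tests rec_buff_list:

	/* editor's sketch: block until data arrives or the device goes
	 * away; returns 0, or -ERESTARTSYS if a signal interrupted us */
	static int auerchar_wait_for_data(pauerchar_t ccp)
	{
		return wait_event_interruptible(ccp->readwait,
				ccp->removed || rec_buff_ready(ccp));
	}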
-
-
-/* Write a data block into the right service channel of the device */
-static ssize_t auerchar_write (struct file *file, const char __user *buf, size_t len, loff_t *ppos)
-{
-	pauerchar_t ccp = (pauerchar_t) file->private_data;
-        pauerswald_t cp = NULL;
-        pauerbuf_t bp;
-        unsigned long flags;
-	int ret;
-	wait_queue_t wait;
-
-        dbg ("auerchar_write %zd bytes", len);
-
-	/* Error checking */
-	if (!ccp)
-		return -EIO;
-        if (*ppos)
-		return -ESPIPE;
-        if (len == 0)
-                return 0;
-
-write_again:
-	/* get the mutex */
-	if (mutex_lock_interruptible(&ccp->mutex))
-		return -ERESTARTSYS;
-
-	/* Can we expect to write something? */
-	if (ccp->scontext.id == AUH_UNASSIGNED) {
-		mutex_unlock(&ccp->mutex);
-                return -EIO;
-	}
-
-	cp = ccp->auerdev;
-	if (!cp) {
-		mutex_unlock(&ccp->mutex);
-		return -ERESTARTSYS;
-	}
-	if (mutex_lock_interruptible(&cp->mutex)) {
-		mutex_unlock(&ccp->mutex);
-		return -ERESTARTSYS;
-	}
-	if (!cp->usbdev) {
-		mutex_unlock(&cp->mutex);
-		mutex_unlock(&ccp->mutex);
-		return -EIO;
-	}
-	/* Prepare for sleep */
-	init_waitqueue_entry (&wait, current);
-	set_current_state (TASK_INTERRUPTIBLE);
-	add_wait_queue (&cp->bufferwait, &wait);
-
-	/* Try to get a buffer from the device pool.
-	   We can't use a buffer from ccp->bufctl because the write
-	   command will last beyond a release() */
-	bp = NULL;
-	spin_lock_irqsave (&cp->bufctl.lock, flags);
-        if (!list_empty (&cp->bufctl.free_buff_list)) {
-                /* yes: get the entry */
-                struct list_head *tmp = cp->bufctl.free_buff_list.next;
-                list_del (tmp);
-                bp = list_entry (tmp, auerbuf_t, buff_list);
-        }
-        spin_unlock_irqrestore (&cp->bufctl.lock, flags);
-
-	/* are there any buffers left? */
-	if (!bp) {
-		mutex_unlock(&cp->mutex);
-		mutex_unlock(&ccp->mutex);
-
-		/* NONBLOCK: don't wait */
-		if (file->f_flags & O_NONBLOCK) {
-			set_current_state (TASK_RUNNING);
-			remove_wait_queue (&cp->bufferwait, &wait);
-			return -EAGAIN;
-		}
-
-		/* BLOCKING: wait */
-		schedule();
-		remove_wait_queue (&cp->bufferwait, &wait);
-		if (signal_pending (current)) {
-			/* woken up by a signal */
-			return -ERESTARTSYS;
-		}
-		goto write_again;
-	} else {
-		set_current_state (TASK_RUNNING);
-		remove_wait_queue (&cp->bufferwait, &wait);
-	}
-
-	/* protect against too big write requests */
-	if (len > cp->maxControlLength)
-		len = cp->maxControlLength;
-
-	/* Fill the buffer */
-	if (copy_from_user ( bp->bufp+AUH_SIZE, buf, len)) {
-		dbg ("copy_from_user failed");
-		auerbuf_releasebuf (bp);
-		/* Wake up all processes waiting for a buffer */
-		wake_up (&cp->bufferwait);
-		mutex_unlock(&cp->mutex);
-		mutex_unlock(&ccp->mutex);
-		return -EFAULT;
-	}
-
-	/* set the header byte */
-        *(bp->bufp) = ccp->scontext.id | AUH_DIRECT | AUH_UNSPLIT;
-
-	/* Set the transfer Parameters */
-	bp->len = len+AUH_SIZE;
-        bp->dr->bRequestType = AUT_WREQ;
-	bp->dr->bRequest     = AUV_WBLOCK;
-	bp->dr->wValue       = cpu_to_le16 (0);
-	bp->dr->wIndex       = cpu_to_le16 (ccp->scontext.id | AUH_DIRECT | AUH_UNSPLIT);
-	bp->dr->wLength      = cpu_to_le16 (len+AUH_SIZE);
-	usb_fill_control_urb (bp->urbp, cp->usbdev, usb_sndctrlpipe (cp->usbdev, 0),
-                   (unsigned char*)bp->dr, bp->bufp, len+AUH_SIZE,
-		    auerchar_ctrlwrite_complete, bp);
-	/* up we go */
-	ret = auerchain_submit_urb (&cp->controlchain, bp->urbp);
-	mutex_unlock(&cp->mutex);
-	if (ret) {
-		dbg ("auerchar_write: nonzero result of auerchain_submit_urb %d", ret);
-		auerbuf_releasebuf (bp);
-		/* Wake up all processes waiting for a buffer */
-		wake_up (&cp->bufferwait);
-		mutex_unlock(&ccp->mutex);
-		return -EIO;
-	}
-	else {
-		dbg ("auerchar_write: Write OK");
-		mutex_unlock(&ccp->mutex);
-		return len;
-	}
-}
-
-
-/* Close a character device */
-static int auerchar_release (struct inode *inode, struct file *file)
-{
-	pauerchar_t ccp = (pauerchar_t) file->private_data;
-	pauerswald_t cp;
-	dbg("release");
-
-	mutex_lock(&ccp->mutex);
-	cp = ccp->auerdev;
-	if (cp) {
-		mutex_lock(&cp->mutex);
-		/* remove an open service */
-		auerswald_removeservice (cp, &ccp->scontext);
-		/* detach from device */
-		if ((--cp->open_count <= 0) && (cp->usbdev == NULL)) {
-			/* usb device waits for removal */
-			mutex_unlock(&cp->mutex);
-			auerswald_delete (cp);
-		} else {
-			mutex_unlock(&cp->mutex);
-		}
-		cp = NULL;
-		ccp->auerdev = NULL;
-	}
-	mutex_unlock(&ccp->mutex);
-	auerchar_delete (ccp);
-
-	return 0;
-}
-
-
-/*----------------------------------------------------------------------*/
-/* File operation structure                                             */
-static const struct file_operations auerswald_fops =
-{
-	.owner =	THIS_MODULE,
-	.llseek =	no_llseek,
-	.read =		auerchar_read,
-	.write =        auerchar_write,
-	.unlocked_ioctl = auerchar_ioctl,
-	.open =		auerchar_open,
-	.release =	auerchar_release,
-};
-
-static struct usb_class_driver auerswald_class = {
-	.name =		"auer%d",
-	.fops =		&auerswald_fops,
-	.minor_base =	AUER_MINOR_BASE,
-};
-
-
-/* --------------------------------------------------------------------- */
-/* Special USB driver functions                                          */
-
-/* Probe if this driver wants to serve a USB device
-
-   This entry point is called whenever a new device is attached to the bus.
-   Then the device driver has to create a new instance of its internal data
-   structures for the new device.
-
-   The  dev argument specifies the device context, which contains pointers
-   to all USB descriptors. The  interface argument specifies the interface
-   number. If a USB driver wants to bind itself to a particular device and
-   interface it has to return a pointer. This pointer normally references
-   the device driver's context structure.
-
-   Probing is normally done by checking the vendor and product identifications
-   or the class and subclass definitions. If they match, the interface number
-   is compared with the ones supported by the driver. When probing is done
-   class-based, it might be necessary to parse some more USB descriptors,
-   because the device properties can differ over a wide range.
-*/
-static int auerswald_probe (struct usb_interface *intf,
-			    const struct usb_device_id *id)
-{
-	struct usb_device *usbdev = interface_to_usbdev(intf);
-	pauerswald_t cp = NULL;
-	unsigned int u = 0;
-	__le16 *pbuf;
-	int ret;
-
-	dbg ("probe: vendor id 0x%x, device id 0x%x",
-	     le16_to_cpu(usbdev->descriptor.idVendor),
-	     le16_to_cpu(usbdev->descriptor.idProduct));
-
-        /* we use only the first -and only- interface */
-        if (intf->altsetting->desc.bInterfaceNumber != 0)
-		return -ENODEV;
-
-	/* allocate memory for our device and initialize it */
-	cp = kzalloc (sizeof(auerswald_t), GFP_KERNEL);
-	if (cp == NULL) {
-		err ("out of memory");
-		goto pfail;
-	}
-
-	/* Initialize device descriptor */
-	mutex_init(&cp->mutex);
-	cp->usbdev = usbdev;
-	auerchain_init (&cp->controlchain);
-        auerbuf_init (&cp->bufctl);
-	init_waitqueue_head (&cp->bufferwait);
-
-	ret = usb_register_dev(intf, &auerswald_class);
-	if (ret) {
-		err ("Not able to get a minor for this device.");
-		goto pfail;
-	}
-
-	/* Give the device a name */
-	sprintf (cp->name, "usb/auer%d", intf->minor);
-
-	/* Store the index */
-	cp->dtindex = intf->minor;
-
-	/* Get the usb version of the device */
-	cp->version = le16_to_cpu(cp->usbdev->descriptor.bcdDevice);
-	dbg ("Version is %X", cp->version);
-
-	/* allow the device some time to settle */
-	msleep(334);
-
-	/* Try to get a suitable textual description of the device */
-	/* Device name:*/
-	ret = usb_string( cp->usbdev, AUSI_DEVICE, cp->dev_desc, AUSI_DLEN-1);
-	if (ret >= 0) {
-		u += ret;
-		/* Append Serial Number */
-		memcpy(&cp->dev_desc[u], ",Ser# ", 6);
-		u += 6;
-		ret = usb_string( cp->usbdev, AUSI_SERIALNR, &cp->dev_desc[u], AUSI_DLEN-u-1);
-		if (ret >= 0) {
-			u += ret;
-			/* Append subscriber number */
-			memcpy(&cp->dev_desc[u], ", ", 2);
-			u += 2;
-			ret = usb_string( cp->usbdev, AUSI_MSN, &cp->dev_desc[u], AUSI_DLEN-u-1);
-			if (ret >= 0) {
-				u += ret;
-			}
-		}
-	}
-	cp->dev_desc[u] = '\0';
-	info("device is a %s", cp->dev_desc);
-
-        /* get the maximum allowed control transfer length */
-        pbuf = kmalloc(2, GFP_KERNEL);    /* use an allocated buffer because of urb target */
-        if (!pbuf) {
-		err( "out of memory");
-		goto pfail;
-	}
-        ret = usb_control_msg(cp->usbdev,           /* pointer to device */
-                usb_rcvctrlpipe( cp->usbdev, 0 ),   /* pipe to control endpoint */
-                AUV_GETINFO,                        /* USB message request value */
-                AUT_RREQ,                           /* USB message request type value */
-                0,                                  /* USB message value */
-                AUDI_MBCTRANS,                      /* USB message index value */
-                pbuf,                               /* pointer to the receive buffer */
-                2,                                  /* length of the buffer */
-                2000);                            /* time to wait for the message to complete before timing out */
-        if (ret == 2) {
-	        cp->maxControlLength = le16_to_cpup(pbuf);
-                kfree(pbuf);
-                dbg("setup: max. allowed control transfersize is %d bytes", cp->maxControlLength);
-        } else {
-                kfree(pbuf);
-                err("setup: getting max. allowed control transfer length failed with error %d", ret);
-		goto pfail;
-        }
-
-	/* allocate a chain for the control messages */
-        if (auerchain_setup (&cp->controlchain, AUCH_ELEMENTS)) {
-		err ("out of memory");
-		goto pfail;
-	}
-
-        /* allocate buffers for control messages */
-	if (auerbuf_setup (&cp->bufctl, AU_RBUFFERS, cp->maxControlLength+AUH_SIZE)) {
-		err ("out of memory");
-		goto pfail;
-	}
-
-	/* start the interrupt endpoint */
-	if (auerswald_int_open (cp)) {
-		err ("int endpoint failed");
-		goto pfail;
-	}
-
-	/* all OK */
-	usb_set_intfdata (intf, cp);
-	return 0;
-
-	/* Error exit: clean up the memory */
-pfail:	auerswald_delete (cp);
-	return -EIO;
-}
-
-
-/* Disconnect driver from a served device
-
-   This function is called whenever a device which was served by this driver
-   is disconnected.
-
-   The dev argument specifies the device context; driver_context is the
-   pointer previously registered by the probe function. After the
-   disconnect function returns, the USB framework completely deallocates
-   all data structures associated with this device. In particular, the
-   usb_device structure must no longer be used by the USB driver.
-*/
-static void auerswald_disconnect (struct usb_interface *intf)
-{
-	pauerswald_t cp = usb_get_intfdata (intf);
-	unsigned int u;
-
-	usb_set_intfdata (intf, NULL);
-	if (!cp)
-		return;
-
-	/* give back our USB minor number */
-	usb_deregister_dev(intf, &auerswald_class);
-
-	mutex_lock(&cp->mutex);
-	info ("device /dev/%s now disconnecting", cp->name);
-
-	/* Stop the interrupt endpoint */
-	auerswald_int_release (cp);
-
-	/* remove the control chain allocated in auerswald_probe
-	   This has the benefit of
-	   a) all pending (a)synchronous urbs are unlinked
-	   b) all buffers dealing with urbs are reclaimed
-	*/
-	auerchain_free (&cp->controlchain);
-
-	if (cp->open_count == 0) {
-		/* nobody is using this device. So we can clean up now */
-		mutex_unlock(&cp->mutex);
-		/* mutex_unlock() is possible here because no other task
-		   can open the device (see above). I don't want
-		   to kfree() a locked mutex. */
-
-		auerswald_delete (cp);
-	} else {
-		/* device is used. Remove the pointer to the
-		   usb device (it's not valid any more). The last
-		   release() will do the clean up */
-		cp->usbdev = NULL;
-		mutex_unlock(&cp->mutex);
-		/* Terminate waiting writers */
-		wake_up (&cp->bufferwait);
-		/* Inform all waiting readers */
-		for ( u = 0; u < AUH_TYPESIZE; u++) {
-			pauerscon_t scp = cp->services[u];
-			if (scp)
-				scp->disconnect( scp);
-		}
-	}
-}
-
-/* Descriptor for the devices which are served by this driver.
-   NOTE: this struct is parsed by the usbmanager install scripts.
-         Change it only with caution!
-*/
-static struct usb_device_id auerswald_ids [] = {
-	{ USB_DEVICE (ID_AUERSWALD, 0x00C0) },          /* COMpact 2104 USB */
-	{ USB_DEVICE (ID_AUERSWALD, 0x00DB) },          /* COMpact 4410/2206 USB */
-	{ USB_DEVICE (ID_AUERSWALD, 0x00DC) }, /* COMpact 4406 DSL */
-	{ USB_DEVICE (ID_AUERSWALD, 0x00DD) }, /* COMpact 2204 USB */
-	{ USB_DEVICE (ID_AUERSWALD, 0x00F1) },          /* Comfort 2000 System Telephone */
-	{ USB_DEVICE (ID_AUERSWALD, 0x00F2) },          /* Comfort 1200 System Telephone */
-        { }			                        /* Terminating entry */
-};
-
-/* Standard module device table */
-MODULE_DEVICE_TABLE (usb, auerswald_ids);
-
-/* Standard usb driver struct */
-static struct usb_driver auerswald_driver = {
-	.name =		"auerswald",
-	.probe =	auerswald_probe,
-	.disconnect =	auerswald_disconnect,
-	.id_table =	auerswald_ids,
-};
-
-
-/* --------------------------------------------------------------------- */
-/* Module loading/unloading                                              */
-
-/* Driver initialisation. Called after module loading.
-   NOTE: there is no concurrency at _init
-*/
-static int __init auerswald_init (void)
-{
-	int result;
-	dbg ("init");
-
-	/* register driver at the USB subsystem */
-	result = usb_register (&auerswald_driver);
-	if (result < 0) {
-		err ("driver could not be registered");
-		return result;
-	}
-	return 0;
-}
-
-/* Driver deinit. Called before module removal.
-   NOTE: there is no concurrency at _cleanup
-*/
-static void __exit auerswald_cleanup (void)
-{
-	dbg ("cleanup");
-	usb_deregister (&auerswald_driver);
-}
-
-/* --------------------------------------------------------------------- */
-/* Linux device driver module description                                */
-
-MODULE_AUTHOR (DRIVER_AUTHOR);
-MODULE_DESCRIPTION (DRIVER_DESC);
-MODULE_LICENSE ("GPL");
-
-module_init (auerswald_init);
-module_exit (auerswald_cleanup);
-
-/* --------------------------------------------------------------------- */
-
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
new file mode 100644
index 0000000..faca433
--- /dev/null
+++ b/drivers/usb/musb/Kconfig
@@ -0,0 +1,176 @@
+#
+# USB Dual Role (OTG-ready) Controller Drivers
+# for silicon based on Mentor Graphics INVENTRA designs
+#
+
+comment "Enable Host or Gadget support to see Inventra options"
+	depends on !USB && USB_GADGET=n
+
+# (M)HDRC = (Multipoint) Highspeed Dual-Role Controller
+config USB_MUSB_HDRC
+	depends on (USB || USB_GADGET) && HAVE_CLK
+	select TWL4030_USB if MACH_OMAP_3430SDP
+	tristate 'Inventra Highspeed Dual Role Controller (TI, ...)'
+	help
+	  Say Y here if your system has a dual role high speed USB
+	  controller based on the Mentor Graphics silicon IP.  Then
+	  configure options to match your silicon and the board
+	  it's being used with, including the USB peripheral role,
+	  or the USB host role, or both.
+
+	  Texas Instruments parts using this IP include DaVinci 644x,
+	  OMAP 243x, OMAP 343x, and TUSB 6010.
+
+	  If you do not know what this is, please say N.
+
+	  To compile this driver as a module, choose M here; the
+	  module will be called "musb_hdrc".
+
+config USB_MUSB_SOC
+	boolean
+	depends on USB_MUSB_HDRC
+	default y if ARCH_DAVINCI
+	default y if ARCH_OMAP2430
+	default y if ARCH_OMAP34XX
+	help
+	  Use a static <asm/arch/hdrc_cnf.h> file to describe how the
+	  controller is configured (endpoints, mechanisms, etc) on the
+	  current iteration of a given system-on-chip.
+
+comment "DaVinci 644x USB support"
+	depends on USB_MUSB_HDRC && ARCH_DAVINCI
+
+comment "OMAP 243x high speed USB support"
+	depends on USB_MUSB_HDRC && ARCH_OMAP2430
+
+comment "OMAP 343x high speed USB support"
+	depends on USB_MUSB_HDRC && ARCH_OMAP34XX
+
+config USB_TUSB6010
+	boolean "TUSB 6010 support"
+	depends on USB_MUSB_HDRC && !USB_MUSB_SOC
+	default y
+	help
+	  The TUSB 6010 chip, from Texas Instruments, connects a discrete
+	  HDRC core using a 16-bit parallel bus (NOR flash style) or VLYNQ
+	  (a high speed serial link).  It can use system-specific external
+	  DMA controllers.
+
+choice
+	prompt "Driver Mode"
+	depends on USB_MUSB_HDRC
+	help
+	  Dual-Role devices can support both host and peripheral roles,
+	  as well as the special "OTG Device" role, which can switch
+	  between both roles as needed.
+
+# use USB_MUSB_HDRC_HCD not USB_MUSB_HOST to #ifdef host side support;
+# OTG needs both roles, not just USB_MUSB_HOST.
+config USB_MUSB_HOST
+	depends on USB
+	bool "USB Host"
+	help
+	  Say Y here if your system supports the USB host role.
+	  If it has a USB "A" (rectangular), "Mini-A" (uncommon),
+	  or "Mini-AB" connector, it supports the host role.
+	  (With a "Mini-AB" connector, you should enable USB OTG.)
+
+# use USB_GADGET_MUSB_HDRC not USB_MUSB_PERIPHERAL to #ifdef peripheral
+# side support ... OTG needs both roles
+config USB_MUSB_PERIPHERAL
+	depends on USB_GADGET
+	bool "USB Peripheral (gadget stack)"
+	select USB_GADGET_MUSB_HDRC
+	help
+	  Say Y here if your system supports the USB peripheral role.
+	  If it has a USB "B" (squarish), "Mini-B", or "Mini-AB"
+	  connector, it supports the peripheral role.
+	  (With a "Mini-AB" connector, you should enable USB OTG.)
+
+config USB_MUSB_OTG
+	depends on USB && USB_GADGET && PM && EXPERIMENTAL
+	bool "Both host and peripheral:  USB OTG (On The Go) Device"
+	select USB_GADGET_MUSB_HDRC
+	select USB_OTG
+	help
+	   The most notable feature of USB OTG is support for a
+	   "Dual-Role" device, which can act as either a device
+	   or a host.  The initial role choice can be changed
+	   later, when two dual-role devices talk to each other.
+
+	   At this writing, the OTG support in this driver is incomplete,
+	   omitting the mandatory HNP or SRP protocols.  However, some
+	   of the cable based role switching works.  (That is, grounding
+	   the ID pin switches the controller to host mode, while leaving
+	   it floating leaves it in peripheral mode.)
+
+	   Select this if your system has a Mini-AB connector, or
+	   to simplify certain kinds of configuration.
+
+	   To implement your OTG Targeted Peripherals List (TPL), enable
+	   USB_OTG_WHITELIST and update "drivers/usb/core/otg_whitelist.h"
+	   to match your requirements.
+
+endchoice
+
+# enable peripheral support (including with OTG)
+config USB_GADGET_MUSB_HDRC
+	bool
+	depends on USB_MUSB_HDRC && (USB_MUSB_PERIPHERAL || USB_MUSB_OTG)
+#	default y
+#	select USB_GADGET_DUALSPEED
+#	select USB_GADGET_SELECTED
+
+# enables host support (including with OTG)
+config USB_MUSB_HDRC_HCD
+	bool
+	depends on USB_MUSB_HDRC && (USB_MUSB_HOST || USB_MUSB_OTG)
+	select USB_OTG if USB_GADGET_MUSB_HDRC
+	default y
+
+
+config MUSB_PIO_ONLY
+	bool 'Disable DMA (always use PIO)'
+	depends on USB_MUSB_HDRC
+	default y if USB_TUSB6010
+	help
+	  All data is copied between memory and FIFO by the CPU.
+	  DMA controllers are ignored.
+
+	  Do not select 'y' here unless DMA support for your SOC or board
+	  is unavailable (or unstable).  When DMA is enabled at compile time,
+	  you can still disable it at run time using the "use_dma=n" module
+	  parameter.
+
+config USB_INVENTRA_DMA
+	bool
+	depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
+	default ARCH_OMAP2430 || ARCH_OMAP34XX
+	help
+	  Enable DMA transfers using Mentor's engine.
+
+config USB_TI_CPPI_DMA
+	bool
+	depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
+	default ARCH_DAVINCI
+	help
+	  Enable DMA transfers when TI CPPI DMA is available.
+
+config USB_TUSB_OMAP_DMA
+	bool
+	depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
+	depends on USB_TUSB6010
+	depends on ARCH_OMAP
+	default y
+	help
+	  Enable DMA transfers on TUSB 6010 when OMAP DMA is available.
+
+config	USB_MUSB_LOGLEVEL
+	depends on USB_MUSB_HDRC
+	int  'Logging Level (0 - none / 3 - annoying / ... )'
+	default 0
+	help
+	  Set the logging level. 0 disables the debugging altogether,
+	  although when USB_DEBUG is set the value is at least 1.
+	  Starting at level 3, per-transfer (urb, usb_request, packet,
+	  or dma transfer) tracing may kick in.
diff --git a/drivers/usb/musb/Makefile b/drivers/usb/musb/Makefile
new file mode 100644
index 0000000..88eb67d
--- /dev/null
+++ b/drivers/usb/musb/Makefile
@@ -0,0 +1,86 @@
+#
+# for USB OTG silicon based on Mentor Graphics INVENTRA designs
+#
+
+musb_hdrc-objs := musb_core.o
+
+obj-$(CONFIG_USB_MUSB_HDRC)	+= musb_hdrc.o
+
+ifeq ($(CONFIG_ARCH_DAVINCI),y)
+	musb_hdrc-objs	+= davinci.o
+endif
+
+ifeq ($(CONFIG_USB_TUSB6010),y)
+	musb_hdrc-objs	+= tusb6010.o
+endif
+
+ifeq ($(CONFIG_ARCH_OMAP2430),y)
+	musb_hdrc-objs	+= omap2430.o
+endif
+
+ifeq ($(CONFIG_ARCH_OMAP34XX),y)
+	musb_hdrc-objs	+= omap2430.o
+endif
+
+ifeq ($(CONFIG_USB_GADGET_MUSB_HDRC),y)
+	musb_hdrc-objs		+= musb_gadget_ep0.o musb_gadget.o
+endif
+
+ifeq ($(CONFIG_USB_MUSB_HDRC_HCD),y)
+	musb_hdrc-objs		+= musb_virthub.o musb_host.o
+endif
+
+# the kconfig must guarantee that only one of the
+# possible I/O schemes will be enabled at a time ...
+# PIO only, or DMA (several potential schemes).
+# though PIO is always there to back up DMA, and for ep0
+
+ifneq ($(CONFIG_MUSB_PIO_ONLY),y)
+
+  ifeq ($(CONFIG_USB_INVENTRA_DMA),y)
+    musb_hdrc-objs		+= musbhsdma.o
+
+  else
+    ifeq ($(CONFIG_USB_TI_CPPI_DMA),y)
+      musb_hdrc-objs		+= cppi_dma.o
+
+    else
+      ifeq ($(CONFIG_USB_TUSB_OMAP_DMA),y)
+        musb_hdrc-objs		+= tusb6010_omap.o
+
+      endif
+    endif
+  endif
+endif
+
+
+################################################################################
+
+# FIXME remove all these extra -DMUSB_* things, stick to CONFIG_*
+
+ifeq ($(CONFIG_USB_INVENTRA_MUSB_HAS_AHB_ID),y)
+	EXTRA_CFLAGS += -DMUSB_AHB_ID
+endif
+
+# Debugging
+
+MUSB_DEBUG:=$(CONFIG_USB_MUSB_LOGLEVEL)
+
+ifeq ("$(strip $(MUSB_DEBUG))","")
+    ifdef CONFIG_USB_DEBUG
+	MUSB_DEBUG:=1
+    else
+	MUSB_DEBUG:=0
+    endif
+endif
+
+ifneq ($(MUSB_DEBUG),0)
+    EXTRA_CFLAGS += -DDEBUG
+
+    ifeq ($(CONFIG_PROC_FS),y)
+	musb_hdrc-objs		+= musb_procfs.o
+    endif
+
+endif
+
+EXTRA_CFLAGS += -DMUSB_DEBUG=$(MUSB_DEBUG)
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
new file mode 100644
index 0000000..5ad6d08
--- /dev/null
+++ b/drivers/usb/musb/cppi_dma.c
@@ -0,0 +1,1540 @@
+/*
+ * Copyright (C) 2005-2006 by Texas Instruments
+ *
+ * This file implements a DMA interface using TI's CPPI DMA.
+ * For now it's DaVinci-only, but CPPI isn't specific to DaVinci or USB.
+ * The TUSB6020, using VLYNQ, has CPPI that looks much like DaVinci.
+ */
+
+#include <linux/usb.h>
+
+#include "musb_core.h"
+#include "cppi_dma.h"
+
+
+/* CPPI DMA status 7-mar-2006:
+ *
+ * - See musb_{host,gadget}.c for more info
+ *
+ * - Correct RX DMA generally forces the engine into irq-per-packet mode,
+ *   which can easily saturate the CPU under non-mass-storage loads.
+ *
+ * NOTES 24-aug-2006 (2.6.18-rc4):
+ *
+ * - peripheral RXDMA wedged in a test with packets of length 512/512/1.
+ *   evidently after the 1 byte packet was received and acked, the queue
+ *   of BDs got garbaged so it wouldn't empty the fifo.  (rxcsr 0x2003,
+ *   and RX DMA0: 4 left, 80000000 8feff880, 8feff860 8feff860; 8f321401
+ *   004001ff 00000001 .. 8feff860)  Host was just getting NAKed on tx
+ *   of its next (512 byte) packet.  IRQ issues?
+ *
+ * REVISIT:  the "transfer DMA" glue between CPPI and USB fifos will
+ * evidently also directly update the RX and TX CSRs ... so audit all
+ * host and peripheral side DMA code to avoid CSR access after DMA has
+ * been started.
+ */
+
+/* REVISIT now we can avoid preallocating these descriptors; or
+ * more simply, switch to a global freelist not per-channel ones.
+ * Note: at full speed, 64 descriptors == 4K bulk data.
+ */
+#define NUM_TXCHAN_BD       64
+#define NUM_RXCHAN_BD       64
+
+static inline void cpu_drain_writebuffer(void)
+{
+	wmb();
+#ifdef	CONFIG_CPU_ARM926T
+	/* REVISIT this "should not be needed",
+	 * but lack of it sure seemed to hurt ...
+	 */
+	asm("mcr p15, 0, r0, c7, c10, 4 @ drain write buffer\n");
+#endif
+}
+
+static inline struct cppi_descriptor *cppi_bd_alloc(struct cppi_channel *c)
+{
+	struct cppi_descriptor	*bd = c->freelist;
+
+	if (bd)
+		c->freelist = bd->next;
+	return bd;
+}
+
+static inline void
+cppi_bd_free(struct cppi_channel *c, struct cppi_descriptor *bd)
+{
+	if (!bd)
+		return;
+	bd->next = c->freelist;
+	c->freelist = bd;
+}
+
+/*
+ *  Start DMA controller
+ *
+ *  Initialize the DMA controller as necessary.
+ */
+
+/* zero out entire rx state RAM entry for the channel */
+static void cppi_reset_rx(struct cppi_rx_stateram __iomem *rx)
+{
+	musb_writel(&rx->rx_skipbytes, 0, 0);
+	musb_writel(&rx->rx_head, 0, 0);
+	musb_writel(&rx->rx_sop, 0, 0);
+	musb_writel(&rx->rx_current, 0, 0);
+	musb_writel(&rx->rx_buf_current, 0, 0);
+	musb_writel(&rx->rx_len_len, 0, 0);
+	musb_writel(&rx->rx_cnt_cnt, 0, 0);
+}
+
+/* zero out entire tx state RAM entry for the channel */
+static void cppi_reset_tx(struct cppi_tx_stateram __iomem *tx, u32 ptr)
+{
+	musb_writel(&tx->tx_head, 0, 0);
+	musb_writel(&tx->tx_buf, 0, 0);
+	musb_writel(&tx->tx_current, 0, 0);
+	musb_writel(&tx->tx_buf_current, 0, 0);
+	musb_writel(&tx->tx_info, 0, 0);
+	musb_writel(&tx->tx_rem_len, 0, 0);
+	/* musb_writel(&tx->tx_dummy, 0, 0); */
+	musb_writel(&tx->tx_complete, 0, ptr);
+}
+
+static void __init cppi_pool_init(struct cppi *cppi, struct cppi_channel *c)
+{
+	int	j;
+
+	/* initialize channel fields */
+	c->head = NULL;
+	c->tail = NULL;
+	c->last_processed = NULL;
+	c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
+	c->controller = cppi;
+	c->is_rndis = 0;
+	c->freelist = NULL;
+
+	/* build the BD Free list for the channel */
+	for (j = 0; j < NUM_TXCHAN_BD + 1; j++) {
+		struct cppi_descriptor	*bd;
+		dma_addr_t		dma;
+
+		bd = dma_pool_alloc(cppi->pool, GFP_KERNEL, &dma);
+		bd->dma = dma;
+		cppi_bd_free(c, bd);
+	}
+}
+
+static int cppi_channel_abort(struct dma_channel *);
+
+static void cppi_pool_free(struct cppi_channel *c)
+{
+	struct cppi		*cppi = c->controller;
+	struct cppi_descriptor	*bd;
+
+	(void) cppi_channel_abort(&c->channel);
+	c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
+	c->controller = NULL;
+
+	/* free all its bds */
+	bd = c->last_processed;
+	do {
+		if (bd)
+			dma_pool_free(cppi->pool, bd, bd->dma);
+		bd = cppi_bd_alloc(c);
+	} while (bd);
+	c->last_processed = NULL;
+}
+
+static int __init cppi_controller_start(struct dma_controller *c)
+{
+	struct cppi	*controller;
+	void __iomem	*tibase;
+	int		i;
+
+	controller = container_of(c, struct cppi, controller);
+
+	/* do whatever is necessary to start controller */
+	for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
+		controller->tx[i].transmit = true;
+		controller->tx[i].index = i;
+	}
+	for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
+		controller->rx[i].transmit = false;
+		controller->rx[i].index = i;
+	}
+
+	/* setup BD list on a per channel basis */
+	for (i = 0; i < ARRAY_SIZE(controller->tx); i++)
+		cppi_pool_init(controller, controller->tx + i);
+	for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
+		cppi_pool_init(controller, controller->rx + i);
+
+	tibase =  controller->tibase;
+	INIT_LIST_HEAD(&controller->tx_complete);
+
+	/* initialise tx/rx channel head pointers to zero */
+	for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
+		struct cppi_channel	*tx_ch = controller->tx + i;
+		struct cppi_tx_stateram __iomem *tx;
+
+		INIT_LIST_HEAD(&tx_ch->tx_complete);
+
+		tx = tibase + DAVINCI_TXCPPI_STATERAM_OFFSET(i);
+		tx_ch->state_ram = tx;
+		cppi_reset_tx(tx, 0);
+	}
+	for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
+		struct cppi_channel	*rx_ch = controller->rx + i;
+		struct cppi_rx_stateram __iomem *rx;
+
+		INIT_LIST_HEAD(&rx_ch->tx_complete);
+
+		rx = tibase + DAVINCI_RXCPPI_STATERAM_OFFSET(i);
+		rx_ch->state_ram = rx;
+		cppi_reset_rx(rx);
+	}
+
+	/* enable individual cppi channels */
+	musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG,
+			DAVINCI_DMA_ALL_CHANNELS_ENABLE);
+	musb_writel(tibase, DAVINCI_RXCPPI_INTENAB_REG,
+			DAVINCI_DMA_ALL_CHANNELS_ENABLE);
+
+	/* enable tx/rx CPPI control */
+	musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);
+	musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);
+
+	/* disable RNDIS mode, also host rx RNDIS autorequest */
+	musb_writel(tibase, DAVINCI_RNDIS_REG, 0);
+	musb_writel(tibase, DAVINCI_AUTOREQ_REG, 0);
+
+	return 0;
+}
+
+/*
+ *  Stop DMA controller
+ *
+ *  De-initialize the DMA controller as necessary.
+ */
+
+static int cppi_controller_stop(struct dma_controller *c)
+{
+	struct cppi		*controller;
+	void __iomem		*tibase;
+	int			i;
+
+	controller = container_of(c, struct cppi, controller);
+
+	tibase = controller->tibase;
+	/* DISABLE INDIVIDUAL CHANNEL Interrupts */
+	musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG,
+			DAVINCI_DMA_ALL_CHANNELS_ENABLE);
+	musb_writel(tibase, DAVINCI_RXCPPI_INTCLR_REG,
+			DAVINCI_DMA_ALL_CHANNELS_ENABLE);
+
+	DBG(1, "Tearing down RX and TX Channels\n");
+	for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
+		/* FIXME restructure of txdma to use bds like rxdma */
+		controller->tx[i].last_processed = NULL;
+		cppi_pool_free(controller->tx + i);
+	}
+	for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
+		cppi_pool_free(controller->rx + i);
+
+	/* In the TX case proper teardown is supported: we disable TX/RX
+	 * CPPI only after the TX channels have been cleaned up, since TX
+	 * CPPI cannot be disabled before TX teardown is complete.
+	 */
+	/* disable tx/rx cppi */
+	musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);
+	musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);
+
+	return 0;
+}
+
+/* While dma channel is allocated, we only want the core irqs active
+ * for fault reports, otherwise we'd get irqs that we don't care about.
+ * Except for TX irqs, where dma done != fifo empty and reusable ...
+ *
+ * NOTE: docs don't say either way, but irq masking **enables** irqs.
+ *
+ * REVISIT same issue applies to pure PIO usage too, and non-cppi dma...
+ */
+static inline void core_rxirq_disable(void __iomem *tibase, unsigned epnum)
+{
+	musb_writel(tibase, DAVINCI_USB_INT_MASK_CLR_REG, 1 << (epnum + 8));
+}
+
+static inline void core_rxirq_enable(void __iomem *tibase, unsigned epnum)
+{
+	musb_writel(tibase, DAVINCI_USB_INT_MASK_SET_REG, 1 << (epnum + 8));
+}
+
+
+/*
+ * Allocate a CPPI Channel for DMA.  With CPPI, channels are bound to
+ * each transfer direction of a non-control endpoint, so allocating
+ * (and deallocating) is mostly a way to notice bad housekeeping on
+ * the software side.  We assume the irqs are always active.
+ */
+static struct dma_channel *
+cppi_channel_allocate(struct dma_controller *c,
+		struct musb_hw_ep *ep, u8 transmit)
+{
+	struct cppi		*controller;
+	u8			index;
+	struct cppi_channel	*cppi_ch;
+	void __iomem		*tibase;
+
+	controller = container_of(c, struct cppi, controller);
+	tibase = controller->tibase;
+
+	/* ep0 doesn't use DMA; remember cppi indices are 0..N-1 */
+	index = ep->epnum - 1;
+
+	/* return the corresponding CPPI Channel Handle, and
+	 * probably disable the non-CPPI irq until we need it.
+	 */
+	if (transmit) {
+		if (index >= ARRAY_SIZE(controller->tx)) {
+			DBG(1, "no %cX%d CPPI channel\n", 'T', index);
+			return NULL;
+		}
+		cppi_ch = controller->tx + index;
+	} else {
+		if (index >= ARRAY_SIZE(controller->rx)) {
+			DBG(1, "no %cX%d CPPI channel\n", 'R', index);
+			return NULL;
+		}
+		cppi_ch = controller->rx + index;
+		core_rxirq_disable(tibase, ep->epnum);
+	}
+
+	/* REVISIT make this an error later once the same driver code works
+	 * with the other DMA engine too
+	 */
+	if (cppi_ch->hw_ep)
+		DBG(1, "re-allocating DMA%d %cX channel %p\n",
+				index, transmit ? 'T' : 'R', cppi_ch);
+	cppi_ch->hw_ep = ep;
+	cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;
+
+	DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R');
+	return &cppi_ch->channel;
+}
+
+/* Release a CPPI Channel.  */
+static void cppi_channel_release(struct dma_channel *channel)
+{
+	struct cppi_channel	*c;
+	void __iomem		*tibase;
+
+	/* REVISIT:  for paranoia, check state and abort if needed... */
+
+	c = container_of(channel, struct cppi_channel, channel);
+	tibase = c->controller->tibase;
+	if (!c->hw_ep)
+		DBG(1, "releasing idle DMA channel %p\n", c);
+	else if (!c->transmit)
+		core_rxirq_enable(tibase, c->index + 1);
+
+	/* for now, leave its cppi IRQ enabled (we won't trigger it) */
+	c->hw_ep = NULL;
+	channel->status = MUSB_DMA_STATUS_UNKNOWN;
+}
+
+/* Context: controller irqlocked */
+static void
+cppi_dump_rx(int level, struct cppi_channel *c, const char *tag)
+{
+	void __iomem			*base = c->controller->mregs;
+	struct cppi_rx_stateram __iomem	*rx = c->state_ram;
+
+	musb_ep_select(base, c->index + 1);
+
+	DBG(level, "RX DMA%d%s: %d left, csr %04x, "
+			"%08x H%08x S%08x C%08x, "
+			"B%08x L%08x %08x .. %08x"
+			"\n",
+		c->index, tag,
+		musb_readl(c->controller->tibase,
+			DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index),
+		musb_readw(c->hw_ep->regs, MUSB_RXCSR),
+
+		musb_readl(&rx->rx_skipbytes, 0),
+		musb_readl(&rx->rx_head, 0),
+		musb_readl(&rx->rx_sop, 0),
+		musb_readl(&rx->rx_current, 0),
+
+		musb_readl(&rx->rx_buf_current, 0),
+		musb_readl(&rx->rx_len_len, 0),
+		musb_readl(&rx->rx_cnt_cnt, 0),
+		musb_readl(&rx->rx_complete, 0)
+		);
+}
+
+/* Context: controller irqlocked */
+static void
+cppi_dump_tx(int level, struct cppi_channel *c, const char *tag)
+{
+	void __iomem			*base = c->controller->mregs;
+	struct cppi_tx_stateram __iomem	*tx = c->state_ram;
+
+	musb_ep_select(base, c->index + 1);
+
+	DBG(level, "TX DMA%d%s: csr %04x, "
+			"H%08x S%08x C%08x %08x, "
+			"F%08x L%08x .. %08x"
+			"\n",
+		c->index, tag,
+		musb_readw(c->hw_ep->regs, MUSB_TXCSR),
+
+		musb_readl(&tx->tx_head, 0),
+		musb_readl(&tx->tx_buf, 0),
+		musb_readl(&tx->tx_current, 0),
+		musb_readl(&tx->tx_buf_current, 0),
+
+		musb_readl(&tx->tx_info, 0),
+		musb_readl(&tx->tx_rem_len, 0),
+		/* dummy/unused word 6 */
+		musb_readl(&tx->tx_complete, 0)
+		);
+}
+
+/* Context: controller irqlocked */
+static inline void
+cppi_rndis_update(struct cppi_channel *c, int is_rx,
+		void __iomem *tibase, int is_rndis)
+{
+	/* we may need to change the rndis flag for this cppi channel */
+	if (c->is_rndis != is_rndis) {
+		u32	value = musb_readl(tibase, DAVINCI_RNDIS_REG);
+		u32	temp = 1 << (c->index);
+
+		if (is_rx)
+			temp <<= 16;
+		if (is_rndis)
+			value |= temp;
+		else
+			value &= ~temp;
+		musb_writel(tibase, DAVINCI_RNDIS_REG, value);
+		c->is_rndis = is_rndis;
+	}
+}
+
+static void cppi_dump_rxbd(const char *tag, struct cppi_descriptor *bd)
+{
+	pr_debug("RXBD/%s %08x: "
+			"nxt %08x buf %08x off.blen %08x opt.plen %08x\n",
+			tag, bd->dma,
+			bd->hw_next, bd->hw_bufp, bd->hw_off_len,
+			bd->hw_options);
+}
+
+static void cppi_dump_rxq(int level, const char *tag, struct cppi_channel *rx)
+{
+#if MUSB_DEBUG > 0
+	struct cppi_descriptor	*bd;
+
+	if (!_dbg_level(level))
+		return;
+	cppi_dump_rx(level, rx, tag);
+	if (rx->last_processed)
+		cppi_dump_rxbd("last", rx->last_processed);
+	for (bd = rx->head; bd; bd = bd->next)
+		cppi_dump_rxbd("active", bd);
+#endif
+}
+
+
+/* NOTE:  DaVinci autoreq is ignored except for host side "RNDIS" mode RX;
+ * so we won't ever use it (see "CPPI RX Woes" below).
+ */
+static inline int cppi_autoreq_update(struct cppi_channel *rx,
+		void __iomem *tibase, int onepacket, unsigned n_bds)
+{
+	u32	val;
+
+#ifdef	RNDIS_RX_IS_USABLE
+	u32	tmp;
+	/* assert(is_host_active(musb)) */
+
+	/* start from "AutoReq never" */
+	tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
+	val = tmp & ~((0x3) << (rx->index * 2));
+
+	/* HCD arranged reqpkt for packet #1.  we arrange it
+	 * for all but the last one, maybe in two segments.
+	 */
+	if (!onepacket) {
+#if 0
+		/* use two segments, autoreq "all" then the last "never" */
+		val |= ((0x3) << (rx->index * 2));
+		n_bds--;
+#else
+		/* one segment, autoreq "all-but-last" */
+		val |= ((0x1) << (rx->index * 2));
+#endif
+	}
+
+	if (val != tmp) {
+		int n = 100;
+
+		/* make sure that autoreq is updated before continuing */
+		musb_writel(tibase, DAVINCI_AUTOREQ_REG, val);
+		do {
+			tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
+			if (tmp == val)
+				break;
+			cpu_relax();
+		} while (n-- > 0);
+	}
+#endif
+
+	/* REQPKT is turned off after each segment */
+	if (n_bds && rx->channel.actual_len) {
+		void __iomem	*regs = rx->hw_ep->regs;
+
+		val = musb_readw(regs, MUSB_RXCSR);
+		if (!(val & MUSB_RXCSR_H_REQPKT)) {
+			val |= MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_H_WZC_BITS;
+			musb_writew(regs, MUSB_RXCSR, val);
+			/* flush writebuffer */
+			val = musb_readw(regs, MUSB_RXCSR);
+		}
+	}
+	return n_bds;
+}
+
+
+/* Buffer enqueuing Logic:
+ *
+ *  - RX builds new queues each time, to help handle routine "early
+ *    termination" cases (faults, including errors and short reads)
+ *    more correctly.
+ *
+ *  - for now, TX reuses the same queue of BDs every time
+ *
+ * REVISIT long term, we want a normal dynamic model.
+ * ... the goal will be to append to the
+ * existing queue, processing completed "dma buffers" (segments) on the fly.
+ *
+ * Otherwise we force an IRQ latency between requests, which slows us a lot
+ * (especially in "transparent" dma).  Unfortunately that model seems to be
+ * inherent in the DMA model from the Mentor code, except in the rare case
+ * of transfers big enough (~128+ KB) that we could append "middle" segments
+ * in the TX paths.  (RX can't do this, see below.)
+ *
+ * That's true even in the CPPI- friendly iso case, where most urbs have
+ * several small segments provided in a group and where the "packet at a time"
+ * "transparent" DMA model is always correct, even on the RX side.
+ */
+
+/*
+ * CPPI TX:
+ * ========
+ * TX is a lot more reasonable than RX; it doesn't need to run in
+ * irq-per-packet mode very often.  RNDIS mode seems to behave well too
+ * (except for how it handles the exactly-N-packets case).  Building a
+ * txdma queue with multiple requests (urb or usb_request) looks
+ * like it would work ... but fault handling would need much testing.
+ *
+ * The main issue with TX mode RNDIS relates to transfer lengths that
+ * are an exact multiple of the packet length.  It appears that there's
+ * a hiccup in that case (maybe the DMA completes before the ZLP gets
+ * written?) boiling down to not being able to rely on CPPI writing any
+ * terminating zero length packet before the next transfer is written.
+ * So that's punted to PIO; better yet, gadget drivers can avoid it.
+ *
+ * Plus, there's allegedly an undocumented constraint that rndis transfer
+ * length be a multiple of 64 bytes ... but the chip doesn't act that
+ * way, and we really don't _want_ that behavior anyway.
+ *
+ * On TX, "transparent" mode works ... although experiments have shown
+ * problems trying to use the SOP/EOP bits in different USB packets.
+ *
+ * REVISIT try to handle terminating zero length packets using CPPI
+ * instead of doing it by PIO after an IRQ.  (Meanwhile, make Ethernet
+ * links avoid that issue by forcing them to avoid zlps.)
+ */
+static void
+cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
+{
+	unsigned		maxpacket = tx->maxpacket;
+	dma_addr_t		addr = tx->buf_dma + tx->offset;
+	size_t			length = tx->buf_len - tx->offset;
+	struct cppi_descriptor	*bd;
+	unsigned		n_bds;
+	unsigned		i;
+	struct cppi_tx_stateram	__iomem *tx_ram = tx->state_ram;
+	int			rndis;
+
+	/* TX can use the CPPI "rndis" mode, where we can probably fit this
+	 * transfer in one BD and one IRQ.  The only time we would NOT want
+	 * to use it is when hardware constraints prevent it, or if we'd
+	 * trigger the "send a ZLP?" confusion.
+	 */
+	rndis = (maxpacket & 0x3f) == 0
+		&& length < 0xffff
+		&& (length % maxpacket) != 0;
+
+	if (rndis) {
+		maxpacket = length;
+		n_bds = 1;
+	} else {
+		n_bds = length / maxpacket;
+		if (!length || (length % maxpacket))
+			n_bds++;
+		n_bds = min(n_bds, (unsigned) NUM_TXCHAN_BD);
+		length = min(n_bds * maxpacket, length);
+	}
+
+	DBG(4, "TX DMA%d, pktSz %d %s bds %d dma 0x%x len %u\n",
+			tx->index,
+			maxpacket,
+			rndis ? "rndis" : "transparent",
+			n_bds,
+			addr, length);
+
+	cppi_rndis_update(tx, 0, musb->ctrl_base, rndis);
+
+	/* assuming here that channel_program is called during
+	 * transfer initiation ... current code maintains state
+	 * for one outstanding request only (no queues, not even
+	 * the implicit ones of an iso urb).
+	 */
+
+	bd = tx->freelist;
+	tx->head = bd;
+	tx->last_processed = NULL;
+
+	/* FIXME use BD pool like RX side does, and just queue
+	 * the minimum number for this request.
+	 */
+
+	/* Prepare queue of BDs first, then hand it to hardware.
+	 * All BDs except maybe the last should be of full packet
+	 * size; for RNDIS there _is_ only that last packet.
+	 */
+	for (i = 0; i < n_bds; ) {
+		if (++i < n_bds && bd->next)
+			bd->hw_next = bd->next->dma;
+		else
+			bd->hw_next = 0;
+
+		bd->hw_bufp = tx->buf_dma + tx->offset;
+
+		/* FIXME set EOP only on the last packet,
+		 * SOP only on the first ... avoid IRQs
+		 */
+		if ((tx->offset + maxpacket) <= tx->buf_len) {
+			tx->offset += maxpacket;
+			bd->hw_off_len = maxpacket;
+			bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
+				| CPPI_OWN_SET | maxpacket;
+		} else {
+			/* only this one may be a partial USB Packet */
+			u32		partial_len;
+
+			partial_len = tx->buf_len - tx->offset;
+			tx->offset = tx->buf_len;
+			bd->hw_off_len = partial_len;
+
+			bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
+				| CPPI_OWN_SET | partial_len;
+			if (partial_len == 0)
+				bd->hw_options |= CPPI_ZERO_SET;
+		}
+
+		DBG(5, "TXBD %p: nxt %08x buf %08x len %04x opt %08x\n",
+				bd, bd->hw_next, bd->hw_bufp,
+				bd->hw_off_len, bd->hw_options);
+
+		/* update the last BD enqueued to the list */
+		tx->tail = bd;
+		bd = bd->next;
+	}
+
+	/* BDs live in DMA-coherent memory, but writes might be pending */
+	cpu_drain_writebuffer();
+
+	/* Write to the HeadPtr in state RAM to trigger */
+	musb_writel(&tx_ram->tx_head, 0, (u32)tx->freelist->dma);
+
+	cppi_dump_tx(5, tx, "/S");
+}
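Editor's note: as a concrete reading of the rndis-versus-transparent choice
made at the top of cppi_next_tx_segment() above, the predicate can be
restated in isolation (a restatement for illustration, not new driver logic):

	/* editor's sketch: when TX can use CPPI "rndis" mode */
	static bool tx_fits_rndis_mode(unsigned maxpacket, size_t length)
	{
		return (maxpacket & 0x3f) == 0	  /* 64-byte aligned packets */
			&& length < 0xffff	  /* fits one BD */
			&& (length % maxpacket) != 0; /* short final packet, no ZLP */
	}

	/* e.g. maxpacket 512: length 1200 gives rndis (one BD, one IRQ);
	 * length 1024 is an exact multiple, so transparent mode is used */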
+
+/*
+ * CPPI RX Woes:
+ * =============
+ * Consider a 1KB bulk RX buffer in two scenarios:  (a) it's fed two 300 byte
+ * packets back-to-back, and (b) it's fed two 512 byte packets back-to-back.
+ * (Full speed transfers have similar scenarios.)
+ *
+ * The correct behavior for Linux is that (a) fills the buffer with 300 bytes,
+ * and the next packet goes into a buffer that's queued later; while (b) fills
+ * the buffer with 1024 bytes.  How to do that with CPPI?
+ *
+ * - RX queues in "rndis" mode -- one single BD -- handle (a) correctly, but
+ *   (b) loses **BADLY** because nothing (!) happens when that second packet
+ *   fills the buffer, much less when a third one arrives.  (Which makes this
+ *   not a "true" RNDIS mode.  In the RNDIS protocol short-packet termination
+ *   is optional, and it's fine if peripherals -- not hosts! -- pad messages
+ *   out to end-of-buffer.  Standard PCI host controller DMA descriptors
+ *   implement that mode by default ... which is no accident.)
+ *
+ * - RX queues in "transparent" mode -- two BDs with 512 bytes each -- have
+ *   converse problems:  (b) is handled right, but (a) loses badly.  CPPI RX
+ *   ignores SOP/EOP markings and processes both of those BDs; so both packets
+ *   are loaded into the buffer (with a 212 byte gap between them), and the next
+ *   buffer queued will NOT get its 300 bytes of data. (It seems like SOP/EOP
+ *   are intended as outputs for RX queues, not inputs...)
+ *
+ * - A variant of "transparent" mode -- one BD at a time -- is the only way to
+ *   reliably make both cases work, with software handling both cases correctly
+ *   and at the significant penalty of needing an IRQ per packet.  (The lack of
+ *   I/O overlap can be slightly ameliorated by enabling double buffering.)
+ *
+ * So how to get rid of IRQ-per-packet?  The transparent multi-BD case could
+ * be used in special cases like mass storage, which sets URB_SHORT_NOT_OK
+ * (or maybe its peripheral side counterpart) to flag (a) scenarios as errors
+ * with guaranteed driver level fault recovery and scrubbing out what's left
+ * of that garbaged datastream.
+ *
+ * But there seems to be no way to identify the cases where CPPI RNDIS mode
+ * is appropriate -- which do NOT include RNDIS host drivers, but do include
+ * the CDC Ethernet driver! -- and the documentation is incomplete/wrong.
+ * So we can't _ever_ use RX RNDIS mode ... except by using a heuristic
+ * that applies best on the peripheral side (and which could fail rudely).
+ *
+ * Leaving only "transparent" mode; we avoid multi-bd modes in almost all
+ * cases other than mass storage class.  Otherwise we're correct but slow,
+ * since CPPI penalizes our need for a "true RNDIS" default mode.
+ */
+
+
+/* Heuristic, intended to kick in for ethernet/rndis peripheral ONLY
+ *
+ * IFF
+ *  (a)	peripheral mode ... since rndis peripherals could pad their
+ *	writes to hosts, causing i/o failure; or we'd have to cope with
+ *	a largely unknowable variety of host side protocol variants
+ *  (b)	and short reads are NOT errors ... since full reads would
+ *	cause those same i/o failures
+ *  (c)	and read length is
+ *	- less than 64KB (max per cppi descriptor)
+ *	- not a multiple of 4096 (g_zero default, full reads typical)
+ *	- N (>1) packets long, ditto (full reads not EXPECTED)
+ * THEN
+ *   try rx rndis mode
+ *
+ * Cost of heuristic failing:  RXDMA wedges at the end of transfers that
+ * fill out the whole buffer.  Buggy host side usb network drivers could
+ * trigger that, but "in the field" such bugs seem to be all but unknown.
+ *
+ * So this module parameter lets the heuristic be disabled.  When using
+ * gadgetfs, the heuristic will probably need to be disabled.
+ */
+static int cppi_rx_rndis = 1;
+
+module_param(cppi_rx_rndis, bool, 0);
+MODULE_PARM_DESC(cppi_rx_rndis, "enable/disable RX RNDIS heuristic");
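+
+/* e.g. "modprobe musb_hdrc cppi_rx_rndis=0" disables the heuristic
+ * (assuming CPPI support is compiled into the musb_hdrc module; if the
+ * driver is built in, "musb_hdrc.cppi_rx_rndis=0" on the kernel command
+ * line does the same).
+ */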
+
+
+/**
+ * cppi_next_rx_segment - dma read for the next chunk of a buffer
+ * @musb: the controller
+ * @rx: dma channel
+ * @onepacket: true unless caller treats short reads as errors, and
+ *	performs fault recovery above usbcore.
+ * Context: controller irqlocked
+ *
+ * See above notes about why we can't use multi-BD RX queues except in
+ * rare cases (mass storage class), and can never use the hardware "rndis"
+ * mode (since it's not a "true" RNDIS mode) with complete safety.
+ *
+ * It's ESSENTIAL that callers specify "onepacket" mode unless they kick in
+ * code to recover from corrupted datastreams after each short transfer.
+ */
+static void
+cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
+{
+	unsigned		maxpacket = rx->maxpacket;
+	dma_addr_t		addr = rx->buf_dma + rx->offset;
+	size_t			length = rx->buf_len - rx->offset;
+	struct cppi_descriptor	*bd, *tail;
+	unsigned		n_bds;
+	unsigned		i;
+	void __iomem		*tibase = musb->ctrl_base;
+	int			is_rndis = 0;
+	struct cppi_rx_stateram	__iomem *rx_ram = rx->state_ram;
+
+	if (onepacket) {
+		/* almost every USB driver, host or peripheral side */
+		n_bds = 1;
+
+		/* maybe apply the heuristic above */
+		if (cppi_rx_rndis
+				&& is_peripheral_active(musb)
+				&& length > maxpacket
+				&& (length & ~0xffff) == 0
+				&& (length & 0x0fff) != 0
+				&& (length & (maxpacket - 1)) == 0) {
+			maxpacket = length;
+			is_rndis = 1;
+		}
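+
+		/* Illustrative numbers:  a peripheral-side read of 3072
+		 * bytes with maxpacket 64 passes every test above
+		 * (3072 < 0x10000, 3072 & 0x0fff != 0, 3072 % 64 == 0),
+		 * so one 3072-byte rndis BD replaces what would otherwise
+		 * be 48 single-BD segments.
+		 */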
+	} else {
+		/* virtually nothing except mass storage class */
+		if (length > 0xffff) {
+			n_bds = 0xffff / maxpacket;
+			length = n_bds * maxpacket;
+		} else {
+			n_bds = length / maxpacket;
+			if (length % maxpacket)
+				n_bds++;
+		}
+		if (n_bds == 1)
+			onepacket = 1;
+		else
+			n_bds = min(n_bds, (unsigned) NUM_RXCHAN_BD);
+	}
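+
+	/* Illustrative numbers for the multipacket branch:  a 64 KB
+	 * mass storage read with maxpacket 512 exceeds the 16-bit length
+	 * limit, so at most 0xffff/512 = 127 BDs (65024 bytes) are covered
+	 * here (further capped by NUM_RXCHAN_BD); the remainder is picked
+	 * up by a later segment.
+	 */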
+
+	/* In host mode, autorequest logic can generate some IN tokens; it's
+	 * tricky since we can't leave REQPKT set in RXCSR after the transfer
+	 * finishes. So:  multipacket transfers involve two or more segments.
+	 * And always at least two IRQs ... RNDIS mode is not an option.
+	 */
+	if (is_host_active(musb))
+		n_bds = cppi_autoreq_update(rx, tibase, onepacket, n_bds);
+
+	cppi_rndis_update(rx, 1, musb->ctrl_base, is_rndis);
+
+	length = min(n_bds * maxpacket, length);
+
+	DBG(4, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) "
+			"dma 0x%x len %u %u/%u\n",
+			rx->index, maxpacket,
+			onepacket
+				? (is_rndis ? "rndis" : "onepacket")
+				: "multipacket",
+			n_bds,
+			musb_readl(tibase,
+				DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
+					& 0xffff,
+			addr, length, rx->channel.actual_len, rx->buf_len);
+
+	/* only queue one segment at a time, since the hardware prevents
+	 * correct queue shutdown after unexpected short packets
+	 */
+	bd = cppi_bd_alloc(rx);
+	rx->head = bd;
+
+	/* Build BDs for all packets in this segment */
+	for (i = 0, tail = NULL; bd && i < n_bds; i++, tail = bd) {
+		u32	bd_len;
+
+		if (i) {
+			bd = cppi_bd_alloc(rx);
+			if (!bd)
+				break;
+			tail->next = bd;
+			tail->hw_next = bd->dma;
+		}
+		bd->hw_next = 0;
+
+		/* all but the last packet will be maxpacket size */
+		if (maxpacket < length)
+			bd_len = maxpacket;
+		else
+			bd_len = length;
+
+		bd->hw_bufp = addr;
+		addr += bd_len;
+		rx->offset += bd_len;
+
+		bd->hw_off_len = (0 /*offset*/ << 16) + bd_len;
+		bd->buflen = bd_len;
+
+		bd->hw_options = CPPI_OWN_SET | (i == 0 ? length : 0);
+		length -= bd_len;
+	}
+
+	/* we always expect at least one reusable BD! */
+	if (!tail) {
+		WARNING("rx dma%d -- no BDs? need %d\n", rx->index, n_bds);
+		return;
+	} else if (i < n_bds)
+		WARNING("rx dma%d -- only %d of %d BDs\n", rx->index, i, n_bds);
+
+	tail->next = NULL;
+	tail->hw_next = 0;
+
+	bd = rx->head;
+	rx->tail = tail;
+
+	/* short reads and other faults should terminate this entire
+	 * dma segment.  we want one "dma packet" per dma segment, not
+	 * one per USB packet, terminating the whole queue at once...
+	 * NOTE that current hardware seems to ignore SOP and EOP.
+	 */
+	bd->hw_options |= CPPI_SOP_SET;
+	tail->hw_options |= CPPI_EOP_SET;
+
+	if (debug >= 5) {
+		struct cppi_descriptor	*d;
+
+		for (d = rx->head; d; d = d->next)
+			cppi_dump_rxbd("S", d);
+	}
+
+	/* in case the preceding transfer left some state... */
+	tail = rx->last_processed;
+	if (tail) {
+		tail->next = bd;
+		tail->hw_next = bd->dma;
+	}
+
+	core_rxirq_enable(tibase, rx->index + 1);
+
+	/* BDs live in DMA-coherent memory, but writes might be pending */
+	cpu_drain_writebuffer();
+
+	/* REVISIT specs say to write this AFTER the BUFCNT register
+	 * below ... but that loses badly.
+	 */
+	musb_writel(&rx_ram->rx_head, 0, bd->dma);
+
+	/* bufferCount must be at least 3, and zeroes on completion
+	 * unless it underflows below zero, or stops at two, or keeps
+	 * growing ... grr.
+	 */
+	i = musb_readl(tibase,
+			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
+			& 0xffff;
+
+	if (!i)
+		musb_writel(tibase,
+			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
+			n_bds + 2);
+	else if (n_bds > (i - 3))
+		musb_writel(tibase,
+			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
+			n_bds - (i - 3));
+
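+	/* The two writes above appear to be additive rather than absolute:
+	 * starting from a count of i, adding n_bds - (i - 3) targets
+	 * n_bds + 3 queued buffers, while an empty counter is given
+	 * n_bds + 2 outright.  The re-read below guards against the
+	 * erratic counter behaviors noted above.
+	 */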
+	i = musb_readl(tibase,
+			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
+			& 0xffff;
+	if (i < (2 + n_bds)) {
+		DBG(2, "bufcnt%d underrun - %d (for %d)\n",
+					rx->index, i, n_bds);
+		musb_writel(tibase,
+			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
+			n_bds + 2);
+	}
+
+	cppi_dump_rx(4, rx, "/S");
+}
+
+/**
+ * cppi_channel_program - program channel for data transfer
+ * @ch: the channel
+ * @maxpacket: max packet size
+ * @mode: For RX, 1 unless the usb protocol driver promised to treat
+ *	all short reads as errors and kick in high level fault recovery.
+ *	For TX, ignored because of RNDIS mode races/glitches.
+ * @dma_addr: dma address of buffer
+ * @len: length of buffer
+ * Context: controller irqlocked
+ */
+static int cppi_channel_program(struct dma_channel *ch,
+		u16 maxpacket, u8 mode,
+		dma_addr_t dma_addr, u32 len)
+{
+	struct cppi_channel	*cppi_ch;
+	struct cppi		*controller;
+	struct musb		*musb;
+
+	cppi_ch = container_of(ch, struct cppi_channel, channel);
+	controller = cppi_ch->controller;
+	musb = controller->musb;
+
+	switch (ch->status) {
+	case MUSB_DMA_STATUS_BUS_ABORT:
+	case MUSB_DMA_STATUS_CORE_ABORT:
+		/* fault irq handler should have handled cleanup */
+		WARNING("%cX DMA%d not cleaned up after abort!\n",
+				cppi_ch->transmit ? 'T' : 'R',
+				cppi_ch->index);
+		/* WARN_ON(1); */
+		break;
+	case MUSB_DMA_STATUS_BUSY:
+		WARNING("program active channel?  %cX DMA%d\n",
+				cppi_ch->transmit ? 'T' : 'R',
+				cppi_ch->index);
+		/* WARN_ON(1); */
+		break;
+	case MUSB_DMA_STATUS_UNKNOWN:
+		DBG(1, "%cX DMA%d not allocated!\n",
+				cppi_ch->transmit ? 'T' : 'R',
+				cppi_ch->index);
+		/* FALLTHROUGH */
+	case MUSB_DMA_STATUS_FREE:
+		break;
+	}
+
+	ch->status = MUSB_DMA_STATUS_BUSY;
+
+	/* set transfer parameters, then queue up its first segment */
+	cppi_ch->buf_dma = dma_addr;
+	cppi_ch->offset = 0;
+	cppi_ch->maxpacket = maxpacket;
+	cppi_ch->buf_len = len;
+
+	/* TX channel? or RX? */
+	if (cppi_ch->transmit)
+		cppi_next_tx_segment(musb, cppi_ch);
+	else
+		cppi_next_rx_segment(musb, cppi_ch, mode);
+
+	return true;
+}
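+
+/* A sketch of the calling convention (not the literal call sites; the
+ * real callers are the musb host and gadget glue, using the
+ * dma_controller hooks wired up in dma_controller_create() below):
+ *
+ *	ch = c->channel_alloc(c, hw_ep, is_tx);
+ *	if (ch && !c->channel_program(ch, maxpacket, mode, buf_dma, len))
+ *		... fall back to PIO ...
+ */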
+
+static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
+{
+	struct cppi_channel		*rx = &cppi->rx[ch];
+	struct cppi_rx_stateram __iomem	*state = rx->state_ram;
+	struct cppi_descriptor		*bd;
+	struct cppi_descriptor		*last = rx->last_processed;
+	bool				completed = false;
+	bool				acked = false;
+	int				i;
+	dma_addr_t			safe2ack;
+	void __iomem			*regs = rx->hw_ep->regs;
+
+	cppi_dump_rx(6, rx, "/K");
+
+	bd = last ? last->next : rx->head;
+	if (!bd)
+		return false;
+
+	/* run through all completed BDs */
+	for (i = 0, safe2ack = musb_readl(&state->rx_complete, 0);
+			(safe2ack || completed) && bd && i < NUM_RXCHAN_BD;
+			i++, bd = bd->next) {
+		u16	len;
+
+		/* catch latest BD writes from CPPI */
+		rmb();
+		if (!completed && (bd->hw_options & CPPI_OWN_SET))
+			break;
+
+		DBG(5, "C/RXBD %08x: nxt %08x buf %08x "
+			"off.len %08x opt.len %08x (%d)\n",
+			bd->dma, bd->hw_next, bd->hw_bufp,
+			bd->hw_off_len, bd->hw_options,
+			rx->channel.actual_len);
+
+		/* actual packet received length */
+		if ((bd->hw_options & CPPI_SOP_SET) && !completed)
+			len = bd->hw_off_len & CPPI_RECV_PKTLEN_MASK;
+		else
+			len = 0;
+
+		if (bd->hw_options & CPPI_EOQ_MASK)
+			completed = true;
+
+		if (!completed && len < bd->buflen) {
+			/* NOTE:  when we get a short packet, RXCSR_H_REQPKT
+			 * must have been cleared, and no more DMA packets may
+			 * be active in the queue... TI docs didn't say, but
+			 * CPPI ignores those BDs even though OWN is still set.
+			 */
+			completed = true;
+			DBG(3, "rx short %d/%d (%d)\n",
+					len, bd->buflen,
+					rx->channel.actual_len);
+		}
+
+		/* If we got here, we expect to ack at least one BD; meanwhile
+		 * CPPI may be completing other BDs while we scan this list...
+		 *
+		 * RACE: we can notice OWN cleared before CPPI raises the
+		 * matching irq by writing that BD as the completion pointer.
+		 * In such cases, stop scanning and wait for the irq, avoiding
+		 * lost acks and states where BD ownership is unclear.
+		 */
+		if (bd->dma == safe2ack) {
+			musb_writel(&state->rx_complete, 0, safe2ack);
+			safe2ack = musb_readl(&state->rx_complete, 0);
+			acked = true;
+			if (bd->dma == safe2ack)
+				safe2ack = 0;
+		}
+
+		rx->channel.actual_len += len;
+
+		cppi_bd_free(rx, last);
+		last = bd;
+
+		/* stop scanning on end-of-segment */
+		if (bd->hw_next == 0)
+			completed = true;
+	}
+	rx->last_processed = last;
+
+	/* dma abort, lost ack, or ... */
+	if (!acked && last) {
+		int	csr;
+
+		if (safe2ack == 0 || safe2ack == rx->last_processed->dma)
+			musb_writel(&state->rx_complete, 0, safe2ack);
+		if (safe2ack == 0) {
+			cppi_bd_free(rx, last);
+			rx->last_processed = NULL;
+
+			/* if we land here on the host side, H_REQPKT will
+			 * be clear and we need to restart the queue...
+			 */
+			WARN_ON(rx->head);
+		}
+		musb_ep_select(cppi->mregs, rx->index + 1);
+		csr = musb_readw(regs, MUSB_RXCSR);
+		if (csr & MUSB_RXCSR_DMAENAB) {
+			DBG(4, "list%d %p/%p, last %08x%s, csr %04x\n",
+				rx->index,
+				rx->head, rx->tail,
+				rx->last_processed
+					? rx->last_processed->dma
+					: 0,
+				completed ? ", completed" : "",
+				csr);
+			cppi_dump_rxq(4, "/what?", rx);
+		}
+	}
+	if (!completed) {
+		int	csr;
+
+		rx->head = bd;
+
+		/* REVISIT seems like "autoreq all but EOP" doesn't...
+		 * setting it here "should" be racy, but seems to work
+		 */
+		csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
+		if (is_host_active(cppi->musb)
+				&& bd
+				&& !(csr & MUSB_RXCSR_H_REQPKT)) {
+			csr |= MUSB_RXCSR_H_REQPKT;
+			musb_writew(regs, MUSB_RXCSR,
+					MUSB_RXCSR_H_WZC_BITS | csr);
+			csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
+		}
+	} else {
+		rx->head = NULL;
+		rx->tail = NULL;
+	}
+
+	cppi_dump_rx(6, rx, completed ? "/completed" : "/cleaned");
+	return completed;
+}
+
+void cppi_completion(struct musb *musb, u32 rx, u32 tx)
+{
+	void __iomem		*tibase;
+	int			i, index;
+	struct cppi		*cppi;
+	struct musb_hw_ep	*hw_ep = NULL;
+
+	cppi = container_of(musb->dma_controller, struct cppi, controller);
+
+	tibase = musb->ctrl_base;
+
+	/* process TX channels */
+	for (index = 0; tx; tx = tx >> 1, index++) {
+		struct cppi_channel		*tx_ch;
+		struct cppi_tx_stateram __iomem	*tx_ram;
+		bool				completed = false;
+		struct cppi_descriptor		*bd;
+
+		if (!(tx & 1))
+			continue;
+
+		tx_ch = cppi->tx + index;
+		tx_ram = tx_ch->state_ram;
+
+		/* FIXME  need a cppi_tx_scan() routine, which
+		 * can also be called from abort code
+		 */
+
+		cppi_dump_tx(5, tx_ch, "/E");
+
+		bd = tx_ch->head;
+
+		if (NULL == bd) {
+			DBG(1, "null BD\n");
+			continue;
+		}
+
+		/* run through all completed BDs */
+		for (i = 0; !completed && bd && i < NUM_TXCHAN_BD;
+				i++, bd = bd->next) {
+			u16	len;
+
+			/* catch latest BD writes from CPPI */
+			rmb();
+			if (bd->hw_options & CPPI_OWN_SET)
+				break;
+
+			DBG(5, "C/TXBD %p n %x b %x off %x opt %x\n",
+					bd, bd->hw_next, bd->hw_bufp,
+					bd->hw_off_len, bd->hw_options);
+
+			len = bd->hw_off_len & CPPI_BUFFER_LEN_MASK;
+			tx_ch->channel.actual_len += len;
+
+			tx_ch->last_processed = bd;
+
+			/* write completion register to acknowledge
+			 * processing of completed BDs, and possibly
+			 * release the IRQ; EOQ might not be set ...
+			 *
+			 * REVISIT use the same ack strategy as rx
+			 *
+			 * REVISIT have observed bit 18 set; huh??
+			 */
+			/* if ((bd->hw_options & CPPI_EOQ_MASK)) */
+				musb_writel(&tx_ram->tx_complete, 0, bd->dma);
+
+			/* stop scanning on end-of-segment */
+			if (bd->hw_next == 0)
+				completed = true;
+		}
+
+		/* on end of segment, maybe go to next one */
+		if (completed) {
+			/* cppi_dump_tx(4, tx_ch, "/complete"); */
+
+			/* transfer more, or report completion */
+			if (tx_ch->offset >= tx_ch->buf_len) {
+				tx_ch->head = NULL;
+				tx_ch->tail = NULL;
+				tx_ch->channel.status = MUSB_DMA_STATUS_FREE;
+
+				hw_ep = tx_ch->hw_ep;
+
+				/* Peripheral role never repurposes the
+				 * endpoint, so immediate completion is
+				 * safe.  Host role waits for the fifo
+				 * to empty (TXPKTRDY irq) before going
+				 * to the next queued bulk transfer.
+				 */
+				if (is_host_active(cppi->musb)) {
+#if 0
+					/* WORKAROUND because we may
+					 * not always get TXPKTRDY ...
+					 */
+					int	csr;
+
+					csr = musb_readw(hw_ep->regs,
+						MUSB_TXCSR);
+					if (csr & MUSB_TXCSR_TXPKTRDY)
+#endif
+						completed = false;
+				}
+				if (completed)
+					musb_dma_completion(musb, index + 1, 1);
+
+			} else {
+				/* Bigger transfer than we could fit in
+				 * that first batch of descriptors...
+				 */
+				cppi_next_tx_segment(musb, tx_ch);
+			}
+		} else
+			tx_ch->head = bd;
+	}
+
+	/* Start processing the RX block */
+	for (index = 0; rx; rx = rx >> 1, index++) {
+
+		if (rx & 1) {
+			struct cppi_channel		*rx_ch;
+
+			rx_ch = cppi->rx + index;
+
+			/* let incomplete dma segments finish */
+			if (!cppi_rx_scan(cppi, index))
+				continue;
+
+			/* start another dma segment if needed */
+			if (rx_ch->channel.actual_len != rx_ch->buf_len
+					&& rx_ch->channel.actual_len
+						== rx_ch->offset) {
+				cppi_next_rx_segment(musb, rx_ch, 1);
+				continue;
+			}
+
+			/* all segments completed! */
+			rx_ch->channel.status = MUSB_DMA_STATUS_FREE;
+
+			hw_ep = rx_ch->hw_ep;
+
+			core_rxirq_disable(tibase, index + 1);
+			musb_dma_completion(musb, index + 1, 0);
+		}
+	}
+
+	/* write to CPPI EOI register to re-enable interrupts */
+	musb_writel(tibase, DAVINCI_CPPI_EOI_REG, 0);
+}
+
+/* Instantiate a software object representing a DMA controller. */
+struct dma_controller *__init
+dma_controller_create(struct musb *musb, void __iomem *mregs)
+{
+	struct cppi		*controller;
+
+	controller = kzalloc(sizeof *controller, GFP_KERNEL);
+	if (!controller)
+		return NULL;
+
+	controller->mregs = mregs;
+	controller->tibase = mregs - DAVINCI_BASE_OFFSET;
+
+	controller->musb = musb;
+	controller->controller.start = cppi_controller_start;
+	controller->controller.stop = cppi_controller_stop;
+	controller->controller.channel_alloc = cppi_channel_allocate;
+	controller->controller.channel_release = cppi_channel_release;
+	controller->controller.channel_program = cppi_channel_program;
+	controller->controller.channel_abort = cppi_channel_abort;
+
+	/* NOTE: allocating from on-chip SRAM would give the least
+	 * contention for memory access, if that ever matters here.
+	 */
+
+	/* setup BufferPool */
+	controller->pool = dma_pool_create("cppi",
+			controller->musb->controller,
+			sizeof(struct cppi_descriptor),
+			CPPI_DESCRIPTOR_ALIGN, 0);
+	if (!controller->pool) {
+		kfree(controller);
+		return NULL;
+	}
+
+	return &controller->controller;
+}
+
+/*
+ *  Destroy a previously-instantiated DMA controller.
+ */
+void dma_controller_destroy(struct dma_controller *c)
+{
+	struct cppi	*cppi;
+
+	cppi = container_of(c, struct cppi, controller);
+
+	/* assert:  caller stopped the controller first */
+	dma_pool_destroy(cppi->pool);
+
+	kfree(cppi);
+}
+
+/*
+ * Context: controller irqlocked, endpoint selected
+ */
+static int cppi_channel_abort(struct dma_channel *channel)
+{
+	struct cppi_channel	*cppi_ch;
+	struct cppi		*controller;
+	void __iomem		*mbase;
+	void __iomem		*tibase;
+	void __iomem		*regs;
+	u32			value;
+	struct cppi_descriptor	*queue;
+
+	cppi_ch = container_of(channel, struct cppi_channel, channel);
+
+	controller = cppi_ch->controller;
+
+	switch (channel->status) {
+	case MUSB_DMA_STATUS_BUS_ABORT:
+	case MUSB_DMA_STATUS_CORE_ABORT:
+		/* from RX or TX fault irq handler */
+	case MUSB_DMA_STATUS_BUSY:
+		/* the hardware needs shutting down */
+		regs = cppi_ch->hw_ep->regs;
+		break;
+	case MUSB_DMA_STATUS_UNKNOWN:
+	case MUSB_DMA_STATUS_FREE:
+		return 0;
+	default:
+		return -EINVAL;
+	}
+
+	if (!cppi_ch->transmit && cppi_ch->head)
+		cppi_dump_rxq(3, "/abort", cppi_ch);
+
+	mbase = controller->mregs;
+	tibase = controller->tibase;
+
+	queue = cppi_ch->head;
+	cppi_ch->head = NULL;
+	cppi_ch->tail = NULL;
+
+	/* REVISIT should rely on caller having done this,
+	 * and caller should rely on us not changing it.
+	 * peripheral code is safe ... check host too.
+	 */
+	musb_ep_select(mbase, cppi_ch->index + 1);
+
+	if (cppi_ch->transmit) {
+		struct cppi_tx_stateram __iomem *tx_ram;
+		int			enabled;
+
+		/* mask interrupts raised to signal teardown complete.  */
+		enabled = musb_readl(tibase, DAVINCI_TXCPPI_INTENAB_REG)
+				& (1 << cppi_ch->index);
+		if (enabled)
+			musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG,
+					(1 << cppi_ch->index));
+
+		/* REVISIT put timeouts on these controller handshakes */
+
+		cppi_dump_tx(6, cppi_ch, " (teardown)");
+
+		/* teardown DMA engine then usb core */
+		do {
+			value = musb_readl(tibase, DAVINCI_TXCPPI_TEAR_REG);
+		} while (!(value & CPPI_TEAR_READY));
+		musb_writel(tibase, DAVINCI_TXCPPI_TEAR_REG, cppi_ch->index);
+
+		tx_ram = cppi_ch->state_ram;
+		do {
+			value = musb_readl(&tx_ram->tx_complete, 0);
+		} while (0xFFFFFFFC != value);
+		musb_writel(&tx_ram->tx_complete, 0, 0xFFFFFFFC);
+
+		/* FIXME clean up the transfer state ... here?
+		 * the completion routine should get called with
+		 * an appropriate status code.
+		 */
+
+		value = musb_readw(regs, MUSB_TXCSR);
+		value &= ~MUSB_TXCSR_DMAENAB;
+		value |= MUSB_TXCSR_FLUSHFIFO;
+		musb_writew(regs, MUSB_TXCSR, value);
+		musb_writew(regs, MUSB_TXCSR, value);
+
+		/* re-enable interrupt */
+		if (enabled)
+			musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG,
+					(1 << cppi_ch->index));
+
+		/* While we scrub the TX state RAM, ensure that we clean
+		 * up any interrupt that's currently asserted:
+		 * 1. Write to completion Ptr value 0x1 (bit 0 set)
+		 *    (write back mode)
+		 * 2. Write to completion Ptr value 0x0 (bit 0 cleared)
+		 *    (compare mode)
+		 * Value written is compared (for bits 31:2) and when
+		 * equal, interrupt is deasserted.
+		 */
+		cppi_reset_tx(tx_ram, 1);
+		musb_writel(&tx_ram->tx_complete, 0, 0);
+
+		cppi_dump_tx(5, cppi_ch, " (done teardown)");
+
+		/* REVISIT tx side _should_ clean up the same way
+		 * as the RX side ... this does no cleanup at all!
+		 */
+
+	} else /* RX */ {
+		u16			csr;
+
+		/* NOTE: docs don't guarantee any of this works ...  we
+		 * expect that if the usb core stops telling the cppi core
+		 * to pull more data from it, then it'll be safe to flush
+		 * current RX DMA state iff any pending fifo transfer is done.
+		 */
+
+		core_rxirq_disable(tibase, cppi_ch->index + 1);
+
+		/* for host, ensure ReqPkt is never set again */
+		if (is_host_active(cppi_ch->controller->musb)) {
+			value = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
+			value &= ~((0x3) << (cppi_ch->index * 2));
+			musb_writel(tibase, DAVINCI_AUTOREQ_REG, value);
+		}
+
+		csr = musb_readw(regs, MUSB_RXCSR);
+
+		/* for host, clear (just) ReqPkt at end of current packet(s) */
+		if (is_host_active(cppi_ch->controller->musb)) {
+			csr |= MUSB_RXCSR_H_WZC_BITS;
+			csr &= ~MUSB_RXCSR_H_REQPKT;
+		} else
+			csr |= MUSB_RXCSR_P_WZC_BITS;
+
+		/* clear dma enable */
+		csr &= ~(MUSB_RXCSR_DMAENAB);
+		musb_writew(regs, MUSB_RXCSR, csr);
+		csr = musb_readw(regs, MUSB_RXCSR);
+
+		/* Quiesce: wait for current dma to finish (if not cleanup).
+		 * We can't use bit zero of stateram->rx_sop, since that
+		 * refers to an entire "DMA packet" not just emptying the
+		 * current fifo.  Most segments need multiple usb packets.
+		 */
+		if (channel->status == MUSB_DMA_STATUS_BUSY)
+			udelay(50);
+
+		/* scan the current list, reporting any data that was
+		 * transferred and acking any IRQ
+		 */
+		cppi_rx_scan(controller, cppi_ch->index);
+
+		/* clobber the existing state once it's idle
+		 *
+		 * NOTE:  arguably, we should also wait for all the other
+		 * RX channels to quiesce (how??) and then temporarily
+		 * disable RXCPPI_CTRL_REG ... but it seems that we can
+		 * rely on the controller restarting from state ram, with
+		 * only RXCPPI_BUFCNT state being bogus.  BUFCNT will
+		 * correct itself after the next DMA transfer though.
+		 *
+		 * REVISIT does using rndis mode change that?
+		 */
+		cppi_reset_rx(cppi_ch->state_ram);
+
+		/* next DMA request _should_ load cppi head ptr */
+
+		/* ... we don't "free" that list, only mutate it in place.  */
+		cppi_dump_rx(5, cppi_ch, " (done abort)");
+
+		/* clean up previously pending bds */
+		cppi_bd_free(cppi_ch, cppi_ch->last_processed);
+		cppi_ch->last_processed = NULL;
+
+		while (queue) {
+			struct cppi_descriptor	*tmp = queue->next;
+
+			cppi_bd_free(cppi_ch, queue);
+			queue = tmp;
+		}
+	}
+
+	channel->status = MUSB_DMA_STATUS_FREE;
+	cppi_ch->buf_dma = 0;
+	cppi_ch->offset = 0;
+	cppi_ch->buf_len = 0;
+	cppi_ch->maxpacket = 0;
+	return 0;
+}
+
+/* TBD Queries:
+ *
+ * Power Management ... probably turn off cppi during suspend, restart;
+ * check state ram?  Clocking is presumably shared with usb core.
+ */
diff --git a/drivers/usb/musb/cppi_dma.h b/drivers/usb/musb/cppi_dma.h
new file mode 100644
index 0000000..fc5216b
--- /dev/null
+++ b/drivers/usb/musb/cppi_dma.h
@@ -0,0 +1,133 @@
+/* Copyright (C) 2005-2006 by Texas Instruments */
+
+#ifndef _CPPI_DMA_H_
+#define _CPPI_DMA_H_
+
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/dmapool.h>
+
+#include "musb_dma.h"
+#include "musb_core.h"
+
+
+/* FIXME fully isolate CPPI from DaVinci ... the "CPPI generic" registers
+ * would seem to be shared with the TUSB6020 (over VLYNQ).
+ */
+
+#include "davinci.h"
+
+
+/* CPPI RX/TX state RAM */
+
+struct cppi_tx_stateram {
+	u32 tx_head;			/* "DMA packet" head descriptor */
+	u32 tx_buf;
+	u32 tx_current;			/* current descriptor */
+	u32 tx_buf_current;
+	u32 tx_info;			/* flags, remaining buflen */
+	u32 tx_rem_len;
+	u32 tx_dummy;			/* unused */
+	u32 tx_complete;
+};
+
+struct cppi_rx_stateram {
+	u32 rx_skipbytes;
+	u32 rx_head;
+	u32 rx_sop;			/* "DMA packet" head descriptor */
+	u32 rx_current;			/* current descriptor */
+	u32 rx_buf_current;
+	u32 rx_len_len;
+	u32 rx_cnt_cnt;
+	u32 rx_complete;
+};
+
+/* hw_options bits in CPPI buffer descriptors */
+#define CPPI_SOP_SET	((u32)(1 << 31))
+#define CPPI_EOP_SET	((u32)(1 << 30))
+#define CPPI_OWN_SET	((u32)(1 << 29))	/* owned by cppi */
+#define CPPI_EOQ_MASK	((u32)(1 << 28))
+#define CPPI_ZERO_SET	((u32)(1 << 23))	/* rx saw zlp; tx issues one */
+#define CPPI_RXABT_MASK	((u32)(1 << 19))	/* need more rx buffers */
+
+#define CPPI_RECV_PKTLEN_MASK 0xFFFF
+#define CPPI_BUFFER_LEN_MASK 0xFFFF
+
+#define CPPI_TEAR_READY ((u32)(1 << 31))
+
+/* CPPI data structure definitions */
+
+#define	CPPI_DESCRIPTOR_ALIGN	16	/* bytes; 5-dec docs say 4-byte align */
+
+struct cppi_descriptor {
+	/* hardware overlay */
+	u32		hw_next;	/* next buffer descriptor Pointer */
+	u32		hw_bufp;	/* i/o buffer pointer */
+	u32		hw_off_len;	/* buffer_offset16, buffer_length16 */
+	u32		hw_options;	/* flags:  SOP, EOP etc*/
+
+	struct cppi_descriptor *next;
+	dma_addr_t	dma;		/* address of this descriptor */
+	u32		buflen;		/* for RX: original buffer length */
+} __attribute__ ((aligned(CPPI_DESCRIPTOR_ALIGN)));
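+
+/* For example, cppi_next_tx_segment() fills a full-sized transparent TX
+ * descriptor as:  hw_next = dma address of the following BD (0 on the
+ * last), hw_bufp = packet buffer address, hw_off_len = maxpacket, and
+ * hw_options = CPPI_SOP_SET | CPPI_EOP_SET | CPPI_OWN_SET | maxpacket.
+ */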
+
+
+struct cppi;
+
+/* CPPI  Channel Control structure */
+struct cppi_channel {
+	struct dma_channel	channel;
+
+	/* back pointer to the DMA controller structure */
+	struct cppi		*controller;
+
+	/* which direction of which endpoint? */
+	struct musb_hw_ep	*hw_ep;
+	bool			transmit;
+	u8			index;
+
+	/* DMA modes:  RNDIS or "transparent" */
+	u8			is_rndis;
+
+	/* book keeping for current transfer request */
+	dma_addr_t		buf_dma;
+	u32			buf_len;
+	u32			maxpacket;
+	u32			offset;		/* dma requested */
+
+	void __iomem		*state_ram;	/* CPPI state */
+
+	struct cppi_descriptor	*freelist;
+
+	/* BD management fields */
+	struct cppi_descriptor	*head;
+	struct cppi_descriptor	*tail;
+	struct cppi_descriptor	*last_processed;
+
+	/* use tx_complete in host role to track endpoints waiting for
+	 * FIFONOTEMPTY to clear.
+	 */
+	struct list_head	tx_complete;
+};
+
+/* CPPI DMA controller object */
+struct cppi {
+	struct dma_controller		controller;
+	struct musb			*musb;
+	void __iomem			*mregs;		/* Mentor regs */
+	void __iomem			*tibase;	/* TI/CPPI regs */
+
+	struct cppi_channel		tx[MUSB_C_NUM_EPT - 1];
+	struct cppi_channel		rx[MUSB_C_NUM_EPR - 1];
+
+	struct dma_pool			*pool;
+
+	struct list_head		tx_complete;
+};
+
+/* irq handling hook */
+extern void cppi_completion(struct musb *, u32 rx, u32 tx);
+
+#endif				/* end of ifndef _CPPI_DMA_H_ */
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
new file mode 100644
index 0000000..75baf18
--- /dev/null
+++ b/drivers/usb/musb/davinci.c
@@ -0,0 +1,462 @@
+/*
+ * Copyright (C) 2005-2006 by Texas Instruments
+ *
+ * This file is part of the Inventra Controller Driver for Linux.
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ *
+ * The Inventra Controller Driver for Linux is distributed in
+ * the hope that it will be useful, but WITHOUT ANY WARRANTY;
+ * without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
+ * License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with The Inventra Controller Driver for Linux ; if not,
+ * write to the Free Software Foundation, Inc., 59 Temple Place,
+ * Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include <asm/arch/hardware.h>
+#include <asm/arch/memory.h>
+#include <asm/arch/gpio.h>
+#include <asm/mach-types.h>
+
+#include "musb_core.h"
+
+#ifdef CONFIG_MACH_DAVINCI_EVM
+#include <asm/arch/i2c-client.h>
+#endif
+
+#include "davinci.h"
+#include "cppi_dma.h"
+
+
+/* REVISIT (PM) we should be able to keep the PHY in low power mode most
+ * of the time (24 MHZ oscillator and PLL off, etc) by setting POWER.D0
+ * and, when in host mode, autosuspending idle root ports... PHYPLLON
+ * (overriding SUSPENDM?) then likely needs to stay off.
+ */
+
+static inline void phy_on(void)
+{
+	/* start the on-chip PHY and its PLL */
+	__raw_writel(USBPHY_SESNDEN | USBPHY_VBDTCTEN | USBPHY_PHYPLLON,
+			(void __force __iomem *) IO_ADDRESS(USBPHY_CTL_PADDR));
+	while ((__raw_readl((void __force __iomem *)
+				IO_ADDRESS(USBPHY_CTL_PADDR))
+			& USBPHY_PHYCLKGD) == 0)
+		cpu_relax();
+}
+
+static inline void phy_off(void)
+{
+	/* powerdown the on-chip PHY and its oscillator */
+	__raw_writel(USBPHY_OSCPDWN | USBPHY_PHYPDWN, (void __force __iomem *)
+			IO_ADDRESS(USBPHY_CTL_PADDR));
+}
+
+static int dma_off = 1;
+
+void musb_platform_enable(struct musb *musb)
+{
+	u32	tmp, old, val;
+
+	/* workaround:  setup irqs through both register sets */
+	tmp = (musb->epmask & DAVINCI_USB_TX_ENDPTS_MASK)
+			<< DAVINCI_USB_TXINT_SHIFT;
+	musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp);
+	old = tmp;
+	tmp = (musb->epmask & (0xfffe & DAVINCI_USB_RX_ENDPTS_MASK))
+			<< DAVINCI_USB_RXINT_SHIFT;
+	musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp);
+	tmp |= old;
+
+	val = ~MUSB_INTR_SOF;
+	tmp |= ((val & 0x01ff) << DAVINCI_USB_USBINT_SHIFT);
+	musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp);
+
+	if (is_dma_capable() && !dma_off)
+		printk(KERN_WARNING "%s %s: dma not reactivated\n",
+				__FILE__, __func__);
+	else
+		dma_off = 0;
+
+	/* force a DRVVBUS irq so we can start polling for ID change */
+	if (is_otg_enabled(musb))
+		musb_writel(musb->ctrl_base, DAVINCI_USB_INT_SET_REG,
+			DAVINCI_INTR_DRVVBUS << DAVINCI_USB_USBINT_SHIFT);
+}
+
+/*
+ * Disable the HDRC and flush interrupts
+ */
+void musb_platform_disable(struct musb *musb)
+{
+	/* because we don't set CTRLR.UINT, "important" to:
+	 *  - not read/write INTRUSB/INTRUSBE
+	 *  - (except during initial setup, as workaround)
+	 *  - use INTSETR/INTCLRR instead
+	 */
+	musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_CLR_REG,
+			  DAVINCI_USB_USBINT_MASK
+			| DAVINCI_USB_TXINT_MASK
+			| DAVINCI_USB_RXINT_MASK);
+	musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+	musb_writel(musb->ctrl_base, DAVINCI_USB_EOI_REG, 0);
+
+	if (is_dma_capable() && !dma_off)
+		WARNING("dma still active\n");
+}
+
+
+/* REVISIT it's not clear whether DaVinci can support full OTG.  */
+
+static int vbus_state = -1;
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+#define	portstate(stmt)		stmt
+#else
+#define	portstate(stmt)
+#endif
+
+
+/* VBUS SWITCHING IS BOARD-SPECIFIC */
+
+#ifdef CONFIG_MACH_DAVINCI_EVM
+#ifndef CONFIG_MACH_DAVINCI_EVM_OTG
+
+/* I2C operations are always synchronous, and require a task context.
+ * With unloaded systems, using the shared workqueue seems to suffice
+ * to satisfy the 100msec A_WAIT_VRISE timeout...
+ */
+static void evm_deferred_drvvbus(struct work_struct *ignored)
+{
+	davinci_i2c_expander_op(0x3a, USB_DRVVBUS, vbus_state);
+	vbus_state = !vbus_state;
+}
+static DECLARE_WORK(evm_vbus_work, evm_deferred_drvvbus);
+
+#endif	/* modified board */
+#endif	/* EVM */
+
+static void davinci_source_power(struct musb *musb, int is_on, int immediate)
+{
+	if (is_on)
+		is_on = 1;
+
+	if (vbus_state == is_on)
+		return;
+	vbus_state = !is_on;		/* 0/1 vs "-1 == unknown/init" */
+
+#ifdef CONFIG_MACH_DAVINCI_EVM
+	if (machine_is_davinci_evm()) {
+#ifdef CONFIG_MACH_DAVINCI_EVM_OTG
+		/* modified EVM board switching VBUS with GPIO(6) not I2C
+		 * NOTE:  PINMUX0.RGB888 (bit23) must be clear
+		 */
+		if (is_on)
+			gpio_set(GPIO(6));
+		else
+			gpio_clear(GPIO(6));
+		immediate = 1;
+#else
+		if (immediate)
+			davinci_i2c_expander_op(0x3a, USB_DRVVBUS, !is_on);
+		else
+			schedule_work(&evm_vbus_work);
+#endif
+	}
+#endif
+	if (immediate)
+		vbus_state = is_on;
+}
+
+static void davinci_set_vbus(struct musb *musb, int is_on)
+{
+	WARN_ON(is_on && is_peripheral_active(musb));
+	davinci_source_power(musb, is_on, 0);
+}
+
+
+#define	POLL_SECONDS	2
+
+static struct timer_list otg_workaround;
+
+static void otg_timer(unsigned long _musb)
+{
+	struct musb		*musb = (void *)_musb;
+	void __iomem		*mregs = musb->mregs;
+	u8			devctl;
+	unsigned long		flags;
+
+	/* We poll because DaVinci won't expose several OTG-critical
+	 * status change events (from the transceiver) otherwise.
+	 */
+	devctl = musb_readb(mregs, MUSB_DEVCTL);
+	DBG(7, "poll devctl %02x (%s)\n", devctl, otg_state_string(musb));
+
+	spin_lock_irqsave(&musb->lock, flags);
+	switch (musb->xceiv.state) {
+	case OTG_STATE_A_WAIT_VFALL:
+		/* Wait till VBUS falls below SessionEnd (~0.2V); the 1.3 RTL
+		 * seems to mis-handle session "start" otherwise (or in our
+		 * case "recover"), in routine "VBUS was valid by the time
+		 * VBUSERR got reported during enumeration" cases.
+		 */
+		if (devctl & MUSB_DEVCTL_VBUS) {
+			mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+			break;
+		}
+		musb->xceiv.state = OTG_STATE_A_WAIT_VRISE;
+		musb_writel(musb->ctrl_base, DAVINCI_USB_INT_SET_REG,
+			MUSB_INTR_VBUSERROR << DAVINCI_USB_USBINT_SHIFT);
+		break;
+	case OTG_STATE_B_IDLE:
+		if (!is_peripheral_enabled(musb))
+			break;
+
+		/* There's no ID-changed IRQ, so we have no good way to tell
+		 * when to switch to the A-Default state machine (by setting
+		 * the DEVCTL.SESSION flag).
+		 *
+		 * Workaround:  whenever we're in B_IDLE, try setting the
+		 * session flag every few seconds.  If it works, ID was
+		 * grounded and we're now in the A-Default state machine.
+		 *
+		 * NOTE setting the session flag is _supposed_ to trigger
+		 * SRP, but clearly it doesn't.
+		 */
+		musb_writeb(mregs, MUSB_DEVCTL,
+				devctl | MUSB_DEVCTL_SESSION);
+		devctl = musb_readb(mregs, MUSB_DEVCTL);
+		if (devctl & MUSB_DEVCTL_BDEVICE)
+			mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+		else
+			musb->xceiv.state = OTG_STATE_A_IDLE;
+		break;
+	default:
+		break;
+	}
+	spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+static irqreturn_t davinci_interrupt(int irq, void *__hci)
+{
+	unsigned long	flags;
+	irqreturn_t	retval = IRQ_NONE;
+	struct musb	*musb = __hci;
+	void __iomem	*tibase = musb->ctrl_base;
+	u32		tmp;
+
+	spin_lock_irqsave(&musb->lock, flags);
+
+	/* NOTE: DaVinci shadows the Mentor IRQs.  Don't manage them through
+	 * the Mentor registers (except for setup), use the TI ones and EOI.
+	 *
+	 * Docs describe irq "vector" registers associated with the CPPI and
+	 * USB EOI registers.  These hold a bitmask corresponding to the
+	 * current IRQ, not an irq handler address.  Would using those bits
+	 * resolve some of the races observed in this dispatch code??
+	 */
+
+	/* CPPI interrupts share the same IRQ line, but have their own
+	 * mask, state, "vector", and EOI registers.
+	 */
+	if (is_cppi_enabled()) {
+		u32 cppi_tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG);
+		u32 cppi_rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG);
+
+		if (cppi_tx || cppi_rx) {
+			DBG(4, "CPPI IRQ t%x r%x\n", cppi_tx, cppi_rx);
+			cppi_completion(musb, cppi_rx, cppi_tx);
+			retval = IRQ_HANDLED;
+		}
+	}
+
+	/* ack and handle non-CPPI interrupts */
+	tmp = musb_readl(tibase, DAVINCI_USB_INT_SRC_MASKED_REG);
+	musb_writel(tibase, DAVINCI_USB_INT_SRC_CLR_REG, tmp);
+	DBG(4, "IRQ %08x\n", tmp);
+
+	musb->int_rx = (tmp & DAVINCI_USB_RXINT_MASK)
+			>> DAVINCI_USB_RXINT_SHIFT;
+	musb->int_tx = (tmp & DAVINCI_USB_TXINT_MASK)
+			>> DAVINCI_USB_TXINT_SHIFT;
+	musb->int_usb = (tmp & DAVINCI_USB_USBINT_MASK)
+			>> DAVINCI_USB_USBINT_SHIFT;
+
+	/* DRVVBUS irqs are the only proxy we have (a very poor one!) for
+	 * DaVinci's missing ID change IRQ.  We need an ID change IRQ to
+	 * switch appropriately between halves of the OTG state machine.
+	 * Managing DEVCTL.SESSION per Mentor docs requires we know its
+	 * value, but DEVCTL.BDEVICE is invalid without DEVCTL.SESSION set.
+	 * Also, DRVVBUS pulses for SRP (but not at 5V) ...
+	 */
+	if (tmp & (DAVINCI_INTR_DRVVBUS << DAVINCI_USB_USBINT_SHIFT)) {
+		int	drvvbus = musb_readl(tibase, DAVINCI_USB_STAT_REG);
+		void __iomem *mregs = musb->mregs;
+		u8	devctl = musb_readb(mregs, MUSB_DEVCTL);
+		int	err = is_host_enabled(musb)
+				&& (musb->int_usb & MUSB_INTR_VBUSERROR);
+		if (err) {
+			/* The Mentor core doesn't debounce VBUS as needed
+			 * to cope with device connect current spikes. This
+			 * means it's not uncommon for bus-powered devices
+			 * to get VBUS errors during enumeration.
+			 *
+			 * This is a workaround, but newer RTL from Mentor
+			 * seems to allow a better one: "re"starting sessions
+			 * without waiting (on EVM, a **long** time) for VBUS
+			 * to stop registering in devctl.
+			 */
+			musb->int_usb &= ~MUSB_INTR_VBUSERROR;
+			musb->xceiv.state = OTG_STATE_A_WAIT_VFALL;
+			mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+			WARNING("VBUS error workaround (delay coming)\n");
+		} else if (is_host_enabled(musb) && drvvbus) {
+			musb->is_active = 1;
+			MUSB_HST_MODE(musb);
+			musb->xceiv.default_a = 1;
+			musb->xceiv.state = OTG_STATE_A_WAIT_VRISE;
+			portstate(musb->port1_status |= USB_PORT_STAT_POWER);
+			del_timer(&otg_workaround);
+		} else {
+			musb->is_active = 0;
+			MUSB_DEV_MODE(musb);
+			musb->xceiv.default_a = 0;
+			musb->xceiv.state = OTG_STATE_B_IDLE;
+			portstate(musb->port1_status &= ~USB_PORT_STAT_POWER);
+		}
+
+		/* NOTE:  this must complete poweron within 100 msec */
+		davinci_source_power(musb, drvvbus, 0);
+		DBG(2, "VBUS %s (%s)%s, devctl %02x\n",
+				drvvbus ? "on" : "off",
+				otg_state_string(musb),
+				err ? " ERROR" : "",
+				devctl);
+		retval = IRQ_HANDLED;
+	}
+
+	if (musb->int_tx || musb->int_rx || musb->int_usb)
+		retval |= musb_interrupt(musb);
+
+	/* irq stays asserted until EOI is written */
+	musb_writel(tibase, DAVINCI_USB_EOI_REG, 0);
+
+	/* poll for ID change */
+	if (is_otg_enabled(musb)
+			&& musb->xceiv.state == OTG_STATE_B_IDLE)
+		mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+
+	spin_unlock_irqrestore(&musb->lock, flags);
+
+	/* REVISIT we sometimes get unhandled IRQs
+	 * (e.g. ep0).  not clear why...
+	 */
+	if (retval != IRQ_HANDLED)
+		DBG(5, "unhandled? %08x\n", tmp);
+	return IRQ_HANDLED;
+}
+
+int __init musb_platform_init(struct musb *musb)
+{
+	void __iomem	*tibase = musb->ctrl_base;
+	u32		revision;
+
+	musb->mregs += DAVINCI_BASE_OFFSET;
+#if 0
+	/* REVISIT there's something odd about clocking, this
+	 * didn't appear do the job ...
+	 */
+	musb->clock = clk_get(pDevice, "usb");
+	if (IS_ERR(musb->clock))
+		return PTR_ERR(musb->clock);
+
+	status = clk_enable(musb->clock);
+	if (status < 0)
+		return -ENODEV;
+#endif
+
+	/* returns zero if e.g. not clocked */
+	revision = musb_readl(tibase, DAVINCI_USB_VERSION_REG);
+	if (revision == 0)
+		return -ENODEV;
+
+	if (is_host_enabled(musb))
+		setup_timer(&otg_workaround, otg_timer, (unsigned long) musb);
+
+	musb->board_set_vbus = davinci_set_vbus;
+	davinci_source_power(musb, 0, 1);
+
+	/* reset the controller */
+	musb_writel(tibase, DAVINCI_USB_CTRL_REG, 0x1);
+
+	/* start the on-chip PHY and its PLL */
+	phy_on();
+
+	msleep(5);
+
+	/* NOTE:  irqs are in mixed mode, not bypass to pure-musb */
+	pr_debug("DaVinci OTG revision %08x phy %03x control %02x\n",
+		revision, __raw_readl((void __force __iomem *)
+				IO_ADDRESS(USBPHY_CTL_PADDR)),
+		musb_readb(tibase, DAVINCI_USB_CTRL_REG));
+
+	musb->isr = davinci_interrupt;
+	return 0;
+}
+
+int musb_platform_exit(struct musb *musb)
+{
+	if (is_host_enabled(musb))
+		del_timer_sync(&otg_workaround);
+
+	davinci_source_power(musb, 0 /*off*/, 1);
+
+	/* delay, to avoid problems with module reload */
+	if (is_host_enabled(musb) && musb->xceiv.default_a) {
+		int	maxdelay = 30;
+		u8	devctl, warn = 0;
+
+		/* if there's no peripheral connected, this can take a
+		 * long time to fall, especially on EVM with huge C133.
+		 */
+		do {
+			devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+			if (!(devctl & MUSB_DEVCTL_VBUS))
+				break;
+			if ((devctl & MUSB_DEVCTL_VBUS) != warn) {
+				warn = devctl & MUSB_DEVCTL_VBUS;
+				DBG(1, "VBUS %d\n",
+					warn >> MUSB_DEVCTL_VBUS_SHIFT);
+			}
+			msleep(1000);
+			maxdelay--;
+		} while (maxdelay > 0);
+
+		/* in OTG mode, another host might be connected */
+		if (devctl & MUSB_DEVCTL_VBUS)
+			DBG(1, "VBUS off timeout (devctl %02x)\n", devctl);
+	}
+
+	phy_off();
+	return 0;
+}
diff --git a/drivers/usb/musb/davinci.h b/drivers/usb/musb/davinci.h
new file mode 100644
index 0000000..7fb6238
--- /dev/null
+++ b/drivers/usb/musb/davinci.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2005-2006 by Texas Instruments
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ */
+
+#ifndef __MUSB_HDRDF_H__
+#define __MUSB_HDRDF_H__
+
+/*
+ * DaVinci-specific definitions
+ */
+
+/* Integrated highspeed/otg PHY */
+#define	USBPHY_CTL_PADDR	(DAVINCI_SYSTEM_MODULE_BASE + 0x34)
+#define	USBPHY_PHYCLKGD		(1 << 8)
+#define	USBPHY_SESNDEN		(1 << 7)	/* v(sess_end) comparator */
+#define	USBPHY_VBDTCTEN		(1 << 6)	/* v(bus) comparator */
+#define	USBPHY_PHYPLLON		(1 << 4)	/* override pll suspend */
+#define	USBPHY_CLKO1SEL		(1 << 3)
+#define	USBPHY_OSCPDWN		(1 << 2)
+#define	USBPHY_PHYPDWN		(1 << 0)
+
+/* For now include usb OTG module registers here */
+#define DAVINCI_USB_VERSION_REG		0x00
+#define DAVINCI_USB_CTRL_REG		0x04
+#define DAVINCI_USB_STAT_REG		0x08
+#define DAVINCI_RNDIS_REG		0x10
+#define DAVINCI_AUTOREQ_REG		0x14
+#define DAVINCI_USB_INT_SOURCE_REG	0x20
+#define DAVINCI_USB_INT_SET_REG		0x24
+#define DAVINCI_USB_INT_SRC_CLR_REG	0x28
+#define DAVINCI_USB_INT_MASK_REG	0x2c
+#define DAVINCI_USB_INT_MASK_SET_REG	0x30
+#define DAVINCI_USB_INT_MASK_CLR_REG	0x34
+#define DAVINCI_USB_INT_SRC_MASKED_REG	0x38
+#define DAVINCI_USB_EOI_REG		0x3c
+#define DAVINCI_USB_EOI_INTVEC		0x40
+
+/* BEGIN CPPI-generic (?) */
+
+/* CPPI related registers */
+#define DAVINCI_TXCPPI_CTRL_REG		0x80
+#define DAVINCI_TXCPPI_TEAR_REG		0x84
+#define DAVINCI_CPPI_EOI_REG		0x88
+#define DAVINCI_CPPI_INTVEC_REG		0x8c
+#define DAVINCI_TXCPPI_MASKED_REG	0x90
+#define DAVINCI_TXCPPI_RAW_REG		0x94
+#define DAVINCI_TXCPPI_INTENAB_REG	0x98
+#define DAVINCI_TXCPPI_INTCLR_REG	0x9c
+
+#define DAVINCI_RXCPPI_CTRL_REG		0xC0
+#define DAVINCI_RXCPPI_MASKED_REG	0xD0
+#define DAVINCI_RXCPPI_RAW_REG		0xD4
+#define DAVINCI_RXCPPI_INTENAB_REG	0xD8
+#define DAVINCI_RXCPPI_INTCLR_REG	0xDC
+
+#define DAVINCI_RXCPPI_BUFCNT0_REG	0xE0
+#define DAVINCI_RXCPPI_BUFCNT1_REG	0xE4
+#define DAVINCI_RXCPPI_BUFCNT2_REG	0xE8
+#define DAVINCI_RXCPPI_BUFCNT3_REG	0xEC
+
+/* CPPI state RAM entries */
+#define DAVINCI_CPPI_STATERAM_BASE_OFFSET   0x100
+
+#define DAVINCI_TXCPPI_STATERAM_OFFSET(chnum) \
+	(DAVINCI_CPPI_STATERAM_BASE_OFFSET +       ((chnum) * 0x40))
+#define DAVINCI_RXCPPI_STATERAM_OFFSET(chnum) \
+	(DAVINCI_CPPI_STATERAM_BASE_OFFSET + 0x20 + ((chnum) * 0x40))
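+
+/* e.g. TX channel 2 state RAM sits at 0x100 + 2 * 0x40 = 0x180, and the
+ * matching RX channel 2 state RAM at 0x1a0 */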
+
+/* CPPI masks */
+#define DAVINCI_DMA_CTRL_ENABLE		1
+#define DAVINCI_DMA_CTRL_DISABLE	0
+
+#define DAVINCI_DMA_ALL_CHANNELS_ENABLE	0xF
+#define DAVINCI_DMA_ALL_CHANNELS_DISABLE 0xF
+
+/* END CPPI-generic (?) */
+
+#define DAVINCI_USB_TX_ENDPTS_MASK	0x1f		/* ep0 + 4 tx */
+#define DAVINCI_USB_RX_ENDPTS_MASK	0x1e		/* 4 rx */
+
+#define DAVINCI_USB_USBINT_SHIFT	16
+#define DAVINCI_USB_TXINT_SHIFT		0
+#define DAVINCI_USB_RXINT_SHIFT		8
+
+#define DAVINCI_INTR_DRVVBUS		0x0100
+
+#define DAVINCI_USB_USBINT_MASK		0x01ff0000	/* 8 Mentor, DRVVBUS */
+#define DAVINCI_USB_TXINT_MASK \
+	(DAVINCI_USB_TX_ENDPTS_MASK << DAVINCI_USB_TXINT_SHIFT)
+#define DAVINCI_USB_RXINT_MASK \
+	(DAVINCI_USB_RX_ENDPTS_MASK << DAVINCI_USB_RXINT_SHIFT)
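+
+/* so the combined IRQ word decodes as:  0x0000001f TX endpoints
+ * (including ep0), 0x00001e00 RX endpoints, 0x01ff0000 core USB
+ * interrupts plus DRVVBUS */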
+
+#define DAVINCI_BASE_OFFSET		0x400
+
+#endif	/* __MUSB_HDRDF_H__ */
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
new file mode 100644
index 0000000..d68ec6d
--- /dev/null
+++ b/drivers/usb/musb/musb_core.c
@@ -0,0 +1,2261 @@
+/*
+ * MUSB OTG driver core code
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * Inventra (Multipoint) Dual-Role Controller Driver for Linux.
+ *
+ * This consists of a Host Controller Driver (HCD) and a peripheral
+ * controller driver implementing the "Gadget" API; OTG support is
+ * in the works.  These are normal Linux-USB controller drivers which
+ * use IRQs and have no dedicated thread.
+ *
+ * This version of the driver has only been used with products from
+ * Texas Instruments.  Those products integrate the Inventra logic
+ * with other DMA, IRQ, and bus modules, as well as other logic that
+ * needs to be reflected in this driver.
+ *
+ *
+ * NOTE:  the original Mentor code here was pretty much a collection
+ * of mechanisms that don't seem to have been fully integrated/working
+ * for *any* Linux kernel version.  This version aims at Linux 2.6.now.
+ * Key open issues include:
+ *
+ *  - Lack of host-side transaction scheduling, for all transfer types.
+ *    The hardware doesn't do it; instead, software must.
+ *
+ *    This is not an issue for OTG devices that don't support external
+ *    hubs, but for more "normal" USB hosts it's a user issue that the
+ *    "multipoint" support doesn't scale in the expected ways.  That
+ *    includes DaVinci EVM in a common non-OTG mode.
+ *
+ *      * Control and bulk use dedicated endpoints, and there's as
+ *        yet no mechanism to either (a) reclaim the hardware when
+ *        peripherals are NAKing, which gets complicated with bulk
+ *        endpoints, or (b) use more than a single bulk endpoint in
+ *        each direction.
+ *
+ *        RESULT:  one device may be perceived as blocking another one.
+ *
+ *      * Interrupt and isochronous will dynamically allocate endpoint
+ *        hardware, but (a) there's no record keeping for bandwidth;
+ *        (b) in the common case that few endpoints are available, there
+ *        is no mechanism to reuse endpoints to talk to multiple devices.
+ *
+ *        RESULT:  At one extreme, bandwidth can be overcommitted in
+ *        some hardware configurations, and no faults will be reported.
+ *        At the other extreme, the bandwidth capabilities which do
+ *        exist tend to be severely undercommitted.  You can't yet hook
+ *        up both a keyboard and a mouse to an external USB hub.
+ */
+
+/*
+ * This gets many kinds of configuration information:
+ *	- Kconfig for everything user-configurable
+ *	- <asm/arch/hdrc_cnf.h> for SOC or family details
+ *	- platform_device for addressing, irq, and platform_data
+ *	- platform_data is mostly for board-specific information
+ *
+ * Most of the conditional compilation will (someday) vanish.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/kobject.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#ifdef	CONFIG_ARM
+#include <asm/arch/hardware.h>
+#include <asm/arch/memory.h>
+#include <asm/mach-types.h>
+#endif
+
+#include "musb_core.h"
+
+
+#ifdef CONFIG_ARCH_DAVINCI
+#include "davinci.h"
+#endif
+
+
+
+#if MUSB_DEBUG > 0
+unsigned debug = MUSB_DEBUG;
+module_param(debug, uint, 0);
+MODULE_PARM_DESC(debug, "initial debug message level");
+
+#define MUSB_VERSION_SUFFIX	"/dbg"
+#endif
+
+#define DRIVER_AUTHOR "Mentor Graphics, Texas Instruments, Nokia"
+#define DRIVER_DESC "Inventra Dual-Role USB Controller Driver"
+
+#define MUSB_VERSION_BASE "6.0"
+
+#ifndef MUSB_VERSION_SUFFIX
+#define MUSB_VERSION_SUFFIX	""
+#endif
+#define MUSB_VERSION	MUSB_VERSION_BASE MUSB_VERSION_SUFFIX
+
+#define DRIVER_INFO DRIVER_DESC ", v" MUSB_VERSION
+
+#define MUSB_DRIVER_NAME "musb_hdrc"
+const char musb_driver_name[] = MUSB_DRIVER_NAME;
+
+MODULE_DESCRIPTION(DRIVER_INFO);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" MUSB_DRIVER_NAME);
+
+
+/*-------------------------------------------------------------------------*/
+
+static inline struct musb *dev_to_musb(struct device *dev)
+{
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+	/* usbcore insists dev->driver_data is a "struct hcd *" */
+	return hcd_to_musb(dev_get_drvdata(dev));
+#else
+	return dev_get_drvdata(dev);
+#endif
+}
+
+/*-------------------------------------------------------------------------*/
+
+#ifndef CONFIG_USB_TUSB6010
+/*
+ * Load an endpoint's FIFO
+ */
+void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
+{
+	void __iomem *fifo = hw_ep->fifo;
+
+	prefetch((u8 *)src);
+
+	DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
+			'T', hw_ep->epnum, fifo, len, src);
+
+	/* we can't assume unaligned reads work */
+	if (likely((0x01 & (unsigned long) src) == 0)) {
+		u16	index = 0;
+
+		/* best case is 32bit-aligned source address */
+		if ((0x02 & (unsigned long) src) == 0) {
+			if (len >= 4) {
+				writesl(fifo, src + index, len >> 2);
+				index += len & ~0x03;
+			}
+			if (len & 0x02) {
+				musb_writew(fifo, 0, *(u16 *)&src[index]);
+				index += 2;
+			}
+		} else {
+			if (len >= 2) {
+				writesw(fifo, src + index, len >> 1);
+				index += len & ~0x01;
+			}
+		}
+		if (len & 0x01)
+			musb_writeb(fifo, 0, src[index]);
+	} else  {
+		/* byte aligned */
+		writesb(fifo, src, len);
+	}
+}
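+
+/* e.g. a 7-byte write from a 32-bit-aligned source above takes one
+ * writesl() word, one musb_writew(), and a final musb_writeb() for the
+ * odd trailing byte.
+ */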
+
+/*
+ * Unload an endpoint's FIFO
+ */
+void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
+{
+	void __iomem *fifo = hw_ep->fifo;
+
+	DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
+			'R', hw_ep->epnum, fifo, len, dst);
+
+	/* we can't assume unaligned writes work */
+	if (likely((0x01 & (unsigned long) dst) == 0)) {
+		u16	index = 0;
+
+		/* best case is 32bit-aligned destination address */
+		if ((0x02 & (unsigned long) dst) == 0) {
+			if (len >= 4) {
+				readsl(fifo, dst, len >> 2);
+				index = len & ~0x03;
+			}
+			if (len & 0x02) {
+				*(u16 *)&dst[index] = musb_readw(fifo, 0);
+				index += 2;
+			}
+		} else {
+			if (len >= 2) {
+				readsw(fifo, dst, len >> 1);
+				index = len & ~0x01;
+			}
+		}
+		if (len & 0x01)
+			dst[index] = musb_readb(fifo, 0);
+	} else  {
+		/* byte aligned */
+		readsb(fifo, dst, len);
+	}
+}
+
+#endif	/* normal PIO */
+
+
+/*-------------------------------------------------------------------------*/
+
+/* for high speed test mode; see USB 2.0 spec 7.1.20 */
+static const u8 musb_test_packet[53] = {
+	/* implicit SYNC then DATA0 to start */
+
+	/* JKJKJKJK x9 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	/* JJKKJJKK x8 */
+	0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+	/* JJJJKKKK x8 */
+	0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee,
+	/* JJJJJJJKKKKKKK x8 */
+	0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	/* JJJJJJJK x8 */
+	0x7f, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd,
+	/* JKKKKKKK x10, JK */
+	0xfc, 0x7e, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, 0x7e
+
+	/* implicit CRC16 then EOP to end */
+};
+
+void musb_load_testpacket(struct musb *musb)
+{
+	void __iomem	*regs = musb->endpoints[0].regs;
+
+	musb_ep_select(musb->mregs, 0);
+	musb_write_fifo(musb->control_ep,
+			sizeof(musb_test_packet), musb_test_packet);
+	musb_writew(regs, MUSB_CSR0, MUSB_CSR0_TXPKTRDY);
+}
+
+/*-------------------------------------------------------------------------*/
+
+const char *otg_state_string(struct musb *musb)
+{
+	switch (musb->xceiv.state) {
+	case OTG_STATE_A_IDLE:		return "a_idle";
+	case OTG_STATE_A_WAIT_VRISE:	return "a_wait_vrise";
+	case OTG_STATE_A_WAIT_BCON:	return "a_wait_bcon";
+	case OTG_STATE_A_HOST:		return "a_host";
+	case OTG_STATE_A_SUSPEND:	return "a_suspend";
+	case OTG_STATE_A_PERIPHERAL:	return "a_peripheral";
+	case OTG_STATE_A_WAIT_VFALL:	return "a_wait_vfall";
+	case OTG_STATE_A_VBUS_ERR:	return "a_vbus_err";
+	case OTG_STATE_B_IDLE:		return "b_idle";
+	case OTG_STATE_B_SRP_INIT:	return "b_srp_init";
+	case OTG_STATE_B_PERIPHERAL:	return "b_peripheral";
+	case OTG_STATE_B_WAIT_ACON:	return "b_wait_acon";
+	case OTG_STATE_B_HOST:		return "b_host";
+	default:			return "UNDEFINED";
+	}
+}
+
+#ifdef	CONFIG_USB_MUSB_OTG
+
+/*
+ * See also USB_OTG_1-3.pdf 6.6.5 Timers
+ * REVISIT: Are the other timers done in the hardware?
+ */
+#define TB_ASE0_BRST		100	/* Min 3.125 ms */
+
+/*
+ * Handles OTG hnp timeouts, such as b_ase0_brst
+ */
+void musb_otg_timer_func(unsigned long data)
+{
+	struct musb	*musb = (struct musb *)data;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&musb->lock, flags);
+	switch (musb->xceiv.state) {
+	case OTG_STATE_B_WAIT_ACON:
+		DBG(1, "HNP: b_wait_acon timeout; back to b_peripheral\n");
+		musb_g_disconnect(musb);
+		musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+		musb->is_active = 0;
+		break;
+	case OTG_STATE_A_WAIT_BCON:
+		DBG(1, "HNP: a_wait_bcon timeout; back to a_host\n");
+		musb_hnp_stop(musb);
+		break;
+	default:
+		DBG(1, "HNP: Unhandled mode %s\n", otg_state_string(musb));
+	}
+	musb->ignore_disconnect = 0;
+	spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+static DEFINE_TIMER(musb_otg_timer, musb_otg_timer_func, 0, 0);
+
+/*
+ * Stops the B-device HNP state. Caller must take care of locking.
+ */
+void musb_hnp_stop(struct musb *musb)
+{
+	struct usb_hcd	*hcd = musb_to_hcd(musb);
+	void __iomem	*mbase = musb->mregs;
+	u8	reg;
+
+	switch (musb->xceiv.state) {
+	case OTG_STATE_A_PERIPHERAL:
+	case OTG_STATE_A_WAIT_VFALL:
+	case OTG_STATE_A_WAIT_BCON:
+		DBG(1, "HNP: Switching back to A-host\n");
+		musb_g_disconnect(musb);
+		musb->xceiv.state = OTG_STATE_A_IDLE;
+		MUSB_HST_MODE(musb);
+		musb->is_active = 0;
+		break;
+	case OTG_STATE_B_HOST:
+		DBG(1, "HNP: Disabling HR\n");
+		hcd->self.is_b_host = 0;
+		musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+		MUSB_DEV_MODE(musb);
+		reg = musb_readb(mbase, MUSB_POWER);
+		reg |= MUSB_POWER_SUSPENDM;
+		musb_writeb(mbase, MUSB_POWER, reg);
+		/* REVISIT: Start SESSION_REQUEST here? */
+		break;
+	default:
+		DBG(1, "HNP: Stopping in unknown state %s\n",
+			otg_state_string(musb));
+	}
+
+	/*
+	 * When returning to A state after HNP, avoid hub_port_rebounce(),
+	 * which cause occasional OPT A "Did not receive reset after connect"
+	 * errors.
+	 */
+	musb->port1_status &=
+		~(1 << USB_PORT_FEAT_C_CONNECTION);
+}
+
+#endif
+
+/*
+ * Interrupt Service Routine to record USB "global" interrupts.
+ * Since these do not happen often and signify things of
+ * paramount importance, it seems OK to check them individually;
+ * the order of the tests is specified in the manual
+ *
+ * @param musb instance pointer
+ * @param int_usb register contents
+ * @param devctl
+ * @param power
+ */
+
+#define STAGE0_MASK (MUSB_INTR_RESUME | MUSB_INTR_SESSREQ \
+		| MUSB_INTR_VBUSERROR | MUSB_INTR_CONNECT \
+		| MUSB_INTR_RESET)
+
+static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
+				u8 devctl, u8 power)
+{
+	irqreturn_t handled = IRQ_NONE;
+	void __iomem *mbase = musb->mregs;
+
+	DBG(3, "<== Power=%02x, DevCtl=%02x, int_usb=0x%x\n", power, devctl,
+		int_usb);
+
+	/* in host mode, the peripheral may issue remote wakeup.
+	 * in peripheral mode, the host may resume the link.
+	 * spurious RESUME irqs happen too, paired with SUSPEND.
+	 */
+	if (int_usb & MUSB_INTR_RESUME) {
+		handled = IRQ_HANDLED;
+		DBG(3, "RESUME (%s)\n", otg_state_string(musb));
+
+		if (devctl & MUSB_DEVCTL_HM) {
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+			switch (musb->xceiv.state) {
+			case OTG_STATE_A_SUSPEND:
+				/* remote wakeup?  later, GetPortStatus
+				 * will stop RESUME signaling
+				 */
+
+				if (power & MUSB_POWER_SUSPENDM) {
+					/* spurious */
+					musb->int_usb &= ~MUSB_INTR_SUSPEND;
+					DBG(2, "Spurious SUSPENDM\n");
+					break;
+				}
+
+				power &= ~MUSB_POWER_SUSPENDM;
+				musb_writeb(mbase, MUSB_POWER,
+						power | MUSB_POWER_RESUME);
+
+				musb->port1_status |=
+						(USB_PORT_STAT_C_SUSPEND << 16)
+						| MUSB_PORT_STAT_RESUME;
+				musb->rh_timer = jiffies
+						+ msecs_to_jiffies(20);
+
+				musb->xceiv.state = OTG_STATE_A_HOST;
+				musb->is_active = 1;
+				usb_hcd_resume_root_hub(musb_to_hcd(musb));
+				break;
+			case OTG_STATE_B_WAIT_ACON:
+				musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+				musb->is_active = 1;
+				MUSB_DEV_MODE(musb);
+				break;
+			default:
+				WARNING("bogus %s RESUME (%s)\n",
+					"host",
+					otg_state_string(musb));
+			}
+#endif
+		} else {
+			switch (musb->xceiv.state) {
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+			case OTG_STATE_A_SUSPEND:
+				/* possibly DISCONNECT is upcoming */
+				musb->xceiv.state = OTG_STATE_A_HOST;
+				usb_hcd_resume_root_hub(musb_to_hcd(musb));
+				break;
+#endif
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+			case OTG_STATE_B_WAIT_ACON:
+			case OTG_STATE_B_PERIPHERAL:
+				/* disconnect while suspended?  we may
+				 * not get a disconnect irq...
+				 */
+				if ((devctl & MUSB_DEVCTL_VBUS)
+						!= (3 << MUSB_DEVCTL_VBUS_SHIFT)
+						) {
+					musb->int_usb |= MUSB_INTR_DISCONNECT;
+					musb->int_usb &= ~MUSB_INTR_SUSPEND;
+					break;
+				}
+				musb_g_resume(musb);
+				break;
+			case OTG_STATE_B_IDLE:
+				musb->int_usb &= ~MUSB_INTR_SUSPEND;
+				break;
+#endif
+			default:
+				WARNING("bogus %s RESUME (%s)\n",
+					"peripheral",
+					otg_state_string(musb));
+			}
+		}
+	}
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+	/* see manual for the order of the tests */
+	if (int_usb & MUSB_INTR_SESSREQ) {
+		DBG(1, "SESSION_REQUEST (%s)\n", otg_state_string(musb));
+
+		/* IRQ arrives from ID pin sense or (later, if VBUS power
+		 * is removed) SRP.  responses are time critical:
+		 *  - turn on VBUS (with silicon-specific mechanism)
+		 *  - go through A_WAIT_VRISE
+		 *  - ... to A_WAIT_BCON.
+		 * a_wait_vrise_tmout triggers VBUS_ERROR transitions
+		 */
+		musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
+		musb->ep0_stage = MUSB_EP0_START;
+		musb->xceiv.state = OTG_STATE_A_IDLE;
+		MUSB_HST_MODE(musb);
+		musb_set_vbus(musb, 1);
+
+		handled = IRQ_HANDLED;
+	}
+
+	if (int_usb & MUSB_INTR_VBUSERROR) {
+		int	ignore = 0;
+
+		/* During connection as an A-Device, we may see a short
+		 * current spikes causing voltage drop, because of cable
+		 * and peripheral capacitance combined with vbus draw.
+		 * (So: less common with truly self-powered devices, where
+		 * vbus doesn't act like a power supply.)
+		 *
+		 * Such spikes are short; usually less than ~500 usec, max
+		 * of ~2 msec.  That is, they're not sustained overcurrent
+		 * errors, though they're reported using VBUSERROR irqs.
+		 *
+		 * Workarounds:  (a) hardware: use self powered devices.
+		 * (b) software:  ignore non-repeated VBUS errors.
+		 *
+		 * REVISIT:  do delays from lots of DEBUG_KERNEL checks
+		 * make trouble here, keeping VBUS < 4.4V ?
+		 */
+		switch (musb->xceiv.state) {
+		case OTG_STATE_A_HOST:
+			/* recovery is dicey once we've gotten past the
+			 * initial stages of enumeration, but if VBUS
+			 * stayed ok at the other end of the link, and
+			 * another reset is due (at least for high speed,
+			 * to redo the chirp etc), it might work OK...
+			 */
+		case OTG_STATE_A_WAIT_BCON:
+		case OTG_STATE_A_WAIT_VRISE:
+			if (musb->vbuserr_retry) {
+				musb->vbuserr_retry--;
+				ignore = 1;
+				devctl |= MUSB_DEVCTL_SESSION;
+				musb_writeb(mbase, MUSB_DEVCTL, devctl);
+			} else {
+				musb->port1_status |=
+					  (1 << USB_PORT_FEAT_OVER_CURRENT)
+					| (1 << USB_PORT_FEAT_C_OVER_CURRENT);
+			}
+			break;
+		default:
+			break;
+		}
+
+		DBG(1, "VBUS_ERROR in %s (%02x, %s), retry #%d, port1 %08x\n",
+				otg_state_string(musb),
+				devctl,
+				({ char *s;
+				switch (devctl & MUSB_DEVCTL_VBUS) {
+				case 0 << MUSB_DEVCTL_VBUS_SHIFT:
+					s = "<SessEnd"; break;
+				case 1 << MUSB_DEVCTL_VBUS_SHIFT:
+					s = "<AValid"; break;
+				case 2 << MUSB_DEVCTL_VBUS_SHIFT:
+					s = "<VBusValid"; break;
+				/* case 3 << MUSB_DEVCTL_VBUS_SHIFT: */
+				default:
+					s = "VALID"; break;
+				}; s; }),
+				VBUSERR_RETRY_COUNT - musb->vbuserr_retry,
+				musb->port1_status);
+
+		/* go through A_WAIT_VFALL then start a new session */
+		if (!ignore)
+			musb_set_vbus(musb, 0);
+		handled = IRQ_HANDLED;
+	}
+
+	if (int_usb & MUSB_INTR_CONNECT) {
+		struct usb_hcd *hcd = musb_to_hcd(musb);
+
+		handled = IRQ_HANDLED;
+		musb->is_active = 1;
+		set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
+
+		musb->ep0_stage = MUSB_EP0_START;
+
+#ifdef CONFIG_USB_MUSB_OTG
+		/* flush endpoints when transitioning from Device Mode */
+		if (is_peripheral_active(musb)) {
+			/* REVISIT HNP; just force disconnect */
+		}
+		musb_writew(mbase, MUSB_INTRTXE, musb->epmask);
+		musb_writew(mbase, MUSB_INTRRXE, musb->epmask & 0xfffe);
+		musb_writeb(mbase, MUSB_INTRUSBE, 0xf7);
+#endif
+		musb->port1_status &= ~(USB_PORT_STAT_LOW_SPEED
+					|USB_PORT_STAT_HIGH_SPEED
+					|USB_PORT_STAT_ENABLE
+					);
+		musb->port1_status |= USB_PORT_STAT_CONNECTION
+					|(USB_PORT_STAT_C_CONNECTION << 16);
+
+		/* high vs full speed is just a guess until after reset */
+		if (devctl & MUSB_DEVCTL_LSDEV)
+			musb->port1_status |= USB_PORT_STAT_LOW_SPEED;
+
+		if (hcd->status_urb)
+			usb_hcd_poll_rh_status(hcd);
+		else
+			usb_hcd_resume_root_hub(hcd);
+
+		MUSB_HST_MODE(musb);
+
+		/* indicate new connection to OTG machine */
+		switch (musb->xceiv.state) {
+		case OTG_STATE_B_PERIPHERAL:
+			if (int_usb & MUSB_INTR_SUSPEND) {
+				DBG(1, "HNP: SUSPEND+CONNECT, now b_host\n");
+				musb->xceiv.state = OTG_STATE_B_HOST;
+				hcd->self.is_b_host = 1;
+				int_usb &= ~MUSB_INTR_SUSPEND;
+			} else
+				DBG(1, "CONNECT as b_peripheral???\n");
+			break;
+		case OTG_STATE_B_WAIT_ACON:
+			DBG(1, "HNP: Waiting to switch to b_host state\n");
+			musb->xceiv.state = OTG_STATE_B_HOST;
+			hcd->self.is_b_host = 1;
+			break;
+		default:
+			if ((devctl & MUSB_DEVCTL_VBUS)
+					== (3 << MUSB_DEVCTL_VBUS_SHIFT)) {
+				musb->xceiv.state = OTG_STATE_A_HOST;
+				hcd->self.is_b_host = 0;
+			}
+			break;
+		}
+		DBG(1, "CONNECT (%s) devctl %02x\n",
+				otg_state_string(musb), devctl);
+	}
+#endif	/* CONFIG_USB_MUSB_HDRC_HCD */
+
+	/* mentor saves a bit: bus reset and babble share the same irq.
+	 * only host sees babble; only peripheral sees bus reset.
+	 */
+	if (int_usb & MUSB_INTR_RESET) {
+		if (is_host_capable() && (devctl & MUSB_DEVCTL_HM) != 0) {
+			/*
+			 * Looks like non-HS BABBLE can be ignored, but
+			 * HS BABBLE is an error condition. For HS the solution
+			 * is to avoid babble in the first place and fix what
+			 * caused BABBLE. When HS BABBLE happens we can only
+			 * stop the session.
+			 */
+			if (devctl & (MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV))
+				DBG(1, "BABBLE devctl: %02x\n", devctl);
+			else {
+				ERR("Stopping host session -- babble\n");
+				musb_writeb(mbase, MUSB_DEVCTL, 0);
+			}
+		} else if (is_peripheral_capable()) {
+			DBG(1, "BUS RESET as %s\n", otg_state_string(musb));
+			switch (musb->xceiv.state) {
+#ifdef CONFIG_USB_OTG
+			case OTG_STATE_A_SUSPEND:
+				/* We need to ignore disconnect on suspend
+				 * otherwise tusb 2.0 won't reconnect after a
+				 * power cycle, which breaks otg compliance.
+				 */
+				musb->ignore_disconnect = 1;
+				musb_g_reset(musb);
+				/* FALLTHROUGH */
+			case OTG_STATE_A_WAIT_BCON:	/* OPT TD.4.7-900ms */
+				DBG(1, "HNP: Setting timer as %s\n",
+						otg_state_string(musb));
+				musb_otg_timer.data = (unsigned long)musb;
+				mod_timer(&musb_otg_timer, jiffies
+					+ msecs_to_jiffies(100));
+				break;
+			case OTG_STATE_A_PERIPHERAL:
+				musb_hnp_stop(musb);
+				break;
+			case OTG_STATE_B_WAIT_ACON:
+				DBG(1, "HNP: RESET (%s), to b_peripheral\n",
+					otg_state_string(musb));
+				musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+				musb_g_reset(musb);
+				break;
+#endif
+			case OTG_STATE_B_IDLE:
+				musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+				/* FALLTHROUGH */
+			case OTG_STATE_B_PERIPHERAL:
+				musb_g_reset(musb);
+				break;
+			default:
+				DBG(1, "Unhandled BUS RESET as %s\n",
+					otg_state_string(musb));
+			}
+		}
+
+		handled = IRQ_HANDLED;
+	}
+	schedule_work(&musb->irq_work);
+
+	return handled;
+}
+
+/*
+ * Interrupt Service Routine to record USB "global" interrupts.
+ * Since these do not happen often and signify things of
+ * paramount importance, it seems OK to check them individually;
+ * the order of the tests is specified in the manual
+ *
+ * @param musb instance pointer
+ * @param int_usb register contents
+ * @param devctl
+ * @param power
+ */
+static irqreturn_t musb_stage2_irq(struct musb *musb, u8 int_usb,
+				u8 devctl, u8 power)
+{
+	irqreturn_t handled = IRQ_NONE;
+
+#if 0
+/* REVISIT ... this would be for multiplexing periodic endpoints, or
+ * supporting transfer phasing to prevent exceeding ISO bandwidth
+ * limits of a given frame or microframe.
+ *
+ * It's not needed for peripheral side, which dedicates endpoints;
+ * though it _might_ use SOF irqs for other purposes.
+ *
+ * And it's not currently needed for host side, which also dedicates
+ * endpoints, relies on TX/RX interval registers, and isn't claimed
+ * to support ISO transfers yet.
+ */
+	if (int_usb & MUSB_INTR_SOF) {
+		void __iomem *mbase = musb->mregs;
+		struct musb_hw_ep	*ep;
+		u8 epnum;
+		u16 frame;
+
+		DBG(6, "START_OF_FRAME\n");
+		handled = IRQ_HANDLED;
+
+		/* start any periodic Tx transfers waiting for current frame */
+		frame = musb_readw(mbase, MUSB_FRAME);
+		ep = musb->endpoints;
+		for (epnum = 1; (epnum < musb->nr_endpoints)
+					&& (musb->epmask >= (1 << epnum));
+				epnum++, ep++) {
+			/*
+			 * FIXME handle framecounter wraps (12 bits)
+			 * eliminate duplicated StartUrb logic
+			 */
+			if (ep->dwWaitFrame >= frame) {
+				ep->dwWaitFrame = 0;
+				pr_debug("SOF --> periodic TX%s on %d\n",
+					ep->tx_channel ? " DMA" : "",
+					epnum);
+				if (!ep->tx_channel)
+					musb_h_tx_start(musb, epnum);
+				else
+					cppi_hostdma_start(musb, epnum);
+			}
+		}		/* end of for loop */
+	}
+#endif
+
+	if ((int_usb & MUSB_INTR_DISCONNECT) && !musb->ignore_disconnect) {
+		DBG(1, "DISCONNECT (%s) as %s, devctl %02x\n",
+				otg_state_string(musb),
+				MUSB_MODE(musb), devctl);
+		handled = IRQ_HANDLED;
+
+		switch (musb->xceiv.state) {
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+		case OTG_STATE_A_HOST:
+		case OTG_STATE_A_SUSPEND:
+			musb_root_disconnect(musb);
+			if (musb->a_wait_bcon != 0)
+				musb_platform_try_idle(musb, jiffies
+					+ msecs_to_jiffies(musb->a_wait_bcon));
+			break;
+#endif	/* HOST */
+#ifdef CONFIG_USB_MUSB_OTG
+		case OTG_STATE_B_HOST:
+			musb_hnp_stop(musb);
+			break;
+		case OTG_STATE_A_PERIPHERAL:
+			musb_hnp_stop(musb);
+			musb_root_disconnect(musb);
+			/* FALLTHROUGH */
+		case OTG_STATE_B_WAIT_ACON:
+			/* FALLTHROUGH */
+#endif	/* OTG */
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+		case OTG_STATE_B_PERIPHERAL:
+		case OTG_STATE_B_IDLE:
+			musb_g_disconnect(musb);
+			break;
+#endif	/* GADGET */
+		default:
+			WARNING("unhandled DISCONNECT transition (%s)\n",
+				otg_state_string(musb));
+			break;
+		}
+
+		schedule_work(&musb->irq_work);
+	}
+
+	if (int_usb & MUSB_INTR_SUSPEND) {
+		DBG(1, "SUSPEND (%s) devctl %02x power %02x\n",
+				otg_state_string(musb), devctl, power);
+		handled = IRQ_HANDLED;
+
+		switch (musb->xceiv.state) {
+#ifdef	CONFIG_USB_MUSB_OTG
+		case OTG_STATE_A_PERIPHERAL:
+			/*
+			 * We cannot stop HNP here, devctl BDEVICE might be
+			 * still set.
+			 */
+			break;
+#endif
+		case OTG_STATE_B_PERIPHERAL:
+			musb_g_suspend(musb);
+			musb->is_active = is_otg_enabled(musb)
+					&& musb->xceiv.gadget->b_hnp_enable;
+			if (musb->is_active) {
+#ifdef	CONFIG_USB_MUSB_OTG
+				musb->xceiv.state = OTG_STATE_B_WAIT_ACON;
+				DBG(1, "HNP: Setting timer for b_ase0_brst\n");
+				musb_otg_timer.data = (unsigned long)musb;
+				mod_timer(&musb_otg_timer, jiffies
+					+ msecs_to_jiffies(TB_ASE0_BRST));
+#endif
+			}
+			break;
+		case OTG_STATE_A_WAIT_BCON:
+			if (musb->a_wait_bcon != 0)
+				musb_platform_try_idle(musb, jiffies
+					+ msecs_to_jiffies(musb->a_wait_bcon));
+			break;
+		case OTG_STATE_A_HOST:
+			musb->xceiv.state = OTG_STATE_A_SUSPEND;
+			musb->is_active = is_otg_enabled(musb)
+					&& musb->xceiv.host->b_hnp_enable;
+			break;
+		case OTG_STATE_B_HOST:
+			/* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */
+			DBG(1, "REVISIT: SUSPEND as B_HOST\n");
+			break;
+		default:
+			/* "should not happen" */
+			musb->is_active = 0;
+			break;
+		}
+		schedule_work(&musb->irq_work);
+	}
+
+
+	return handled;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+* Program the HDRC to start (enable interrupts, dma, etc.).
+*/
+void musb_start(struct musb *musb)
+{
+	void __iomem	*regs = musb->mregs;
+	u8		devctl = musb_readb(regs, MUSB_DEVCTL);
+
+	DBG(2, "<== devctl %02x\n", devctl);
+
+	/*  Set INT enable registers, enable interrupts */
+	musb_writew(regs, MUSB_INTRTXE, musb->epmask);
+	musb_writew(regs, MUSB_INTRRXE, musb->epmask & 0xfffe);
+	musb_writeb(regs, MUSB_INTRUSBE, 0xf7);
+
+	musb_writeb(regs, MUSB_TESTMODE, 0);
+
+	/* put into basic highspeed mode and start session */
+	musb_writeb(regs, MUSB_POWER, MUSB_POWER_ISOUPDATE
+						| MUSB_POWER_SOFTCONN
+						| MUSB_POWER_HSENAB
+						/* ENSUSPEND wedges tusb */
+						/* | MUSB_POWER_ENSUSPEND */
+						);
+
+	musb->is_active = 0;
+	devctl = musb_readb(regs, MUSB_DEVCTL);
+	devctl &= ~MUSB_DEVCTL_SESSION;
+
+	if (is_otg_enabled(musb)) {
+		/* session started after:
+		 * (a) ID-grounded irq, host mode;
+		 * (b) vbus present/connect IRQ, peripheral mode;
+		 * (c) peripheral initiates, using SRP
+		 */
+		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
+			musb->is_active = 1;
+		else
+			devctl |= MUSB_DEVCTL_SESSION;
+
+	} else if (is_host_enabled(musb)) {
+		/* assume ID pin is hard-wired to ground */
+		devctl |= MUSB_DEVCTL_SESSION;
+
+	} else /* peripheral is enabled */ {
+		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
+			musb->is_active = 1;
+	}
+	musb_platform_enable(musb);
+	musb_writeb(regs, MUSB_DEVCTL, devctl);
+}
+
+
+static void musb_generic_disable(struct musb *musb)
+{
+	void __iomem	*mbase = musb->mregs;
+	u16	temp;
+
+	/* disable interrupts */
+	musb_writeb(mbase, MUSB_INTRUSBE, 0);
+	musb_writew(mbase, MUSB_INTRTXE, 0);
+	musb_writew(mbase, MUSB_INTRRXE, 0);
+
+	/* off */
+	musb_writeb(mbase, MUSB_DEVCTL, 0);
+
+	/*  flush pending interrupts */
+	temp = musb_readb(mbase, MUSB_INTRUSB);
+	temp = musb_readw(mbase, MUSB_INTRTX);
+	temp = musb_readw(mbase, MUSB_INTRRX);
+
+}
+
+/*
+ * Make the HDRC stop (disable interrupts, etc.);
+ * reversible by musb_start
+ * called on gadget driver unregister
+ * with controller locked, irqs blocked
+ * acts as a NOP unless some role activated the hardware
+ */
+void musb_stop(struct musb *musb)
+{
+	/* stop IRQs, timers, ... */
+	musb_platform_disable(musb);
+	musb_generic_disable(musb);
+	DBG(3, "HDRC disabled\n");
+
+	/* FIXME
+	 *  - mark host and/or peripheral drivers unusable/inactive
+	 *  - disable DMA (and enable it in HdrcStart)
+	 *  - make sure we can musb_start() after musb_stop(); with
+	 *    OTG mode, gadget driver module rmmod/modprobe cycles that
+	 *  - ...
+	 */
+	musb_platform_try_idle(musb, 0);
+}
+
+static void musb_shutdown(struct platform_device *pdev)
+{
+	struct musb	*musb = dev_to_musb(&pdev->dev);
+	unsigned long	flags;
+
+	spin_lock_irqsave(&musb->lock, flags);
+	musb_platform_disable(musb);
+	musb_generic_disable(musb);
+	if (musb->clock) {
+		clk_put(musb->clock);
+		musb->clock = NULL;
+	}
+	spin_unlock_irqrestore(&musb->lock, flags);
+
+	/* FIXME power down */
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * The silicon either has hard-wired endpoint configurations, or else
+ * "dynamic fifo" sizing.  The driver has support for both, though at this
+ * writing only the dynamic sizing is very well tested.   We use normal
+ * idioms to so both modes are compile-tested, but dead code elimination
+ * leaves only the relevant one in the object file.
+ *
+ * We don't currently use dynamic fifo setup capability to do anything
+ * more than selecting one of a bunch of predefined configurations.
+ */
+#if defined(CONFIG_USB_TUSB6010) || \
+	defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP34XX)
+static ushort __initdata fifo_mode = 4;
+#else
+static ushort __initdata fifo_mode = 2;
+#endif
+
+/* "modprobe ... fifo_mode=1" etc */
+module_param(fifo_mode, ushort, 0);
+MODULE_PARM_DESC(fifo_mode, "initial endpoint configuration");
+
+
+enum fifo_style { FIFO_RXTX, FIFO_TX, FIFO_RX } __attribute__ ((packed));
+enum buf_mode { BUF_SINGLE, BUF_DOUBLE } __attribute__ ((packed));
+
+struct fifo_cfg {
+	u8		hw_ep_num;
+	enum fifo_style	style;
+	enum buf_mode	mode;
+	u16		maxpacket;
+};
+
+/*
+ * tables defining fifo_mode values.  define more if you like.
+ * for host side, make sure both halves of ep1 are set up.
+ */
+
+/* mode 0 - fits in 2KB */
+static struct fifo_cfg __initdata mode_0_cfg[] = {
+{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, },
+{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, },
+{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
+{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
+};
+
+/* mode 1 - fits in 4KB */
+static struct fifo_cfg __initdata mode_1_cfg[] = {
+{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, .mode = BUF_DOUBLE, },
+{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, .mode = BUF_DOUBLE, },
+{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, .mode = BUF_DOUBLE, },
+{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
+{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
+};
+
+/* mode 2 - fits in 4KB */
+static struct fifo_cfg __initdata mode_2_cfg[] = {
+{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, },
+{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_TX,   .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_RX,   .maxpacket = 512, },
+{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
+{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
+};
+
+/* mode 3 - fits in 4KB */
+static struct fifo_cfg __initdata mode_3_cfg[] = {
+{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, .mode = BUF_DOUBLE, },
+{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, .mode = BUF_DOUBLE, },
+{ .hw_ep_num = 2, .style = FIFO_TX,   .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_RX,   .maxpacket = 512, },
+{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
+{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
+};
+
+/* mode 4 - fits in 16KB */
+static struct fifo_cfg __initdata mode_4_cfg[] = {
+{ .hw_ep_num =  1, .style = FIFO_TX,   .maxpacket = 512, },
+{ .hw_ep_num =  1, .style = FIFO_RX,   .maxpacket = 512, },
+{ .hw_ep_num =  2, .style = FIFO_TX,   .maxpacket = 512, },
+{ .hw_ep_num =  2, .style = FIFO_RX,   .maxpacket = 512, },
+{ .hw_ep_num =  3, .style = FIFO_TX,   .maxpacket = 512, },
+{ .hw_ep_num =  3, .style = FIFO_RX,   .maxpacket = 512, },
+{ .hw_ep_num =  4, .style = FIFO_TX,   .maxpacket = 512, },
+{ .hw_ep_num =  4, .style = FIFO_RX,   .maxpacket = 512, },
+{ .hw_ep_num =  5, .style = FIFO_TX,   .maxpacket = 512, },
+{ .hw_ep_num =  5, .style = FIFO_RX,   .maxpacket = 512, },
+{ .hw_ep_num =  6, .style = FIFO_TX,   .maxpacket = 512, },
+{ .hw_ep_num =  6, .style = FIFO_RX,   .maxpacket = 512, },
+{ .hw_ep_num =  7, .style = FIFO_TX,   .maxpacket = 512, },
+{ .hw_ep_num =  7, .style = FIFO_RX,   .maxpacket = 512, },
+{ .hw_ep_num =  8, .style = FIFO_TX,   .maxpacket = 512, },
+{ .hw_ep_num =  8, .style = FIFO_RX,   .maxpacket = 512, },
+{ .hw_ep_num =  9, .style = FIFO_TX,   .maxpacket = 512, },
+{ .hw_ep_num =  9, .style = FIFO_RX,   .maxpacket = 512, },
+{ .hw_ep_num = 10, .style = FIFO_TX,   .maxpacket = 512, },
+{ .hw_ep_num = 10, .style = FIFO_RX,   .maxpacket = 512, },
+{ .hw_ep_num = 11, .style = FIFO_TX,   .maxpacket = 512, },
+{ .hw_ep_num = 11, .style = FIFO_RX,   .maxpacket = 512, },
+{ .hw_ep_num = 12, .style = FIFO_TX,   .maxpacket = 512, },
+{ .hw_ep_num = 12, .style = FIFO_RX,   .maxpacket = 512, },
+{ .hw_ep_num = 13, .style = FIFO_TX,   .maxpacket = 512, },
+{ .hw_ep_num = 13, .style = FIFO_RX,   .maxpacket = 512, },
+{ .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
+{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
+};
+
+
+/*
+ * configure a fifo; for non-shared endpoints, this may be called
+ * once for a tx fifo and once for an rx fifo.
+ *
+ * returns negative errno or offset for next fifo.
+ */
+static int __init
+fifo_setup(struct musb *musb, struct musb_hw_ep  *hw_ep,
+		const struct fifo_cfg *cfg, u16 offset)
+{
+	void __iomem	*mbase = musb->mregs;
+	int	size = 0;
+	u16	maxpacket = cfg->maxpacket;
+	u16	c_off = offset >> 3;
+	u8	c_size;
+
+	/* expect hw_ep has already been zero-initialized */
+
+	size = ffs(max(maxpacket, (u16) 8)) - 1;
+	maxpacket = 1 << size;
+
+	c_size = size - 3;
+	if (cfg->mode == BUF_DOUBLE) {
+		if ((offset + (maxpacket << 1)) >
+				(1 << (musb->config->ram_bits + 2)))
+			return -EMSGSIZE;
+		c_size |= MUSB_FIFOSZ_DPB;
+	} else {
+		if ((offset + maxpacket) > (1 << (musb->config->ram_bits + 2)))
+			return -EMSGSIZE;
+	}
+
+	/* configure the FIFO */
+	musb_writeb(mbase, MUSB_INDEX, hw_ep->epnum);
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+	/* EP0 reserved endpoint for control, bidirectional;
+	 * EP1 reserved for bulk, two unidirection halves.
+	 */
+	if (hw_ep->epnum == 1)
+		musb->bulk_ep = hw_ep;
+	/* REVISIT error check:  be sure ep0 can both rx and tx ... */
+#endif
+	switch (cfg->style) {
+	case FIFO_TX:
+		musb_writeb(mbase, MUSB_TXFIFOSZ, c_size);
+		musb_writew(mbase, MUSB_TXFIFOADD, c_off);
+		hw_ep->tx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
+		hw_ep->max_packet_sz_tx = maxpacket;
+		break;
+	case FIFO_RX:
+		musb_writeb(mbase, MUSB_RXFIFOSZ, c_size);
+		musb_writew(mbase, MUSB_RXFIFOADD, c_off);
+		hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
+		hw_ep->max_packet_sz_rx = maxpacket;
+		break;
+	case FIFO_RXTX:
+		musb_writeb(mbase, MUSB_TXFIFOSZ, c_size);
+		musb_writew(mbase, MUSB_TXFIFOADD, c_off);
+		hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
+		hw_ep->max_packet_sz_rx = maxpacket;
+
+		musb_writeb(mbase, MUSB_RXFIFOSZ, c_size);
+		musb_writew(mbase, MUSB_RXFIFOADD, c_off);
+		hw_ep->tx_double_buffered = hw_ep->rx_double_buffered;
+		hw_ep->max_packet_sz_tx = maxpacket;
+
+		hw_ep->is_shared_fifo = true;
+		break;
+	}
+
+	/* NOTE rx and tx endpoint irqs aren't managed separately,
+	 * which happens to be ok
+	 */
+	musb->epmask |= (1 << hw_ep->epnum);
+
+	return offset + (maxpacket << ((c_size & MUSB_FIFOSZ_DPB) ? 1 : 0));
+}
+
+static struct fifo_cfg __initdata ep0_cfg = {
+	.style = FIFO_RXTX, .maxpacket = 64,
+};
+
+static int __init ep_config_from_table(struct musb *musb)
+{
+	const struct fifo_cfg	*cfg;
+	unsigned		i, n;
+	int			offset;
+	struct musb_hw_ep	*hw_ep = musb->endpoints;
+
+	switch (fifo_mode) {
+	default:
+		fifo_mode = 0;
+		/* FALLTHROUGH */
+	case 0:
+		cfg = mode_0_cfg;
+		n = ARRAY_SIZE(mode_0_cfg);
+		break;
+	case 1:
+		cfg = mode_1_cfg;
+		n = ARRAY_SIZE(mode_1_cfg);
+		break;
+	case 2:
+		cfg = mode_2_cfg;
+		n = ARRAY_SIZE(mode_2_cfg);
+		break;
+	case 3:
+		cfg = mode_3_cfg;
+		n = ARRAY_SIZE(mode_3_cfg);
+		break;
+	case 4:
+		cfg = mode_4_cfg;
+		n = ARRAY_SIZE(mode_4_cfg);
+		break;
+	}
+
+	printk(KERN_DEBUG "%s: setup fifo_mode %d\n",
+			musb_driver_name, fifo_mode);
+
+
+	offset = fifo_setup(musb, hw_ep, &ep0_cfg, 0);
+	/* assert(offset > 0) */
+
+	/* NOTE:  for RTL versions >= 1.400 EPINFO and RAMINFO would
+	 * be better than static musb->config->num_eps and DYN_FIFO_SIZE...
+	 */
+
+	for (i = 0; i < n; i++) {
+		u8	epn = cfg->hw_ep_num;
+
+		if (epn >= musb->config->num_eps) {
+			pr_debug("%s: invalid ep %d\n",
+					musb_driver_name, epn);
+			continue;
+		}
+		offset = fifo_setup(musb, hw_ep + epn, cfg++, offset);
+		if (offset < 0) {
+			pr_debug("%s: mem overrun, ep %d\n",
+					musb_driver_name, epn);
+			return -EINVAL;
+		}
+		epn++;
+		musb->nr_endpoints = max(epn, musb->nr_endpoints);
+	}
+
+	printk(KERN_DEBUG "%s: %d/%d max ep, %d/%d memory\n",
+			musb_driver_name,
+			n + 1, musb->config->num_eps * 2 - 1,
+			offset, (1 << (musb->config->ram_bits + 2)));
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+	if (!musb->bulk_ep) {
+		pr_debug("%s: missing bulk\n", musb_driver_name);
+		return -EINVAL;
+	}
+#endif
+
+	return 0;
+}
+
+
+/*
+ * ep_config_from_hw - when MUSB_C_DYNFIFO_DEF is false
+ * @param musb the controller
+ */
+static int __init ep_config_from_hw(struct musb *musb)
+{
+	u8 epnum = 0, reg;
+	struct musb_hw_ep *hw_ep;
+	void *mbase = musb->mregs;
+
+	DBG(2, "<== static silicon ep config\n");
+
+	/* FIXME pick up ep0 maxpacket size */
+
+	for (epnum = 1; epnum < musb->config->num_eps; epnum++) {
+		musb_ep_select(mbase, epnum);
+		hw_ep = musb->endpoints + epnum;
+
+		/* read from core using indexed model */
+		reg = musb_readb(hw_ep->regs, 0x10 + MUSB_FIFOSIZE);
+		if (!reg) {
+			/* 0's returned when no more endpoints */
+			break;
+		}
+		musb->nr_endpoints++;
+		musb->epmask |= (1 << epnum);
+
+		hw_ep->max_packet_sz_tx = 1 << (reg & 0x0f);
+
+		/* shared TX/RX FIFO? */
+		if ((reg & 0xf0) == 0xf0) {
+			hw_ep->max_packet_sz_rx = hw_ep->max_packet_sz_tx;
+			hw_ep->is_shared_fifo = true;
+			continue;
+		} else {
+			hw_ep->max_packet_sz_rx = 1 << ((reg & 0xf0) >> 4);
+			hw_ep->is_shared_fifo = false;
+		}
+
+		/* FIXME set up hw_ep->{rx,tx}_double_buffered */
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+		/* pick an RX/TX endpoint for bulk */
+		if (hw_ep->max_packet_sz_tx < 512
+				|| hw_ep->max_packet_sz_rx < 512)
+			continue;
+
+		/* REVISIT:  this algorithm is lazy, we should at least
+		 * try to pick a double buffered endpoint.
+		 */
+		if (musb->bulk_ep)
+			continue;
+		musb->bulk_ep = hw_ep;
+#endif
+	}
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+	if (!musb->bulk_ep) {
+		pr_debug("%s: missing bulk\n", musb_driver_name);
+		return -EINVAL;
+	}
+#endif
+
+	return 0;
+}
+
+enum { MUSB_CONTROLLER_MHDRC, MUSB_CONTROLLER_HDRC, };
+
+/* Initialize MUSB (M)HDRC part of the USB hardware subsystem;
+ * configure endpoints, or take their config from silicon
+ */
+static int __init musb_core_init(u16 musb_type, struct musb *musb)
+{
+#ifdef MUSB_AHB_ID
+	u32 data;
+#endif
+	u8 reg;
+	char *type;
+	u16 hwvers, rev_major, rev_minor;
+	char aInfo[78], aRevision[32], aDate[12];
+	void __iomem	*mbase = musb->mregs;
+	int		status = 0;
+	int		i;
+
+	/* log core options (read using indexed model) */
+	musb_ep_select(mbase, 0);
+	reg = musb_readb(mbase, 0x10 + MUSB_CONFIGDATA);
+
+	strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8");
+	if (reg & MUSB_CONFIGDATA_DYNFIFO)
+		strcat(aInfo, ", dyn FIFOs");
+	if (reg & MUSB_CONFIGDATA_MPRXE) {
+		strcat(aInfo, ", bulk combine");
+#ifdef C_MP_RX
+		musb->bulk_combine = true;
+#else
+		strcat(aInfo, " (X)");		/* no driver support */
+#endif
+	}
+	if (reg & MUSB_CONFIGDATA_MPTXE) {
+		strcat(aInfo, ", bulk split");
+#ifdef C_MP_TX
+		musb->bulk_split = true;
+#else
+		strcat(aInfo, " (X)");		/* no driver support */
+#endif
+	}
+	if (reg & MUSB_CONFIGDATA_HBRXE) {
+		strcat(aInfo, ", HB-ISO Rx");
+		strcat(aInfo, " (X)");		/* no driver support */
+	}
+	if (reg & MUSB_CONFIGDATA_HBTXE) {
+		strcat(aInfo, ", HB-ISO Tx");
+		strcat(aInfo, " (X)");		/* no driver support */
+	}
+	if (reg & MUSB_CONFIGDATA_SOFTCONE)
+		strcat(aInfo, ", SoftConn");
+
+	printk(KERN_DEBUG "%s: ConfigData=0x%02x (%s)\n",
+			musb_driver_name, reg, aInfo);
+
+#ifdef MUSB_AHB_ID
+	data = musb_readl(mbase, 0x404);
+	sprintf(aDate, "%04d-%02x-%02x", (data & 0xffff),
+		(data >> 16) & 0xff, (data >> 24) & 0xff);
+	/* FIXME ID2 and ID3 are unused */
+	data = musb_readl(mbase, 0x408);
+	printk(KERN_DEBUG "ID2=%lx\n", (long unsigned)data);
+	data = musb_readl(mbase, 0x40c);
+	printk(KERN_DEBUG "ID3=%lx\n", (long unsigned)data);
+	reg = musb_readb(mbase, 0x400);
+	musb_type = ('M' == reg) ? MUSB_CONTROLLER_MHDRC : MUSB_CONTROLLER_HDRC;
+#else
+	aDate[0] = 0;
+#endif
+	if (MUSB_CONTROLLER_MHDRC == musb_type) {
+		musb->is_multipoint = 1;
+		type = "M";
+	} else {
+		musb->is_multipoint = 0;
+		type = "";
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+#ifndef	CONFIG_USB_OTG_BLACKLIST_HUB
+		printk(KERN_ERR
+			"%s: kernel must blacklist external hubs\n",
+			musb_driver_name);
+#endif
+#endif
+	}
+
+	/* log release info */
+	hwvers = musb_readw(mbase, MUSB_HWVERS);
+	rev_major = (hwvers >> 10) & 0x1f;
+	rev_minor = hwvers & 0x3ff;
+	snprintf(aRevision, 32, "%d.%d%s", rev_major,
+		rev_minor, (hwvers & 0x8000) ? "RC" : "");
+	printk(KERN_DEBUG "%s: %sHDRC RTL version %s %s\n",
+			musb_driver_name, type, aRevision, aDate);
+
+	/* configure ep0 */
+	musb->endpoints[0].max_packet_sz_tx = MUSB_EP0_FIFOSIZE;
+	musb->endpoints[0].max_packet_sz_rx = MUSB_EP0_FIFOSIZE;
+
+	/* discover endpoint configuration */
+	musb->nr_endpoints = 1;
+	musb->epmask = 1;
+
+	if (reg & MUSB_CONFIGDATA_DYNFIFO) {
+		if (musb->config->dyn_fifo)
+			status = ep_config_from_table(musb);
+		else {
+			ERR("reconfigure software for Dynamic FIFOs\n");
+			status = -ENODEV;
+		}
+	} else {
+		if (!musb->config->dyn_fifo)
+			status = ep_config_from_hw(musb);
+		else {
+			ERR("reconfigure software for static FIFOs\n");
+			return -ENODEV;
+		}
+	}
+
+	if (status < 0)
+		return status;
+
+	/* finish init, and print endpoint config */
+	for (i = 0; i < musb->nr_endpoints; i++) {
+		struct musb_hw_ep	*hw_ep = musb->endpoints + i;
+
+		hw_ep->fifo = MUSB_FIFO_OFFSET(i) + mbase;
+#ifdef CONFIG_USB_TUSB6010
+		hw_ep->fifo_async = musb->async + 0x400 + MUSB_FIFO_OFFSET(i);
+		hw_ep->fifo_sync = musb->sync + 0x400 + MUSB_FIFO_OFFSET(i);
+		hw_ep->fifo_sync_va =
+			musb->sync_va + 0x400 + MUSB_FIFO_OFFSET(i);
+
+		if (i == 0)
+			hw_ep->conf = mbase - 0x400 + TUSB_EP0_CONF;
+		else
+			hw_ep->conf = mbase + 0x400 + (((i - 1) & 0xf) << 2);
+#endif
+
+		hw_ep->regs = MUSB_EP_OFFSET(i, 0) + mbase;
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+		hw_ep->target_regs = MUSB_BUSCTL_OFFSET(i, 0) + mbase;
+		hw_ep->rx_reinit = 1;
+		hw_ep->tx_reinit = 1;
+#endif
+
+		if (hw_ep->max_packet_sz_tx) {
+			printk(KERN_DEBUG
+				"%s: hw_ep %d%s, %smax %d\n",
+				musb_driver_name, i,
+				hw_ep->is_shared_fifo ? "shared" : "tx",
+				hw_ep->tx_double_buffered
+					? "doublebuffer, " : "",
+				hw_ep->max_packet_sz_tx);
+		}
+		if (hw_ep->max_packet_sz_rx && !hw_ep->is_shared_fifo) {
+			printk(KERN_DEBUG
+				"%s: hw_ep %d%s, %smax %d\n",
+				musb_driver_name, i,
+				"rx",
+				hw_ep->rx_double_buffered
+					? "doublebuffer, " : "",
+				hw_ep->max_packet_sz_rx);
+		}
+		if (!(hw_ep->max_packet_sz_tx || hw_ep->max_packet_sz_rx))
+			DBG(1, "hw_ep %d not configured\n", i);
+	}
+
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430)
+
+static irqreturn_t generic_interrupt(int irq, void *__hci)
+{
+	unsigned long	flags;
+	irqreturn_t	retval = IRQ_NONE;
+	struct musb	*musb = __hci;
+
+	spin_lock_irqsave(&musb->lock, flags);
+
+	musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB);
+	musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX);
+	musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX);
+
+	if (musb->int_usb || musb->int_tx || musb->int_rx)
+		retval = musb_interrupt(musb);
+
+	spin_unlock_irqrestore(&musb->lock, flags);
+
+	/* REVISIT we sometimes get spurious IRQs on g_ep0
+	 * not clear why...
+	 */
+	if (retval != IRQ_HANDLED)
+		DBG(5, "spurious?\n");
+
+	return IRQ_HANDLED;
+}
+
+#else
+#define generic_interrupt	NULL
+#endif
+
+/*
+ * handle all the irqs defined by the HDRC core. for now we expect:  other
+ * irq sources (phy, dma, etc) will be handled first, musb->int_* values
+ * will be assigned, and the irq will already have been acked.
+ *
+ * called in irq context with spinlock held, irqs blocked
+ */
+irqreturn_t musb_interrupt(struct musb *musb)
+{
+	irqreturn_t	retval = IRQ_NONE;
+	u8		devctl, power;
+	int		ep_num;
+	u32		reg;
+
+	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+	power = musb_readb(musb->mregs, MUSB_POWER);
+
+	DBG(4, "** IRQ %s usb%04x tx%04x rx%04x\n",
+		(devctl & MUSB_DEVCTL_HM) ? "host" : "peripheral",
+		musb->int_usb, musb->int_tx, musb->int_rx);
+
+	/* the core can interrupt us for multiple reasons; docs have
+	 * a generic interrupt flowchart to follow
+	 */
+	if (musb->int_usb & STAGE0_MASK)
+		retval |= musb_stage0_irq(musb, musb->int_usb,
+				devctl, power);
+
+	/* "stage 1" is handling endpoint irqs */
+
+	/* handle endpoint 0 first */
+	if (musb->int_tx & 1) {
+		if (devctl & MUSB_DEVCTL_HM)
+			retval |= musb_h_ep0_irq(musb);
+		else
+			retval |= musb_g_ep0_irq(musb);
+	}
+
+	/* RX on endpoints 1-15 */
+	reg = musb->int_rx >> 1;
+	ep_num = 1;
+	while (reg) {
+		if (reg & 1) {
+			/* musb_ep_select(musb->mregs, ep_num); */
+			/* REVISIT just retval = ep->rx_irq(...) */
+			retval = IRQ_HANDLED;
+			if (devctl & MUSB_DEVCTL_HM) {
+				if (is_host_capable())
+					musb_host_rx(musb, ep_num);
+			} else {
+				if (is_peripheral_capable())
+					musb_g_rx(musb, ep_num);
+			}
+		}
+
+		reg >>= 1;
+		ep_num++;
+	}
+
+	/* TX on endpoints 1-15 */
+	reg = musb->int_tx >> 1;
+	ep_num = 1;
+	while (reg) {
+		if (reg & 1) {
+			/* musb_ep_select(musb->mregs, ep_num); */
+			/* REVISIT just retval |= ep->tx_irq(...) */
+			retval = IRQ_HANDLED;
+			if (devctl & MUSB_DEVCTL_HM) {
+				if (is_host_capable())
+					musb_host_tx(musb, ep_num);
+			} else {
+				if (is_peripheral_capable())
+					musb_g_tx(musb, ep_num);
+			}
+		}
+		reg >>= 1;
+		ep_num++;
+	}
+
+	/* finish handling "global" interrupts after handling fifos */
+	if (musb->int_usb)
+		retval |= musb_stage2_irq(musb,
+				musb->int_usb, devctl, power);
+
+	return retval;
+}
+
+
+#ifndef CONFIG_MUSB_PIO_ONLY
+static int __initdata use_dma = 1;
+
+/* "modprobe ... use_dma=0" etc */
+module_param(use_dma, bool, 0);
+MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");
+
+void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit)
+{
+	u8	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+
+	/* called with controller lock already held */
+
+	if (!epnum) {
+#ifndef CONFIG_USB_TUSB_OMAP_DMA
+		if (!is_cppi_enabled()) {
+			/* endpoint 0 */
+			if (devctl & MUSB_DEVCTL_HM)
+				musb_h_ep0_irq(musb);
+			else
+				musb_g_ep0_irq(musb);
+		}
+#endif
+	} else {
+		/* endpoints 1..15 */
+		if (transmit) {
+			if (devctl & MUSB_DEVCTL_HM) {
+				if (is_host_capable())
+					musb_host_tx(musb, epnum);
+			} else {
+				if (is_peripheral_capable())
+					musb_g_tx(musb, epnum);
+			}
+		} else {
+			/* receive */
+			if (devctl & MUSB_DEVCTL_HM) {
+				if (is_host_capable())
+					musb_host_rx(musb, epnum);
+			} else {
+				if (is_peripheral_capable())
+					musb_g_rx(musb, epnum);
+			}
+		}
+	}
+}
+
+#else
+#define use_dma			0
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef CONFIG_SYSFS
+
+static ssize_t
+musb_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct musb *musb = dev_to_musb(dev);
+	unsigned long flags;
+	int ret = -EINVAL;
+
+	spin_lock_irqsave(&musb->lock, flags);
+	ret = sprintf(buf, "%s\n", otg_state_string(musb));
+	spin_unlock_irqrestore(&musb->lock, flags);
+
+	return ret;
+}
+
+static ssize_t
+musb_mode_store(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t n)
+{
+	struct musb	*musb = dev_to_musb(dev);
+	unsigned long	flags;
+
+	spin_lock_irqsave(&musb->lock, flags);
+	if (!strncmp(buf, "host", 4))
+		musb_platform_set_mode(musb, MUSB_HOST);
+	if (!strncmp(buf, "peripheral", 10))
+		musb_platform_set_mode(musb, MUSB_PERIPHERAL);
+	if (!strncmp(buf, "otg", 3))
+		musb_platform_set_mode(musb, MUSB_OTG);
+	spin_unlock_irqrestore(&musb->lock, flags);
+
+	return n;
+}
+static DEVICE_ATTR(mode, 0644, musb_mode_show, musb_mode_store);
+
+static ssize_t
+musb_vbus_store(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t n)
+{
+	struct musb	*musb = dev_to_musb(dev);
+	unsigned long	flags;
+	unsigned long	val;
+
+	if (sscanf(buf, "%lu", &val) < 1) {
+		printk(KERN_ERR "Invalid VBUS timeout ms value\n");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&musb->lock, flags);
+	musb->a_wait_bcon = val;
+	if (musb->xceiv.state == OTG_STATE_A_WAIT_BCON)
+		musb->is_active = 0;
+	musb_platform_try_idle(musb, jiffies + msecs_to_jiffies(val));
+	spin_unlock_irqrestore(&musb->lock, flags);
+
+	return n;
+}
+
+static ssize_t
+musb_vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct musb	*musb = dev_to_musb(dev);
+	unsigned long	flags;
+	unsigned long	val;
+	int		vbus;
+
+	spin_lock_irqsave(&musb->lock, flags);
+	val = musb->a_wait_bcon;
+	vbus = musb_platform_get_vbus_status(musb);
+	spin_unlock_irqrestore(&musb->lock, flags);
+
+	return sprintf(buf, "Vbus %s, timeout %lu\n",
+			vbus ? "on" : "off", val);
+}
+static DEVICE_ATTR(vbus, 0644, musb_vbus_show, musb_vbus_store);
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+
+/* Gadget drivers can't know that a host is connected so they might want
+ * to start SRP, but users can.  This allows userspace to trigger SRP.
+ */
+static ssize_t
+musb_srp_store(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t n)
+{
+	struct musb	*musb = dev_to_musb(dev);
+	unsigned short	srp;
+
+	if (sscanf(buf, "%hu", &srp) != 1
+			|| (srp != 1)) {
+		printk(KERN_ERR "SRP: Value must be 1\n");
+		return -EINVAL;
+	}
+
+	if (srp == 1)
+		musb_g_wakeup(musb);
+
+	return n;
+}
+static DEVICE_ATTR(srp, 0644, NULL, musb_srp_store);
+
+#endif /* CONFIG_USB_GADGET_MUSB_HDRC */
+
+#endif	/* sysfs */
+
+/* Only used to provide driver mode change events */
+static void musb_irq_work(struct work_struct *data)
+{
+	struct musb *musb = container_of(data, struct musb, irq_work);
+	static int old_state;
+
+	if (musb->xceiv.state != old_state) {
+		old_state = musb->xceiv.state;
+		sysfs_notify(&musb->controller->kobj, NULL, "mode");
+	}
+}
+
+/* --------------------------------------------------------------------------
+ * Init support
+ */
+
+static struct musb *__init
+allocate_instance(struct device *dev,
+		struct musb_hdrc_config *config, void __iomem *mbase)
+{
+	struct musb		*musb;
+	struct musb_hw_ep	*ep;
+	int			epnum;
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+	struct usb_hcd	*hcd;
+
+	hcd = usb_create_hcd(&musb_hc_driver, dev, dev->bus_id);
+	if (!hcd)
+		return NULL;
+	/* usbcore sets dev->driver_data to hcd, and sometimes uses that... */
+
+	musb = hcd_to_musb(hcd);
+	INIT_LIST_HEAD(&musb->control);
+	INIT_LIST_HEAD(&musb->in_bulk);
+	INIT_LIST_HEAD(&musb->out_bulk);
+
+	hcd->uses_new_polling = 1;
+
+	musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
+#else
+	musb = kzalloc(sizeof *musb, GFP_KERNEL);
+	if (!musb)
+		return NULL;
+	dev_set_drvdata(dev, musb);
+
+#endif
+
+	musb->mregs = mbase;
+	musb->ctrl_base = mbase;
+	musb->nIrq = -ENODEV;
+	musb->config = config;
+	for (epnum = 0, ep = musb->endpoints;
+			epnum < musb->config->num_eps;
+			epnum++, ep++) {
+
+		ep->musb = musb;
+		ep->epnum = epnum;
+	}
+
+	musb->controller = dev;
+	return musb;
+}
+
+static void musb_free(struct musb *musb)
+{
+	/* this has multiple entry modes. it handles fault cleanup after
+	 * probe(), where things may be partially set up, as well as rmmod
+	 * cleanup after everything's been de-activated.
+	 */
+
+#ifdef CONFIG_SYSFS
+	device_remove_file(musb->controller, &dev_attr_mode);
+	device_remove_file(musb->controller, &dev_attr_vbus);
+#ifdef CONFIG_USB_MUSB_OTG
+	device_remove_file(musb->controller, &dev_attr_srp);
+#endif
+#endif
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+	musb_gadget_cleanup(musb);
+#endif
+
+	if (musb->nIrq >= 0) {
+		disable_irq_wake(musb->nIrq);
+		free_irq(musb->nIrq, musb);
+	}
+	if (is_dma_capable() && musb->dma_controller) {
+		struct dma_controller	*c = musb->dma_controller;
+
+		(void) c->stop(c);
+		dma_controller_destroy(c);
+	}
+
+	musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+	musb_platform_exit(musb);
+	musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+
+	if (musb->clock) {
+		clk_disable(musb->clock);
+		clk_put(musb->clock);
+	}
+
+#ifdef CONFIG_USB_MUSB_OTG
+	put_device(musb->xceiv.dev);
+#endif
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+	usb_put_hcd(musb_to_hcd(musb));
+#else
+	kfree(musb);
+#endif
+}
+
+/*
+ * Perform generic per-controller initialization.
+ *
+ * @pDevice: the controller (already clocked, etc)
+ * @nIrq: irq
+ * @mregs: virtual address of controller registers,
+ *	not yet corrected for platform-specific offsets
+ */
+static int __init
+musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
+{
+	int			status;
+	struct musb		*musb;
+	struct musb_hdrc_platform_data *plat = dev->platform_data;
+
+	/* The driver might handle more features than the board; OK.
+	 * Fail when the board needs a feature that's not enabled.
+	 */
+	if (!plat) {
+		dev_dbg(dev, "no platform_data?\n");
+		return -ENODEV;
+	}
+	switch (plat->mode) {
+	case MUSB_HOST:
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+		break;
+#else
+		goto bad_config;
+#endif
+	case MUSB_PERIPHERAL:
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+		break;
+#else
+		goto bad_config;
+#endif
+	case MUSB_OTG:
+#ifdef CONFIG_USB_MUSB_OTG
+		break;
+#else
+bad_config:
+#endif
+	default:
+		dev_err(dev, "incompatible Kconfig role setting\n");
+		return -EINVAL;
+	}
+
+	/* allocate */
+	musb = allocate_instance(dev, plat->config, ctrl);
+	if (!musb)
+		return -ENOMEM;
+
+	spin_lock_init(&musb->lock);
+	musb->board_mode = plat->mode;
+	musb->board_set_power = plat->set_power;
+	musb->set_clock = plat->set_clock;
+	musb->min_power = plat->min_power;
+
+	/* Clock usage is chip-specific ... functional clock (DaVinci,
+	 * OMAP2430), or PHY ref (some TUSB6010 boards).  All this core
+	 * code does is make sure a clock handle is available; platform
+	 * code manages it during start/stop and suspend/resume.
+	 */
+	if (plat->clock) {
+		musb->clock = clk_get(dev, plat->clock);
+		if (IS_ERR(musb->clock)) {
+			status = PTR_ERR(musb->clock);
+			musb->clock = NULL;
+			goto fail;
+		}
+	}
+
+	/* assume vbus is off */
+
+	/* platform adjusts musb->mregs and musb->isr if needed,
+	 * and activates clocks
+	 */
+	musb->isr = generic_interrupt;
+	status = musb_platform_init(musb);
+
+	if (status < 0)
+		goto fail;
+	if (!musb->isr) {
+		status = -ENODEV;
+		goto fail2;
+	}
+
+#ifndef CONFIG_MUSB_PIO_ONLY
+	if (use_dma && dev->dma_mask) {
+		struct dma_controller	*c;
+
+		c = dma_controller_create(musb, musb->mregs);
+		musb->dma_controller = c;
+		if (c)
+			(void) c->start(c);
+	}
+#endif
+	/* ideally this would be abstracted in platform setup */
+	if (!is_dma_capable() || !musb->dma_controller)
+		dev->dma_mask = NULL;
+
+	/* be sure interrupts are disabled before connecting ISR */
+	musb_platform_disable(musb);
+	musb_generic_disable(musb);
+
+	/* setup musb parts of the core (especially endpoints) */
+	status = musb_core_init(plat->config->multipoint
+			? MUSB_CONTROLLER_MHDRC
+			: MUSB_CONTROLLER_HDRC, musb);
+	if (status < 0)
+		goto fail2;
+
+	/* Init IRQ workqueue before request_irq */
+	INIT_WORK(&musb->irq_work, musb_irq_work);
+
+	/* attach to the IRQ */
+	if (request_irq(nIrq, musb->isr, 0, dev->bus_id, musb)) {
+		dev_err(dev, "request_irq %d failed!\n", nIrq);
+		status = -ENODEV;
+		goto fail2;
+	}
+	musb->nIrq = nIrq;
+/* FIXME this handles wakeup irqs wrong */
+	if (enable_irq_wake(nIrq) == 0)
+		device_init_wakeup(dev, 1);
+
+	pr_info("%s: USB %s mode controller at %p using %s, IRQ %d\n",
+			musb_driver_name,
+			({char *s;
+			switch (musb->board_mode) {
+			case MUSB_HOST:		s = "Host"; break;
+			case MUSB_PERIPHERAL:	s = "Peripheral"; break;
+			default:		s = "OTG"; break;
+			}; s; }),
+			ctrl,
+			(is_dma_capable() && musb->dma_controller)
+				? "DMA" : "PIO",
+			musb->nIrq);
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+	/* host side needs more setup, except for no-host modes */
+	if (musb->board_mode != MUSB_PERIPHERAL) {
+		struct usb_hcd	*hcd = musb_to_hcd(musb);
+
+		if (musb->board_mode == MUSB_OTG)
+			hcd->self.otg_port = 1;
+		musb->xceiv.host = &hcd->self;
+		hcd->power_budget = 2 * (plat->power ? : 250);
+	}
+#endif				/* CONFIG_USB_MUSB_HDRC_HCD */
+
+	/* For the host-only role, we can activate right away.
+	 * (We expect the ID pin to be forcibly grounded!!)
+	 * Otherwise, wait till the gadget driver hooks up.
+	 */
+	if (!is_otg_enabled(musb) && is_host_enabled(musb)) {
+		MUSB_HST_MODE(musb);
+		musb->xceiv.default_a = 1;
+		musb->xceiv.state = OTG_STATE_A_IDLE;
+
+		status = usb_add_hcd(musb_to_hcd(musb), -1, 0);
+
+		DBG(1, "%s mode, status %d, devctl %02x %c\n",
+			"HOST", status,
+			musb_readb(musb->mregs, MUSB_DEVCTL),
+			(musb_readb(musb->mregs, MUSB_DEVCTL)
+					& MUSB_DEVCTL_BDEVICE
+				? 'B' : 'A'));
+
+	} else /* peripheral is enabled */ {
+		MUSB_DEV_MODE(musb);
+		musb->xceiv.default_a = 0;
+		musb->xceiv.state = OTG_STATE_B_IDLE;
+
+		status = musb_gadget_setup(musb);
+
+		DBG(1, "%s mode, status %d, dev%02x\n",
+			is_otg_enabled(musb) ? "OTG" : "PERIPHERAL",
+			status,
+			musb_readb(musb->mregs, MUSB_DEVCTL));
+
+	}
+
+	if (status == 0)
+		musb_debug_create("driver/musb_hdrc", musb);
+	else {
+fail:
+		if (musb->clock)
+			clk_put(musb->clock);
+		device_init_wakeup(dev, 0);
+		musb_free(musb);
+		return status;
+	}
+
+#ifdef CONFIG_SYSFS
+	status = device_create_file(dev, &dev_attr_mode);
+	status = device_create_file(dev, &dev_attr_vbus);
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+	status = device_create_file(dev, &dev_attr_srp);
+#endif /* CONFIG_USB_GADGET_MUSB_HDRC */
+	status = 0;
+#endif
+
+	return status;
+
+fail2:
+	musb_platform_exit(musb);
+	goto fail;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* all implementations (PCI bridge to FPGA, VLYNQ, etc) should just
+ * bridge to a platform device; this driver then suffices.
+ */
+
+#ifndef CONFIG_MUSB_PIO_ONLY
+static u64	*orig_dma_mask;
+#endif
+
+static int __init musb_probe(struct platform_device *pdev)
+{
+	struct device	*dev = &pdev->dev;
+	int		irq = platform_get_irq(pdev, 0);
+	struct resource	*iomem;
+	void __iomem	*base;
+
+	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!iomem || irq == 0)
+		return -ENODEV;
+
+	base = ioremap(iomem->start, iomem->end - iomem->start + 1);
+	if (!base) {
+		dev_err(dev, "ioremap failed\n");
+		return -ENOMEM;
+	}
+
+#ifndef CONFIG_MUSB_PIO_ONLY
+	/* clobbered by use_dma=n */
+	orig_dma_mask = dev->dma_mask;
+#endif
+	return musb_init_controller(dev, irq, base);
+}
+
+static int __devexit musb_remove(struct platform_device *pdev)
+{
+	struct musb	*musb = dev_to_musb(&pdev->dev);
+	void __iomem	*ctrl_base = musb->ctrl_base;
+
+	/* this gets called on rmmod.
+	 *  - Host mode: host may still be active
+	 *  - Peripheral mode: peripheral is deactivated (or never-activated)
+	 *  - OTG mode: both roles are deactivated (or never-activated)
+	 */
+	musb_shutdown(pdev);
+	musb_debug_delete("driver/musb_hdrc", musb);
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+	if (musb->board_mode == MUSB_HOST)
+		usb_remove_hcd(musb_to_hcd(musb));
+#endif
+	musb_free(musb);
+	iounmap(ctrl_base);
+	device_init_wakeup(&pdev->dev, 0);
+#ifndef CONFIG_MUSB_PIO_ONLY
+	pdev->dev.dma_mask = orig_dma_mask;
+#endif
+	return 0;
+}
+
+#ifdef	CONFIG_PM
+
+static int musb_suspend(struct platform_device *pdev, pm_message_t message)
+{
+	unsigned long	flags;
+	struct musb	*musb = dev_to_musb(&pdev->dev);
+
+	if (!musb->clock)
+		return 0;
+
+	spin_lock_irqsave(&musb->lock, flags);
+
+	if (is_peripheral_active(musb)) {
+		/* FIXME force disconnect unless we know USB will wake
+		 * the system up quickly enough to respond ...
+		 */
+	} else if (is_host_active(musb)) {
+		/* we know all the children are suspended; sometimes
+		 * they will even be wakeup-enabled.
+		 */
+	}
+
+	if (musb->set_clock)
+		musb->set_clock(musb->clock, 0);
+	else
+		clk_disable(musb->clock);
+	spin_unlock_irqrestore(&musb->lock, flags);
+	return 0;
+}
+
+static int musb_resume(struct platform_device *pdev)
+{
+	unsigned long	flags;
+	struct musb	*musb = dev_to_musb(&pdev->dev);
+
+	if (!musb->clock)
+		return 0;
+
+	spin_lock_irqsave(&musb->lock, flags);
+
+	if (musb->set_clock)
+		musb->set_clock(musb->clock, 1);
+	else
+		clk_enable(musb->clock);
+
+	/* for static cmos like DaVinci, register values were preserved
+	 * unless for some reason the whole soc powered down and we're
+	 * not treating that as a whole-system restart (e.g. swsusp)
+	 */
+	spin_unlock_irqrestore(&musb->lock, flags);
+	return 0;
+}
+
+#else
+#define	musb_suspend	NULL
+#define	musb_resume	NULL
+#endif
+
+static struct platform_driver musb_driver = {
+	.driver = {
+		.name		= (char *)musb_driver_name,
+		.bus		= &platform_bus_type,
+		.owner		= THIS_MODULE,
+	},
+	.remove		= __devexit_p(musb_remove),
+	.shutdown	= musb_shutdown,
+	.suspend	= musb_suspend,
+	.resume		= musb_resume,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int __init musb_init(void)
+{
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+	if (usb_disabled())
+		return 0;
+#endif
+
+	pr_info("%s: version " MUSB_VERSION ", "
+#ifdef CONFIG_MUSB_PIO_ONLY
+		"pio"
+#elif defined(CONFIG_USB_TI_CPPI_DMA)
+		"cppi-dma"
+#elif defined(CONFIG_USB_INVENTRA_DMA)
+		"musb-dma"
+#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
+		"tusb-omap-dma"
+#else
+		"?dma?"
+#endif
+		", "
+#ifdef CONFIG_USB_MUSB_OTG
+		"otg (peripheral+host)"
+#elif defined(CONFIG_USB_GADGET_MUSB_HDRC)
+		"peripheral"
+#elif defined(CONFIG_USB_MUSB_HDRC_HCD)
+		"host"
+#endif
+		", debug=%d\n",
+		musb_driver_name, debug);
+	return platform_driver_probe(&musb_driver, musb_probe);
+}
+
+/* make us init after usbcore and before usb
+ * gadget and host-side drivers start to register
+ */
+subsys_initcall(musb_init);
+
+static void __exit musb_cleanup(void)
+{
+	platform_driver_unregister(&musb_driver);
+}
+module_exit(musb_cleanup);
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
new file mode 100644
index 0000000..eade46d
--- /dev/null
+++ b/drivers/usb/musb/musb_core.h
@@ -0,0 +1,507 @@
+/*
+ * MUSB OTG driver defines
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MUSB_CORE_H__
+#define __MUSB_CORE_H__
+
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/musb.h>
+
+struct musb;
+struct musb_hw_ep;
+struct musb_ep;
+
+
+#include "musb_debug.h"
+#include "musb_dma.h"
+
+#include "musb_io.h"
+#include "musb_regs.h"
+
+#include "musb_gadget.h"
+#include "../core/hcd.h"
+#include "musb_host.h"
+
+
+
+#ifdef CONFIG_USB_MUSB_OTG
+
+#define	is_peripheral_enabled(musb)	((musb)->board_mode != MUSB_HOST)
+#define	is_host_enabled(musb)		((musb)->board_mode != MUSB_PERIPHERAL)
+#define	is_otg_enabled(musb)		((musb)->board_mode == MUSB_OTG)
+
+/* NOTE:  otg and peripheral-only state machines start at B_IDLE.
+ * OTG or host-only go to A_IDLE when ID is sensed.
+ */
+#define is_peripheral_active(m)		(!(m)->is_host)
+#define is_host_active(m)		((m)->is_host)
+
+#else
+#define	is_peripheral_enabled(musb)	is_peripheral_capable()
+#define	is_host_enabled(musb)		is_host_capable()
+#define	is_otg_enabled(musb)		0
+
+#define	is_peripheral_active(musb)	is_peripheral_capable()
+#define	is_host_active(musb)		is_host_capable()
+#endif
+
+#if defined(CONFIG_USB_MUSB_OTG) || defined(CONFIG_USB_MUSB_PERIPHERAL)
+/* for some reason, "select USB_GADGET_MUSB_HDRC" doesn't always override
+ * the default gadget controller choice (often USB_GADGET_DUMMY_HCD).
+ */
+#ifndef CONFIG_USB_GADGET_MUSB_HDRC
+#error bogus Kconfig output ... select CONFIG_USB_GADGET_MUSB_HDRC
+#endif
+#endif	/* need MUSB gadget selection */
+
+
+#ifdef CONFIG_PROC_FS
+#include <linux/fs.h>
+#define MUSB_CONFIG_PROC_FS
+#endif
+
+/****************************** PERIPHERAL ROLE *****************************/
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+
+#define	is_peripheral_capable()	(1)
+
+extern irqreturn_t musb_g_ep0_irq(struct musb *);
+extern void musb_g_tx(struct musb *, u8);
+extern void musb_g_rx(struct musb *, u8);
+extern void musb_g_reset(struct musb *);
+extern void musb_g_suspend(struct musb *);
+extern void musb_g_resume(struct musb *);
+extern void musb_g_wakeup(struct musb *);
+extern void musb_g_disconnect(struct musb *);
+
+#else
+
+#define	is_peripheral_capable()	(0)
+
+static inline irqreturn_t musb_g_ep0_irq(struct musb *m) { return IRQ_NONE; }
+static inline void musb_g_reset(struct musb *m) {}
+static inline void musb_g_suspend(struct musb *m) {}
+static inline void musb_g_resume(struct musb *m) {}
+static inline void musb_g_wakeup(struct musb *m) {}
+static inline void musb_g_disconnect(struct musb *m) {}
+
+#endif
+
+/****************************** HOST ROLE ***********************************/
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+
+#define	is_host_capable()	(1)
+
+extern irqreturn_t musb_h_ep0_irq(struct musb *);
+extern void musb_host_tx(struct musb *, u8);
+extern void musb_host_rx(struct musb *, u8);
+
+#else
+
+#define	is_host_capable()	(0)
+
+static inline irqreturn_t musb_h_ep0_irq(struct musb *m) { return IRQ_NONE; }
+static inline void musb_host_tx(struct musb *m, u8 e) {}
+static inline void musb_host_rx(struct musb *m, u8 e) {}
+
+#endif
+
+
+/****************************** CONSTANTS ********************************/
+
+#ifndef MUSB_C_NUM_EPS
+#define MUSB_C_NUM_EPS ((u8)16)
+#endif
+
+#ifndef MUSB_MAX_END0_PACKET
+#define MUSB_MAX_END0_PACKET ((u16)MUSB_EP0_FIFOSIZE)
+#endif
+
+/* host side ep0 states */
+enum musb_h_ep0_state {
+	MUSB_EP0_IDLE,
+	MUSB_EP0_START,			/* expect ack of setup */
+	MUSB_EP0_IN,			/* expect IN DATA */
+	MUSB_EP0_OUT,			/* expect ack of OUT DATA */
+	MUSB_EP0_STATUS,		/* expect ack of STATUS */
+} __attribute__ ((packed));
+
+/* peripheral side ep0 states */
+enum musb_g_ep0_state {
+	MUSB_EP0_STAGE_SETUP,		/* idle, waiting for setup */
+	MUSB_EP0_STAGE_TX,		/* IN data */
+	MUSB_EP0_STAGE_RX,		/* OUT data */
+	MUSB_EP0_STAGE_STATUSIN,	/* (after OUT data) */
+	MUSB_EP0_STAGE_STATUSOUT,	/* (after IN data) */
+	MUSB_EP0_STAGE_ACKWAIT,		/* after zlp, before statusin */
+} __attribute__ ((packed));
+
+/* OTG protocol constants */
+#define OTG_TIME_A_WAIT_VRISE	100		/* msec (max) */
+#define OTG_TIME_A_WAIT_BCON	0		/* 0=infinite; min 1000 msec */
+#define OTG_TIME_A_IDLE_BDIS	200		/* msec (min) */
+
+/*************************** REGISTER ACCESS ********************************/
+
+/* Endpoint registers (other than dynfifo setup) can be accessed either
+ * directly with the "flat" model, or after setting up an index register.
+ */
+
+#if defined(CONFIG_ARCH_DAVINCI) || defined(CONFIG_ARCH_OMAP2430) \
+		|| defined(CONFIG_ARCH_OMAP3430)
+/* REVISIT indexed access seemed to
+ * misbehave (on DaVinci) for at least peripheral IN ...
+ */
+#define	MUSB_FLAT_REG
+#endif
+
+/* TUSB mapping: "flat" plus ep0 special cases */
+#if	defined(CONFIG_USB_TUSB6010)
+#define musb_ep_select(_mbase, _epnum) \
+	musb_writeb((_mbase), MUSB_INDEX, (_epnum))
+#define	MUSB_EP_OFFSET			MUSB_TUSB_OFFSET
+
+/* "flat" mapping: each endpoint has its own i/o address */
+#elif	defined(MUSB_FLAT_REG)
+#define musb_ep_select(_mbase, _epnum)	(((void)(_mbase)), ((void)(_epnum)))
+#define	MUSB_EP_OFFSET			MUSB_FLAT_OFFSET
+
+/* "indexed" mapping: INDEX register controls register bank select */
+#else
+#define musb_ep_select(_mbase, _epnum) \
+	musb_writeb((_mbase), MUSB_INDEX, (_epnum))
+#define	MUSB_EP_OFFSET			MUSB_INDEXED_OFFSET
+#endif
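+
+/* Illustrative sketch, not driver code: under any of the three mappings
+ * above, callers access endpoint registers the same way, e.g.
+ *
+ *	musb_ep_select(mbase, epnum);
+ *	csr = musb_readw(musb->endpoints[epnum].regs, MUSB_TXCSR);
+ *
+ * With indexed access, musb_ep_select() writes the INDEX register first;
+ * with flat access it compiles away, since each endpoint already has its
+ * own register addresses via MUSB_EP_OFFSET.
+ */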
+
+/****************************** FUNCTIONS ********************************/
+
+#define MUSB_HST_MODE(_musb)\
+	{ (_musb)->is_host = true; }
+#define MUSB_DEV_MODE(_musb) \
+	{ (_musb)->is_host = false; }
+
+#define test_devctl_hst_mode(_x) \
+	(musb_readb((_x)->mregs, MUSB_DEVCTL)&MUSB_DEVCTL_HM)
+
+#define MUSB_MODE(musb) ((musb)->is_host ? "Host" : "Peripheral")
+
+/******************************** TYPES *************************************/
+
+/*
+ * struct musb_hw_ep - endpoint hardware (bidirectional)
+ *
+ * Ordered slightly for better cacheline locality.
+ */
+struct musb_hw_ep {
+	struct musb		*musb;
+	void __iomem		*fifo;
+	void __iomem		*regs;
+
+#ifdef CONFIG_USB_TUSB6010
+	void __iomem		*conf;
+#endif
+
+	/* index in musb->endpoints[]  */
+	u8			epnum;
+
+	/* hardware configuration, possibly dynamic */
+	bool			is_shared_fifo;
+	bool			tx_double_buffered;
+	bool			rx_double_buffered;
+	u16			max_packet_sz_tx;
+	u16			max_packet_sz_rx;
+
+	struct dma_channel	*tx_channel;
+	struct dma_channel	*rx_channel;
+
+#ifdef CONFIG_USB_TUSB6010
+	/* TUSB has "asynchronous" and "synchronous" dma modes */
+	dma_addr_t		fifo_async;
+	dma_addr_t		fifo_sync;
+	void __iomem		*fifo_sync_va;
+#endif
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+	void __iomem		*target_regs;
+
+	/* currently scheduled peripheral endpoint */
+	struct musb_qh		*in_qh;
+	struct musb_qh		*out_qh;
+
+	u8			rx_reinit;
+	u8			tx_reinit;
+#endif
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+	/* peripheral side */
+	struct musb_ep		ep_in;			/* TX */
+	struct musb_ep		ep_out;			/* RX */
+#endif
+};
+
+static inline struct usb_request *next_in_request(struct musb_hw_ep *hw_ep)
+{
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+	return next_request(&hw_ep->ep_in);
+#else
+	return NULL;
+#endif
+}
+
+static inline struct usb_request *next_out_request(struct musb_hw_ep *hw_ep)
+{
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+	return next_request(&hw_ep->ep_out);
+#else
+	return NULL;
+#endif
+}
+
+/*
+ * struct musb - Driver instance data.
+ */
+struct musb {
+	/* device lock */
+	spinlock_t		lock;
+	struct clk		*clock;
+	irqreturn_t		(*isr)(int, void *);
+	struct work_struct	irq_work;
+
+/* this hub status bit is reserved by USB 2.0 and not seen by usbcore */
+#define MUSB_PORT_STAT_RESUME	(1 << 31)
+
+	u32			port1_status;
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+	unsigned long		rh_timer;
+
+	enum musb_h_ep0_state	ep0_stage;
+
+	/* bulk traffic normally dedicates endpoint hardware, and each
+	 * direction has its own ring of host side endpoints.
+	 * we try to progress the transfer at the head of each endpoint's
+	 * queue until it completes or NAKs too much; then we try the next
+	 * endpoint.
+	 */
+	struct musb_hw_ep	*bulk_ep;
+
+	struct list_head	control;	/* of musb_qh */
+	struct list_head	in_bulk;	/* of musb_qh */
+	struct list_head	out_bulk;	/* of musb_qh */
+	struct musb_qh		*periodic[32];	/* tree of interrupt+iso */
+#endif
+
+	/* called with IRQs blocked; ON/nonzero implies starting a session,
+	 * and waiting at least a_wait_vrise_tmout.
+	 */
+	void			(*board_set_vbus)(struct musb *, int is_on);
+
+	struct dma_controller	*dma_controller;
+
+	struct device		*controller;
+	void __iomem		*ctrl_base;
+	void __iomem		*mregs;
+
+#ifdef CONFIG_USB_TUSB6010
+	dma_addr_t		async;
+	dma_addr_t		sync;
+	void __iomem		*sync_va;
+#endif
+
+	/* passed down from chip/board specific irq handlers */
+	u8			int_usb;
+	u16			int_rx;
+	u16			int_tx;
+
+	struct otg_transceiver	xceiv;
+
+	int nIrq;
+
+	struct musb_hw_ep	 endpoints[MUSB_C_NUM_EPS];
+#define control_ep		endpoints
+
+#define VBUSERR_RETRY_COUNT	3
+	u16			vbuserr_retry;
+	u16 epmask;
+	u8 nr_endpoints;
+
+	u8 board_mode;		/* enum musb_mode */
+	int			(*board_set_power)(int state);
+
+	int			(*set_clock)(struct clk *clk, int is_active);
+
+	u8			min_power;	/* vbus for periph, in mA/2 */
+
+	bool			is_host;
+
+	int			a_wait_bcon;	/* VBUS timeout in msecs */
+	unsigned long		idle_timeout;	/* Next timeout in jiffies */
+
+	/* active means connected and not suspended */
+	unsigned		is_active:1;
+
+	unsigned is_multipoint:1;
+	unsigned ignore_disconnect:1;	/* during bus resets */
+
+#ifdef C_MP_TX
+	unsigned bulk_split:1;
+#define	can_bulk_split(musb,type) \
+		(((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_split)
+#else
+#define	can_bulk_split(musb, type)	0
+#endif
+
+#ifdef C_MP_RX
+	unsigned bulk_combine:1;
+#define	can_bulk_combine(musb,type) \
+		(((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_combine)
+#else
+#define	can_bulk_combine(musb, type)	0
+#endif
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+	/* is_suspended means USB B_PERIPHERAL suspend */
+	unsigned		is_suspended:1;
+
+	/* may_wakeup means remote wakeup is enabled */
+	unsigned		may_wakeup:1;
+
+	/* is_self_powered is reported in device status and the
+	 * config descriptor.  is_bus_powered means B_PERIPHERAL
+	 * draws some VBUS current; both can be true.
+	 */
+	unsigned		is_self_powered:1;
+	unsigned		is_bus_powered:1;
+
+	unsigned		set_address:1;
+	unsigned		test_mode:1;
+	unsigned		softconnect:1;
+
+	u8			address;
+	u8			test_mode_nr;
+	u16			ackpend;		/* ep0 */
+	enum musb_g_ep0_state	ep0_state;
+	struct usb_gadget	g;			/* the gadget */
+	struct usb_gadget_driver *gadget_driver;	/* its driver */
+#endif
+
+	struct musb_hdrc_config	*config;
+
+#ifdef MUSB_CONFIG_PROC_FS
+	struct proc_dir_entry *proc_entry;
+#endif
+};
+
+static inline void musb_set_vbus(struct musb *musb, int is_on)
+{
+	musb->board_set_vbus(musb, is_on);
+}
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+static inline struct musb *gadget_to_musb(struct usb_gadget *g)
+{
+	return container_of(g, struct musb, g);
+}
+#endif
+
+
+/***************************** Glue it together *****************************/
+
+extern const char musb_driver_name[];
+
+extern void musb_start(struct musb *musb);
+extern void musb_stop(struct musb *musb);
+
+extern void musb_write_fifo(struct musb_hw_ep *ep, u16 len, const u8 *src);
+extern void musb_read_fifo(struct musb_hw_ep *ep, u16 len, u8 *dst);
+
+extern void musb_load_testpacket(struct musb *);
+
+extern irqreturn_t musb_interrupt(struct musb *);
+
+extern void musb_platform_enable(struct musb *musb);
+extern void musb_platform_disable(struct musb *musb);
+
+extern void musb_hnp_stop(struct musb *musb);
+
+extern void musb_platform_set_mode(struct musb *musb, u8 musb_mode);
+
+#if defined(CONFIG_USB_TUSB6010) || \
+	defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP34XX)
+extern void musb_platform_try_idle(struct musb *musb, unsigned long timeout);
+#else
+#define musb_platform_try_idle(x, y)		do {} while (0)
+#endif
+
+#ifdef CONFIG_USB_TUSB6010
+extern int musb_platform_get_vbus_status(struct musb *musb);
+#else
+#define musb_platform_get_vbus_status(x)	0
+#endif
+
+extern int __init musb_platform_init(struct musb *musb);
+extern int musb_platform_exit(struct musb *musb);
+
+/*-------------------------- ProcFS definitions ---------------------*/
+
+struct proc_dir_entry;
+
+#if (MUSB_DEBUG > 0) && defined(MUSB_CONFIG_PROC_FS)
+extern struct proc_dir_entry *musb_debug_create(char *name, struct musb *data);
+extern void musb_debug_delete(char *name, struct musb *data);
+
+#else
+static inline struct proc_dir_entry *
+musb_debug_create(char *name, struct musb *data)
+{
+	return NULL;
+}
+static inline void musb_debug_delete(char *name, struct musb *data)
+{
+}
+#endif
+
+#endif	/* __MUSB_CORE_H__ */
diff --git a/drivers/usb/musb/musb_debug.h b/drivers/usb/musb/musb_debug.h
new file mode 100644
index 0000000..3bdb311
--- /dev/null
+++ b/drivers/usb/musb/musb_debug.h
@@ -0,0 +1,66 @@
+/*
+ * MUSB OTG driver debug defines
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MUSB_LINUX_DEBUG_H__
+#define __MUSB_LINUX_DEBUG_H__
+
+#define yprintk(facility, format, args...) \
+	do { printk(facility "%s %d: " format , \
+	__func__, __LINE__ , ## args); } while (0)
+#define WARNING(fmt, args...) yprintk(KERN_WARNING, fmt, ## args)
+#define INFO(fmt, args...) yprintk(KERN_INFO, fmt, ## args)
+#define ERR(fmt, args...) yprintk(KERN_ERR, fmt, ## args)
+
+#define xprintk(level, facility, format, args...) do { \
+	if (_dbg_level(level)) { \
+		printk(facility "%s %d: " format , \
+				__func__, __LINE__ , ## args); \
+	} } while (0)
+
+#if MUSB_DEBUG > 0
+extern unsigned debug;
+#else
+#define debug	0
+#endif
+
+static inline int _dbg_level(unsigned l)
+{
+	return debug >= l;
+}
+
+#define DBG(level, fmt, args...) xprintk(level, KERN_DEBUG, fmt, ## args)
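+
+/* Usage example (illustrative): DBG(3, "ep%d csr %04x\n", epnum, csr)
+ * prints at KERN_DEBUG, prefixed with the calling function name and line
+ * number, whenever the global debug level is 3 or higher.
+ */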
+
+extern const char *otg_state_string(struct musb *);
+
+#endif				/*  __MUSB_LINUX_DEBUG_H__ */
diff --git a/drivers/usb/musb/musb_dma.h b/drivers/usb/musb/musb_dma.h
new file mode 100644
index 0000000..0a2c4e3
--- /dev/null
+++ b/drivers/usb/musb/musb_dma.h
@@ -0,0 +1,172 @@
+/*
+ * MUSB OTG driver DMA controller abstraction
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MUSB_DMA_H__
+#define __MUSB_DMA_H__
+
+struct musb_hw_ep;
+
+/*
+ * DMA Controller Abstraction
+ *
+ * DMA Controllers are abstracted to allow use of a variety of different
+ * implementations of DMA, as allowed by the Inventra USB cores.  On the
+ * host side, usbcore sets up the DMA mappings and flushes caches; on the
+ * peripheral side, the gadget controller driver does.  Responsibilities
+ * of a DMA controller driver include:
+ *
+ *  - Handling the details of moving multiple USB packets
+ *    in cooperation with the Inventra USB core, including especially
+ *    the correct RX side treatment of short packets and buffer-full
+ *    states (both of which terminate transfers).
+ *
+ *  - Knowing the correlation between dma channels and the
+ *    Inventra core's local endpoint resources and data direction.
+ *
+ *  - Maintaining a list of allocated/available channels.
+ *
+ *  - Updating channel status on interrupts,
+ *    whether shared with the Inventra core or separate.
+ */
+
+#define	DMA_ADDR_INVALID	(~(dma_addr_t)0)
+
+#ifndef CONFIG_MUSB_PIO_ONLY
+#define	is_dma_capable()	(1)
+#else
+#define	is_dma_capable()	(0)
+#endif
+
+#ifdef CONFIG_USB_TI_CPPI_DMA
+#define	is_cppi_enabled()	1
+#else
+#define	is_cppi_enabled()	0
+#endif
+
+#ifdef CONFIG_USB_TUSB_OMAP_DMA
+#define tusb_dma_omap()			1
+#else
+#define tusb_dma_omap()			0
+#endif
+
+/*
+ * DMA channel status ... updated by the dma controller driver whenever that
+ * status changes, and protected by the overall controller spinlock.
+ */
+enum dma_channel_status {
+	/* unallocated */
+	MUSB_DMA_STATUS_UNKNOWN,
+	/* allocated ... but not busy, no errors */
+	MUSB_DMA_STATUS_FREE,
+	/* busy ... transactions are active */
+	MUSB_DMA_STATUS_BUSY,
+	/* transaction(s) aborted due to ... dma or memory bus error */
+	MUSB_DMA_STATUS_BUS_ABORT,
+	/* transaction(s) aborted due to ... core error or USB fault */
+	MUSB_DMA_STATUS_CORE_ABORT
+};
+
+struct dma_controller;
+
+/**
+ * struct dma_channel - A DMA channel.
+ * @private_data: channel-private data
+ * @max_len: the maximum number of bytes the channel can move in one
+ *	transaction (typically representing many USB maximum-sized packets)
+ * @actual_len: how many bytes have been transferred
+ * @status: current channel status (updated e.g. on interrupt)
+ * @desired_mode: true if mode 1 is desired; false if mode 0 is desired
+ *
+ * channels are associated with an endpoint for the duration of at least
+ * one usb transfer.
+ */
+struct dma_channel {
+	void			*private_data;
+	/* FIXME not void* private_data, but a dma_controller * */
+	size_t			max_len;
+	size_t			actual_len;
+	enum dma_channel_status	status;
+	bool			desired_mode;
+};
+
+/*
+ * dma_channel_status - return status of dma channel
+ * @c: the channel
+ *
+ * Returns the software's view of the channel status.  If that status is BUSY
+ * then it's possible that the hardware has completed (or aborted) a transfer,
+ * so the driver needs to update that status.
+ */
+static inline enum dma_channel_status
+dma_channel_status(struct dma_channel *c)
+{
+	return (is_dma_capable() && c) ? c->status : MUSB_DMA_STATUS_UNKNOWN;
+}
+
+/**
+ * struct dma_controller - A DMA Controller.
+ * @start: call this to start a DMA controller;
+ *	return 0 on success, else negative errno
+ * @stop: call this to stop a DMA controller;
+ *	return 0 on success, else negative errno
+ * @channel_alloc: call this to allocate a DMA channel
+ * @channel_release: call this to release a DMA channel
+ * @channel_program: call this to program a DMA channel to move data;
+ *	returns true if the controller accepted the transfer
+ * @channel_abort: call this to abort a pending DMA transaction,
+ *	returning it to FREE (but allocated) state
+ *
+ * Controllers manage dma channels.
+ */
+struct dma_controller {
+	int			(*start)(struct dma_controller *);
+	int			(*stop)(struct dma_controller *);
+	struct dma_channel	*(*channel_alloc)(struct dma_controller *,
+					struct musb_hw_ep *, u8 is_tx);
+	void			(*channel_release)(struct dma_channel *);
+	int			(*channel_program)(struct dma_channel *channel,
+							u16 maxpacket, u8 mode,
+							dma_addr_t dma_addr,
+							u32 length);
+	int			(*channel_abort)(struct dma_channel *);
+};
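+
+/* A minimal sketch (illustrative only; "hw_ep", "is_tx" and friends
+ * stand for caller state) of how glue code drives this abstraction:
+ *
+ *	struct dma_channel *ch = c->channel_alloc(c, hw_ep, is_tx);
+ *
+ *	if (ch && c->channel_program(ch, maxpacket, mode, dma_addr, len))
+ *		... ch->status stays MUSB_DMA_STATUS_BUSY until the irq
+ *		    path reports completion, or channel_abort() is called
+ *	else
+ *		... the controller declined; fall back to PIO
+ *	...
+ *	c->channel_release(ch);
+ */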
+
+/* called after channel_program(), may indicate a fault */
+extern void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit);
+
+
+extern struct dma_controller *__init
+dma_controller_create(struct musb *, void __iomem *);
+
+extern void dma_controller_destroy(struct dma_controller *);
+
+#endif	/* __MUSB_DMA_H__ */
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
new file mode 100644
index 0000000..d6a802c
--- /dev/null
+++ b/drivers/usb/musb/musb_gadget.c
@@ -0,0 +1,2031 @@
+/*
+ * MUSB OTG driver peripheral support
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/timer.h>
+#include <linux/module.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/moduleparam.h>
+#include <linux/stat.h>
+#include <linux/dma-mapping.h>
+
+#include "musb_core.h"
+
+
+/* MUSB PERIPHERAL status 3-mar-2006:
+ *
+ * - EP0 seems solid.  It passes both USBCV and usbtest control cases.
+ *   Minor glitches:
+ *
+ *     + remote wakeup to Linux hosts works, but we saw USBCV failures
+ *       in one test run (operator error?)
+ *     + endpoint halt tests -- in both usbtest and usbcv -- seem
+ *       to break when dma is enabled ... is something wrongly
+ *       clearing SENDSTALL?
+ *
+ * - Mass storage behaved ok when last tested.  Network traffic patterns
+ *   (with lots of short transfers etc) need retesting; they turn up the
+ *   worst cases of the DMA, since short packets are typical but are not
+ *   required.
+ *
+ * - TX/IN
+ *     + both pio and dma behave with network and g_zero tests
+ *     + no cppi throughput issues other than no-hw-queueing
+ *     + failed with FLAT_REG (DaVinci)
+ *     + seems to behave with double buffering, PIO -and- CPPI
+ *     + with gadgetfs + AIO, requests got lost?
+ *
+ * - RX/OUT
+ *     + both pio and dma behave with network and g_zero tests
+ *     + dma is slow in typical case (short_not_ok is clear)
+ *     + double buffering ok with PIO
+ *     + double buffering *FAILS* with CPPI, wrong data bytes sometimes
+ *     + request lossage observed with gadgetfs
+ *
+ * - ISO not tested ... might work, but only weakly isochronous
+ *
+ * - Gadget driver disabling of softconnect during bind() is ignored, so
+ *   drivers can't hold off host requests until userspace is ready.
+ *   (Workaround:  they can turn softconnect off later.)
+ *
+ * - PORTABILITY (assumes PIO works):
+ *     + DaVinci, basically works with cppi dma
+ *     + OMAP 2430, ditto with mentor dma
+ *     + TUSB 6010, platform-specific dma in the works
+ */
+
+/* ----------------------------------------------------------------------- */
+
+/*
+ * Immediately complete a request.
+ *
+ * @param ep the endpoint the request was queued to
+ * @param request the request to complete
+ * @param status the status to complete the request with
+ * Context: controller locked, IRQs blocked.
+ */
+void musb_g_giveback(
+	struct musb_ep		*ep,
+	struct usb_request	*request,
+	int			status)
+__releases(ep->musb->lock)
+__acquires(ep->musb->lock)
+{
+	struct musb_request	*req;
+	struct musb		*musb;
+	int			busy = ep->busy;
+
+	req = to_musb_request(request);
+
+	list_del(&request->list);
+	if (req->request.status == -EINPROGRESS)
+		req->request.status = status;
+	musb = req->musb;
+
+	ep->busy = 1;
+	spin_unlock(&musb->lock);
+	if (is_dma_capable()) {
+		if (req->mapped) {
+			dma_unmap_single(musb->controller,
+					req->request.dma,
+					req->request.length,
+					req->tx
+						? DMA_TO_DEVICE
+						: DMA_FROM_DEVICE);
+			req->request.dma = DMA_ADDR_INVALID;
+			req->mapped = 0;
+		} else if (req->request.dma != DMA_ADDR_INVALID)
+			dma_sync_single_for_cpu(musb->controller,
+					req->request.dma,
+					req->request.length,
+					req->tx
+						? DMA_TO_DEVICE
+						: DMA_FROM_DEVICE);
+	}
+	if (request->status == 0)
+		DBG(5, "%s done request %p,  %d/%d\n",
+				ep->end_point.name, request,
+				req->request.actual, req->request.length);
+	else
+		DBG(2, "%s request %p, %d/%d fault %d\n",
+				ep->end_point.name, request,
+				req->request.actual, req->request.length,
+				request->status);
+	req->request.complete(&req->ep->end_point, &req->request);
+	spin_lock(&musb->lock);
+	ep->busy = busy;
+}
+
+/* ----------------------------------------------------------------------- */
+
+/*
+ * Abort requests queued to an endpoint, completing them with the given
+ * status. Synchronous. The caller has locked the controller, blocked
+ * IRQs, and selected this endpoint.
+ */
+static void nuke(struct musb_ep *ep, const int status)
+{
+	struct musb_request	*req = NULL;
+	void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;
+
+	ep->busy = 1;
+
+	if (is_dma_capable() && ep->dma) {
+		struct dma_controller	*c = ep->musb->dma_controller;
+		int value;
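+		/* flush the FIFO twice: it may be double buffered */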
+		if (ep->is_in) {
+			musb_writew(epio, MUSB_TXCSR,
+					0 | MUSB_TXCSR_FLUSHFIFO);
+			musb_writew(epio, MUSB_TXCSR,
+					0 | MUSB_TXCSR_FLUSHFIFO);
+		} else {
+			musb_writew(epio, MUSB_RXCSR,
+					0 | MUSB_RXCSR_FLUSHFIFO);
+			musb_writew(epio, MUSB_RXCSR,
+					0 | MUSB_RXCSR_FLUSHFIFO);
+		}
+
+		value = c->channel_abort(ep->dma);
+		DBG(value ? 1 : 6, "%s: abort DMA --> %d\n", ep->name, value);
+		c->channel_release(ep->dma);
+		ep->dma = NULL;
+	}
+
+	while (!list_empty(&(ep->req_list))) {
+		req = container_of(ep->req_list.next, struct musb_request,
+				request.list);
+		musb_g_giveback(ep, &req->request, status);
+	}
+}
+
+/* ----------------------------------------------------------------------- */
+
+/* Data transfers - pure PIO, pure DMA, or mixed mode */
+
+/*
+ * This assumes the separate CPPI engine is responding to DMA requests
+ * from the usb core ... sequenced a bit differently from mentor dma.
+ */
+
+static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
+{
+	if (can_bulk_split(musb, ep->type))
+		return ep->hw_ep->max_packet_sz_tx;
+	else
+		return ep->packet_sz;
+}
+
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+
+/* Peripheral tx (IN) using Mentor DMA works as follows:
+	Only mode 0 is used for transfers <= wPktSize;
+	mode 1 is used for larger transfers.
+
+	One of the following happens:
+	- Host sends IN token which causes an endpoint interrupt
+		-> TxAvail
+			-> if DMA is currently busy, exit.
+			-> if queue is non-empty, txstate().
+
+	- Request is queued by the gadget driver.
+		-> if queue was previously empty, txstate()
+
+	txstate()
+		-> start
+		  /\	-> setup DMA
+		  |     (data is transferred to the FIFO, then sent out when
+		  |	IN token(s) are received from the host)
+		  |		-> DMA interrupt on completion
+		  |		   calls TxAvail.
+		  |		      -> stop DMA, ~DMAENAB,
+		  |		      -> set TxPktRdy for last short pkt or zlp
+		  |		      -> Complete Request
+		  |		      -> Continue next request (call txstate)
+		  |___________________________________|
+
+ * Non-Mentor DMA engines can of course work differently, such as by
+ * upleveling from irq-per-packet to irq-per-buffer.
+ */
+
+#endif
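+
+/* Worked example (illustrative): with 512-byte packets, a 100-byte
+ * request fits in one packet and uses mode 0, while a 3 KB request uses
+ * mode 1 and completes with a single DMA interrupt, TxPktRdy then being
+ * set by hand only for a trailing short packet or zlp.
+ */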
+
+/*
+ * An endpoint is transmitting data. This can be called either from
+ * the IRQ routine or from ep.queue() to kickstart a request on an
+ * endpoint.
+ *
+ * Context: controller locked, IRQs blocked, endpoint selected
+ */
+static void txstate(struct musb *musb, struct musb_request *req)
+{
+	u8			epnum = req->epnum;
+	struct musb_ep		*musb_ep;
+	void __iomem		*epio = musb->endpoints[epnum].regs;
+	struct usb_request	*request;
+	u16			fifo_count = 0, csr;
+	int			use_dma = 0;
+
+	musb_ep = req->ep;
+
+	/* we shouldn't get here while DMA is active ... but we do ... */
+	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
+		DBG(4, "dma pending...\n");
+		return;
+	}
+
+	/* read TXCSR before */
+	csr = musb_readw(epio, MUSB_TXCSR);
+
+	request = &req->request;
+	fifo_count = min(max_ep_writesize(musb, musb_ep),
+			(int)(request->length - request->actual));
+
+	if (csr & MUSB_TXCSR_TXPKTRDY) {
+		DBG(5, "%s old packet still ready , txcsr %03x\n",
+				musb_ep->end_point.name, csr);
+		return;
+	}
+
+	if (csr & MUSB_TXCSR_P_SENDSTALL) {
+		DBG(5, "%s stalling, txcsr %03x\n",
+				musb_ep->end_point.name, csr);
+		return;
+	}
+
+	DBG(4, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
+			epnum, musb_ep->packet_sz, fifo_count,
+			csr);
+
+#ifndef	CONFIG_MUSB_PIO_ONLY
+	if (is_dma_capable() && musb_ep->dma) {
+		struct dma_controller	*c = musb->dma_controller;
+
+		use_dma = (request->dma != DMA_ADDR_INVALID);
+
+		/* MUSB_TXCSR_P_ISO is still set correctly */
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+		{
+			size_t request_size;
+
+			/* setup DMA, then program endpoint CSR */
+			request_size = min(request->length,
+						musb_ep->dma->max_len);
+			if (request_size <= musb_ep->packet_sz)
+				musb_ep->dma->desired_mode = 0;
+			else
+				musb_ep->dma->desired_mode = 1;
+
+			use_dma = use_dma && c->channel_program(
+					musb_ep->dma, musb_ep->packet_sz,
+					musb_ep->dma->desired_mode,
+					request->dma, request_size);
+			if (use_dma) {
+				if (musb_ep->dma->desired_mode == 0) {
+					/* ASSERT: DMAENAB is clear */
+					csr &= ~(MUSB_TXCSR_AUTOSET |
+							MUSB_TXCSR_DMAMODE);
+					csr |= (MUSB_TXCSR_DMAENAB |
+							MUSB_TXCSR_MODE);
+					/* against programming guide */
+				} else
+					csr |= (MUSB_TXCSR_AUTOSET
+							| MUSB_TXCSR_DMAENAB
+							| MUSB_TXCSR_DMAMODE
+							| MUSB_TXCSR_MODE);
+
+				csr &= ~MUSB_TXCSR_P_UNDERRUN;
+				musb_writew(epio, MUSB_TXCSR, csr);
+			}
+		}
+
+#elif defined(CONFIG_USB_TI_CPPI_DMA)
+		/* program endpoint CSR first, then setup DMA */
+		csr &= ~(MUSB_TXCSR_AUTOSET
+				| MUSB_TXCSR_DMAMODE
+				| MUSB_TXCSR_P_UNDERRUN
+				| MUSB_TXCSR_TXPKTRDY);
+		csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_DMAENAB;
+		musb_writew(epio, MUSB_TXCSR,
+			(MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN)
+				| csr);
+
+		/* ensure writebuffer is empty */
+		csr = musb_readw(epio, MUSB_TXCSR);
+
+		/* NOTE host side sets DMAENAB later than this; both are
+		 * OK since the transfer dma glue (between CPPI and Mentor
+		 * fifos) just tells CPPI it could start.  Data only moves
+		 * to the USB TX fifo when both fifos are ready.
+		 */
+
+		/* "mode" is irrelevant here; handle terminating ZLPs like
+		 * PIO does, since the hardware RNDIS mode seems unreliable
+		 * except for the last-packet-is-already-short case.
+		 */
+		use_dma = use_dma && c->channel_program(
+				musb_ep->dma, musb_ep->packet_sz,
+				0,
+				request->dma,
+				request->length);
+		if (!use_dma) {
+			c->channel_release(musb_ep->dma);
+			musb_ep->dma = NULL;
+			/* ASSERT: DMAENAB clear */
+			csr &= ~(MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE);
+			/* invariant: request->buf is non-null */
+		}
+#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
+		use_dma = use_dma && c->channel_program(
+				musb_ep->dma, musb_ep->packet_sz,
+				request->zero,
+				request->dma,
+				request->length);
+#endif
+	}
+#endif
+
+	if (!use_dma) {
+		musb_write_fifo(musb_ep->hw_ep, fifo_count,
+				(u8 *) (request->buf + request->actual));
+		request->actual += fifo_count;
+		csr |= MUSB_TXCSR_TXPKTRDY;
+		csr &= ~MUSB_TXCSR_P_UNDERRUN;
+		musb_writew(epio, MUSB_TXCSR, csr);
+	}
+
+	/* host may already have the data when this message shows... */
+	DBG(3, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
+			musb_ep->end_point.name, use_dma ? "dma" : "pio",
+			request->actual, request->length,
+			musb_readw(epio, MUSB_TXCSR),
+			fifo_count,
+			musb_readw(epio, MUSB_TXMAXP));
+}
+
+/*
+ * FIFO state update (e.g. data ready).
+ * Called from IRQ,  with controller locked.
+ */
+void musb_g_tx(struct musb *musb, u8 epnum)
+{
+	u16			csr;
+	struct usb_request	*request;
+	u8 __iomem		*mbase = musb->mregs;
+	struct musb_ep		*musb_ep = &musb->endpoints[epnum].ep_in;
+	void __iomem		*epio = musb->endpoints[epnum].regs;
+	struct dma_channel	*dma;
+
+	musb_ep_select(mbase, epnum);
+	request = next_request(musb_ep);
+
+	csr = musb_readw(epio, MUSB_TXCSR);
+	DBG(4, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);
+
+	dma = is_dma_capable() ? musb_ep->dma : NULL;
+	do {
+		/* REVISIT for high bandwidth, MUSB_TXCSR_P_INCOMPTX
+		 * probably rates reporting as a host error
+		 */
+		if (csr & MUSB_TXCSR_P_SENTSTALL) {
+			csr |= MUSB_TXCSR_P_WZC_BITS;
+			csr &= ~MUSB_TXCSR_P_SENTSTALL;
+			musb_writew(epio, MUSB_TXCSR, csr);
+			if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+				dma->status = MUSB_DMA_STATUS_CORE_ABORT;
+				musb->dma_controller->channel_abort(dma);
+			}
+
+			if (request)
+				musb_g_giveback(musb_ep, request, -EPIPE);
+
+			break;
+		}
+
+		if (csr & MUSB_TXCSR_P_UNDERRUN) {
+			/* we NAKed, no big deal ... little reason to care */
+			csr |= MUSB_TXCSR_P_WZC_BITS;
+			csr &= ~(MUSB_TXCSR_P_UNDERRUN
+					| MUSB_TXCSR_TXPKTRDY);
+			musb_writew(epio, MUSB_TXCSR, csr);
+			DBG(20, "underrun on ep%d, req %p\n", epnum, request);
+		}
+
+		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+			/* SHOULD NOT HAPPEN ... but has happened with CPPI,
+			 * e.g. after changing SENDSTALL (and in other cases);
+			 * harmless?
+			 */
+			DBG(5, "%s dma still busy?\n", musb_ep->end_point.name);
+			break;
+		}
+
+		if (request) {
+			u8	is_dma = 0;
+
+			if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
+				is_dma = 1;
+				csr |= MUSB_TXCSR_P_WZC_BITS;
+				csr &= ~(MUSB_TXCSR_DMAENAB
+						| MUSB_TXCSR_P_UNDERRUN
+						| MUSB_TXCSR_TXPKTRDY);
+				musb_writew(epio, MUSB_TXCSR, csr);
+				/* ensure writebuffer is empty */
+				csr = musb_readw(epio, MUSB_TXCSR);
+				request->actual += musb_ep->dma->actual_len;
+				DBG(4, "TXCSR%d %04x, dma off, "
+						"len %zu, req %p\n",
+					epnum, csr,
+					musb_ep->dma->actual_len,
+					request);
+			}
+
+			if (is_dma || request->actual == request->length) {
+
+				/* First, maybe a terminating short packet.
+				 * Some DMA engines might handle this by
+				 * themselves.
+				 */
+				if ((request->zero
+						&& request->length
+						&& (request->length
+							% musb_ep->packet_sz)
+							== 0)
+#ifdef CONFIG_USB_INVENTRA_DMA
+					|| (is_dma &&
+						((!dma->desired_mode) ||
+						    (request->actual &
+						    (musb_ep->packet_sz - 1))))
+#endif
+				) {
+					/* on dma completion, fifo may not
+					 * be available yet ...
+					 */
+					if (csr & MUSB_TXCSR_TXPKTRDY)
+						break;
+
+					DBG(4, "sending zero pkt\n");
+					musb_writew(epio, MUSB_TXCSR,
+							MUSB_TXCSR_MODE
+							| MUSB_TXCSR_TXPKTRDY);
+					request->zero = 0;
+				}
+
+				/* ... or if not, then complete it */
+				musb_g_giveback(musb_ep, request, 0);
+
+				/* kickstart next transfer if appropriate;
+				 * the packet that just completed might not
+				 * be transmitted for hours or days.
+				 * REVISIT for double buffering...
+				 * FIXME revisit for stalls too...
+				 */
+				musb_ep_select(mbase, epnum);
+				csr = musb_readw(epio, MUSB_TXCSR);
+				if (csr & MUSB_TXCSR_FIFONOTEMPTY)
+					break;
+				request = musb_ep->desc
+						? next_request(musb_ep)
+						: NULL;
+				if (!request) {
+					DBG(4, "%s idle now\n",
+						musb_ep->end_point.name);
+					break;
+				}
+			}
+
+			txstate(musb, to_musb_request(request));
+		}
+
+	} while (0);
+}
+
+/* ------------------------------------------------------------ */
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+
+/* Peripheral rx (OUT) using Mentor DMA works as follows:
+	- Only mode 0 is used.
+
+	- Request is queued by the gadget class driver.
+		-> if queue was previously empty, rxstate()
+
+	- Host sends OUT token which causes an endpoint interrupt
+	  /\      -> RxReady
+	  |	      -> if request queued, call rxstate
+	  |		/\	-> setup DMA
+	  |		|	     -> DMA interrupt on completion
+	  |		|		-> RxReady
+	  |		|		      -> stop DMA
+	  |		|		      -> ack the read
+	  |		|		      -> if data received = max expected
+	  |		|				by the request, or host
+	  |		|				sent a short packet,
+	  |		|				complete the request,
+	  |		|				and start the next one.
+	  |		|_____________________________________|
+	  |					 else just wait for the host
+	  |					    to send the next OUT token.
+	  |__________________________________________________|
+
+ * Non-Mentor DMA engines can of course work differently.
+ */
+
+#endif
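+
+/* Worked example (illustrative): with 512-byte packets and a 4 KB
+ * request, mode 0 raises one DMA completion per OUT packet; the request
+ * completes once all 4096 bytes have arrived, or earlier if the host
+ * sends a short packet.
+ */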
+
+/*
+ * Context: controller locked, IRQs blocked, endpoint selected
+ */
+static void rxstate(struct musb *musb, struct musb_request *req)
+{
+	u16			csr = 0;
+	const u8		epnum = req->epnum;
+	struct usb_request	*request = &req->request;
+	struct musb_ep		*musb_ep = &musb->endpoints[epnum].ep_out;
+	void __iomem		*epio = musb->endpoints[epnum].regs;
+	u16			fifo_count = 0;
+	u16			len = musb_ep->packet_sz;
+
+	csr = musb_readw(epio, MUSB_RXCSR);
+
+	if (is_cppi_enabled() && musb_ep->dma) {
+		struct dma_controller	*c = musb->dma_controller;
+		struct dma_channel	*channel = musb_ep->dma;
+
+		/* NOTE:  CPPI won't actually stop advancing the DMA
+		 * queue after short packet transfers, so this is almost
+		 * always going to run as IRQ-per-packet DMA so that
+		 * faults will be handled correctly.
+		 */
+		if (c->channel_program(channel,
+				musb_ep->packet_sz,
+				!request->short_not_ok,
+				request->dma + request->actual,
+				request->length - request->actual)) {
+
+			/* make sure that if an rxpkt arrived after the irq,
+			 * the cppi engine will be ready to take it as soon
+			 * as DMA is enabled
+			 */
+			csr &= ~(MUSB_RXCSR_AUTOCLEAR
+					| MUSB_RXCSR_DMAMODE);
+			csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS;
+			musb_writew(epio, MUSB_RXCSR, csr);
+			return;
+		}
+	}
+
+	if (csr & MUSB_RXCSR_RXPKTRDY) {
+		len = musb_readw(epio, MUSB_RXCOUNT);
+		if (request->actual < request->length) {
+#ifdef CONFIG_USB_INVENTRA_DMA
+			if (is_dma_capable() && musb_ep->dma) {
+				struct dma_controller	*c;
+				struct dma_channel	*channel;
+				int			use_dma = 0;
+
+				c = musb->dma_controller;
+				channel = musb_ep->dma;
+
+	/* We use DMA Req mode 0 in rx_csr, and DMA controller operates in
+	 * mode 0 only. So we do not get endpoint interrupts due to DMA
+	 * completion. We only get interrupts from DMA controller.
+	 *
+	 * We could operate in DMA mode 1 if we knew the size of the transfer
+	 * in advance. For mass storage class, request->length = what the host
+	 * sends, so that'd work.  But for pretty much everything else,
+	 * request->length is routinely more than what the host sends. For
+	 * most of these gadgets, end of transfer is signified either by a
+	 * short packet, or by filling the last byte of the buffer.  (Sending
+	 * extra data in that last packet should trigger an overflow fault.)
+	 * But in mode 1, we don't get a DMA completion interrupt for short
+	 * packets.
+	 *
+	 * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
+	 * to get endpoint interrupt on every DMA req, but that didn't seem
+	 * to work reliably.
+	 *
+	 * REVISIT an updated g_file_storage can set req->short_not_ok, which
+	 * then becomes usable as a runtime "use mode 1" hint...
+	 */
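+
+	/* Hypothetical sketch of that hint (not implemented here):
+	 *
+	 *	if (request->short_not_ok
+	 *			&& request->length > musb_ep->packet_sz)
+	 *		... program the channel in mode 1 ...
+	 */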
+
+				csr |= MUSB_RXCSR_DMAENAB;
+#ifdef USE_MODE1
+				csr |= MUSB_RXCSR_AUTOCLEAR;
+				/* csr |= MUSB_RXCSR_DMAMODE; */
+
+				/* this special sequence (enabling and then
+				 * disabling MUSB_RXCSR_DMAMODE) is required
+				 * to get DMAReq to activate
+				 */
+				musb_writew(epio, MUSB_RXCSR,
+					csr | MUSB_RXCSR_DMAMODE);
+#endif
+				musb_writew(epio, MUSB_RXCSR, csr);
+
+				if (request->actual < request->length) {
+					int transfer_size = 0;
+#ifdef USE_MODE1
+					transfer_size = min(request->length,
+							channel->max_len);
+#else
+					transfer_size = len;
+#endif
+					if (transfer_size <= musb_ep->packet_sz)
+						musb_ep->dma->desired_mode = 0;
+					else
+						musb_ep->dma->desired_mode = 1;
+
+					use_dma = c->channel_program(
+							channel,
+							musb_ep->packet_sz,
+							channel->desired_mode,
+							request->dma
+							+ request->actual,
+							transfer_size);
+				}
+
+				if (use_dma)
+					return;
+			}
+#endif	/* Mentor's DMA */
+
+			fifo_count = request->length - request->actual;
+			DBG(3, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
+					musb_ep->end_point.name,
+					len, fifo_count,
+					musb_ep->packet_sz);
+
+			fifo_count = min(len, fifo_count);
+
+#ifdef	CONFIG_USB_TUSB_OMAP_DMA
+			if (tusb_dma_omap() && musb_ep->dma) {
+				struct dma_controller *c = musb->dma_controller;
+				struct dma_channel *channel = musb_ep->dma;
+				u32 dma_addr = request->dma + request->actual;
+				int ret;
+
+				ret = c->channel_program(channel,
+						musb_ep->packet_sz,
+						channel->desired_mode,
+						dma_addr,
+						fifo_count);
+				if (ret)
+					return;
+			}
+#endif
+
+			musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
+					(request->buf + request->actual));
+			request->actual += fifo_count;
+
+			/* REVISIT if we left anything in the fifo, flush
+			 * it and report -EOVERFLOW
+			 */
+
+			/* ack the read! */
+			csr |= MUSB_RXCSR_P_WZC_BITS;
+			csr &= ~MUSB_RXCSR_RXPKTRDY;
+			musb_writew(epio, MUSB_RXCSR, csr);
+		}
+	}
+
+	/* reached the end, or a short packet was detected */
+	if (request->actual == request->length || len < musb_ep->packet_sz)
+		musb_g_giveback(musb_ep, request, 0);
+}
+
+/*
+ * Data ready for a request; called from IRQ
+ */
+void musb_g_rx(struct musb *musb, u8 epnum)
+{
+	u16			csr;
+	struct usb_request	*request;
+	void __iomem		*mbase = musb->mregs;
+	struct musb_ep		*musb_ep = &musb->endpoints[epnum].ep_out;
+	void __iomem		*epio = musb->endpoints[epnum].regs;
+	struct dma_channel	*dma;
+
+	musb_ep_select(mbase, epnum);
+
+	request = next_request(musb_ep);
+
+	csr = musb_readw(epio, MUSB_RXCSR);
+	dma = is_dma_capable() ? musb_ep->dma : NULL;
+
+	DBG(4, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name,
+			csr, dma ? " (dma)" : "", request);
+
+	if (csr & MUSB_RXCSR_P_SENTSTALL) {
+		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
+			(void) musb->dma_controller->channel_abort(dma);
+			request->actual += musb_ep->dma->actual_len;
+		}
+
+		csr |= MUSB_RXCSR_P_WZC_BITS;
+		csr &= ~MUSB_RXCSR_P_SENTSTALL;
+		musb_writew(epio, MUSB_RXCSR, csr);
+
+		if (request)
+			musb_g_giveback(musb_ep, request, -EPIPE);
+		goto done;
+	}
+
+	if (csr & MUSB_RXCSR_P_OVERRUN) {
+		/* csr |= MUSB_RXCSR_P_WZC_BITS; */
+		csr &= ~MUSB_RXCSR_P_OVERRUN;
+		musb_writew(epio, MUSB_RXCSR, csr);
+
+		DBG(3, "%s iso overrun on %p\n", musb_ep->name, request);
+		if (request && request->status == -EINPROGRESS)
+			request->status = -EOVERFLOW;
+	}
+	if (csr & MUSB_RXCSR_INCOMPRX) {
+		/* REVISIT not necessarily an error */
+		DBG(4, "%s, incomprx\n", musb_ep->end_point.name);
+	}
+
+	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+		/* "should not happen"; likely RXPKTRDY pending for DMA */
+		DBG((csr & MUSB_RXCSR_DMAENAB) ? 4 : 1,
+			"%s busy, csr %04x\n",
+			musb_ep->end_point.name, csr);
+		goto done;
+	}
+
+	if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
+		csr &= ~(MUSB_RXCSR_AUTOCLEAR
+				| MUSB_RXCSR_DMAENAB
+				| MUSB_RXCSR_DMAMODE);
+		musb_writew(epio, MUSB_RXCSR,
+			MUSB_RXCSR_P_WZC_BITS | csr);
+
+		request->actual += musb_ep->dma->actual_len;
+
+		DBG(4, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n",
+			epnum, csr,
+			musb_readw(epio, MUSB_RXCSR),
+			musb_ep->dma->actual_len, request);
+
+#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA)
+		/* Autoclear doesn't clear RxPktRdy for short packets */
+		if ((dma->desired_mode == 0)
+				|| (dma->actual_len
+					& (musb_ep->packet_sz - 1))) {
+			/* ack the read! */
+			csr &= ~MUSB_RXCSR_RXPKTRDY;
+			musb_writew(epio, MUSB_RXCSR, csr);
+		}
+
+		/* incomplete, and not short? wait for next IN packet */
+		if ((request->actual < request->length)
+				&& (musb_ep->dma->actual_len
+					== musb_ep->packet_sz))
+			goto done;
+#endif
+		musb_g_giveback(musb_ep, request, 0);
+
+		request = next_request(musb_ep);
+		if (!request)
+			goto done;
+
+		/* don't start more i/o till the stall clears */
+		musb_ep_select(mbase, epnum);
+		csr = musb_readw(epio, MUSB_RXCSR);
+		if (csr & MUSB_RXCSR_P_SENDSTALL)
+			goto done;
+	}
+
+
+	/* analyze request if the ep is hot */
+	if (request)
+		rxstate(musb, to_musb_request(request));
+	else
+		DBG(3, "packet waiting for %s%s request\n",
+				musb_ep->desc ? "" : "inactive ",
+				musb_ep->end_point.name);
+
+done:
+	return;
+}
+
+/* ------------------------------------------------------------ */
+
+static int musb_gadget_enable(struct usb_ep *ep,
+			const struct usb_endpoint_descriptor *desc)
+{
+	unsigned long		flags;
+	struct musb_ep		*musb_ep;
+	struct musb_hw_ep	*hw_ep;
+	void __iomem		*regs;
+	struct musb		*musb;
+	void __iomem	*mbase;
+	u8		epnum;
+	u16		csr;
+	unsigned	tmp;
+	int		status = -EINVAL;
+
+	if (!ep || !desc)
+		return -EINVAL;
+
+	musb_ep = to_musb_ep(ep);
+	hw_ep = musb_ep->hw_ep;
+	regs = hw_ep->regs;
+	musb = musb_ep->musb;
+	mbase = musb->mregs;
+	epnum = musb_ep->current_epnum;
+
+	spin_lock_irqsave(&musb->lock, flags);
+
+	if (musb_ep->desc) {
+		status = -EBUSY;
+		goto fail;
+	}
+	musb_ep->type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+
+	/* check direction and (later) maxpacket size against endpoint */
+	if ((desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) != epnum)
+		goto fail;
+
+	/* REVISIT this rules out high bandwidth periodic transfers */
+	tmp = le16_to_cpu(desc->wMaxPacketSize);
+	if (tmp & ~0x07ff)
+		goto fail;
+	musb_ep->packet_sz = tmp;
+
+	/* enable the interrupts for the endpoint, set the endpoint
+	 * packet size (or fail), set the mode, clear the fifo
+	 */
+	musb_ep_select(mbase, epnum);
+	if (desc->bEndpointAddress & USB_DIR_IN) {
+		u16 int_txe = musb_readw(mbase, MUSB_INTRTXE);
+
+		if (hw_ep->is_shared_fifo)
+			musb_ep->is_in = 1;
+		if (!musb_ep->is_in)
+			goto fail;
+		if (tmp > hw_ep->max_packet_sz_tx)
+			goto fail;
+
+		int_txe |= (1 << epnum);
+		musb_writew(mbase, MUSB_INTRTXE, int_txe);
+
+		/* REVISIT if can_bulk_split(), use by updating "tmp";
+		 * likewise high bandwidth periodic tx
+		 */
+		musb_writew(regs, MUSB_TXMAXP, tmp);
+
+		csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
+		if (musb_readw(regs, MUSB_TXCSR)
+				& MUSB_TXCSR_FIFONOTEMPTY)
+			csr |= MUSB_TXCSR_FLUSHFIFO;
+		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
+			csr |= MUSB_TXCSR_P_ISO;
+
+		/* set twice in case of double buffering */
+		musb_writew(regs, MUSB_TXCSR, csr);
+		/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
+		musb_writew(regs, MUSB_TXCSR, csr);
+
+	} else {
+		u16 int_rxe = musb_readw(mbase, MUSB_INTRRXE);
+
+		if (hw_ep->is_shared_fifo)
+			musb_ep->is_in = 0;
+		if (musb_ep->is_in)
+			goto fail;
+		if (tmp > hw_ep->max_packet_sz_rx)
+			goto fail;
+
+		int_rxe |= (1 << epnum);
+		musb_writew(mbase, MUSB_INTRRXE, int_rxe);
+
+		/* REVISIT if can_bulk_combine() use by updating "tmp"
+		 * likewise high bandwidth periodic rx
+		 */
+		musb_writew(regs, MUSB_RXMAXP, tmp);
+
+		/* force shared fifo to OUT-only mode */
+		if (hw_ep->is_shared_fifo) {
+			csr = musb_readw(regs, MUSB_TXCSR);
+			csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY);
+			musb_writew(regs, MUSB_TXCSR, csr);
+		}
+
+		csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG;
+		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
+			csr |= MUSB_RXCSR_P_ISO;
+		else if (musb_ep->type == USB_ENDPOINT_XFER_INT)
+			csr |= MUSB_RXCSR_DISNYET;
+
+		/* set twice in case of double buffering */
+		musb_writew(regs, MUSB_RXCSR, csr);
+		musb_writew(regs, MUSB_RXCSR, csr);
+	}
+
+	/* NOTE:  all the I/O code _should_ work fine without DMA, in case
+	 * for some reason you run out of channels here.
+	 */
+	if (is_dma_capable() && musb->dma_controller) {
+		struct dma_controller	*c = musb->dma_controller;
+
+		musb_ep->dma = c->channel_alloc(c, hw_ep,
+				(desc->bEndpointAddress & USB_DIR_IN));
+	} else
+		musb_ep->dma = NULL;
+
+	musb_ep->desc = desc;
+	musb_ep->busy = 0;
+	status = 0;
+
+	pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
+			musb_driver_name, musb_ep->end_point.name,
+			({ char *s; switch (musb_ep->type) {
+			case USB_ENDPOINT_XFER_BULK:	s = "bulk"; break;
+			case USB_ENDPOINT_XFER_INT:	s = "int"; break;
+			default:			s = "iso"; break;
+			}; s; }),
+			musb_ep->is_in ? "IN" : "OUT",
+			musb_ep->dma ? "dma, " : "",
+			musb_ep->packet_sz);
+
+	schedule_work(&musb->irq_work);
+
+fail:
+	spin_unlock_irqrestore(&musb->lock, flags);
+	return status;
+}
+
+/*
+ * Disable an endpoint flushing all requests queued.
+ */
+static int musb_gadget_disable(struct usb_ep *ep)
+{
+	unsigned long	flags;
+	struct musb	*musb;
+	u8		epnum;
+	struct musb_ep	*musb_ep;
+	void __iomem	*epio;
+	int		status = 0;
+
+	musb_ep = to_musb_ep(ep);
+	musb = musb_ep->musb;
+	epnum = musb_ep->current_epnum;
+	epio = musb->endpoints[epnum].regs;
+
+	spin_lock_irqsave(&musb->lock, flags);
+	musb_ep_select(musb->mregs, epnum);
+
+	/* zero the endpoint sizes */
+	if (musb_ep->is_in) {
+		u16 int_txe = musb_readw(musb->mregs, MUSB_INTRTXE);
+		int_txe &= ~(1 << epnum);
+		musb_writew(musb->mregs, MUSB_INTRTXE, int_txe);
+		musb_writew(epio, MUSB_TXMAXP, 0);
+	} else {
+		u16 int_rxe = musb_readw(musb->mregs, MUSB_INTRRXE);
+		int_rxe &= ~(1 << epnum);
+		musb_writew(musb->mregs, MUSB_INTRRXE, int_rxe);
+		musb_writew(epio, MUSB_RXMAXP, 0);
+	}
+
+	musb_ep->desc = NULL;
+
+	/* abort all pending DMA and requests */
+	nuke(musb_ep, -ESHUTDOWN);
+
+	schedule_work(&musb->irq_work);
+
+	spin_unlock_irqrestore(&(musb->lock), flags);
+
+	DBG(2, "%s\n", musb_ep->end_point.name);
+
+	return status;
+}
+
+/*
+ * Allocate a request for an endpoint.
+ * Reused by ep0 code.
+ */
+struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
+{
+	struct musb_ep		*musb_ep = to_musb_ep(ep);
+	struct musb_request	*request = NULL;
+
+	request = kzalloc(sizeof *request, gfp_flags);
+	if (request) {
+		INIT_LIST_HEAD(&request->request.list);
+		request->request.dma = DMA_ADDR_INVALID;
+		request->epnum = musb_ep->current_epnum;
+		request->ep = musb_ep;
+	}
+
+	return &request->request;
+}
+
+/*
+ * Free a request
+ * Reused by ep0 code.
+ */
+void musb_free_request(struct usb_ep *ep, struct usb_request *req)
+{
+	kfree(to_musb_request(req));
+}
+
+static LIST_HEAD(buffers);
+
+struct free_record {
+	struct list_head	list;
+	struct device		*dev;
+	unsigned		bytes;
+	dma_addr_t		dma;
+};
+
+/*
+ * Context: controller locked, IRQs blocked.
+ */
+static void musb_ep_restart(struct musb *musb, struct musb_request *req)
+{
+	DBG(3, "<== %s request %p len %u on hw_ep%d\n",
+		req->tx ? "TX/IN" : "RX/OUT",
+		&req->request, req->request.length, req->epnum);
+
+	musb_ep_select(musb->mregs, req->epnum);
+	if (req->tx)
+		txstate(musb, req);
+	else
+		rxstate(musb, req);
+}
+
+static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
+			gfp_t gfp_flags)
+{
+	struct musb_ep		*musb_ep;
+	struct musb_request	*request;
+	struct musb		*musb;
+	int			status = 0;
+	unsigned long		lockflags;
+
+	if (!ep || !req)
+		return -EINVAL;
+	if (!req->buf)
+		return -ENODATA;
+
+	musb_ep = to_musb_ep(ep);
+	musb = musb_ep->musb;
+
+	request = to_musb_request(req);
+	request->musb = musb;
+
+	if (request->ep != musb_ep)
+		return -EINVAL;
+
+	DBG(4, "<== to %s request=%p\n", ep->name, req);
+
+	/* request is mine now... */
+	request->request.actual = 0;
+	request->request.status = -EINPROGRESS;
+	request->epnum = musb_ep->current_epnum;
+	request->tx = musb_ep->is_in;
+
+	if (is_dma_capable() && musb_ep->dma) {
+		if (request->request.dma == DMA_ADDR_INVALID) {
+			request->request.dma = dma_map_single(
+					musb->controller,
+					request->request.buf,
+					request->request.length,
+					request->tx
+						? DMA_TO_DEVICE
+						: DMA_FROM_DEVICE);
+			request->mapped = 1;
+		} else {
+			dma_sync_single_for_device(musb->controller,
+					request->request.dma,
+					request->request.length,
+					request->tx
+						? DMA_TO_DEVICE
+						: DMA_FROM_DEVICE);
+			request->mapped = 0;
+		}
+	} else if (!req->buf) {
+		return -ENODATA;
+	} else
+		request->mapped = 0;
+
+	spin_lock_irqsave(&musb->lock, lockflags);
+
+	/* don't queue if the ep is down */
+	if (!musb_ep->desc) {
+		DBG(4, "req %p queued to %s while ep %s\n",
+				req, ep->name, "disabled");
+		status = -ESHUTDOWN;
+		goto cleanup;
+	}
+
+	/* add request to the list */
+	list_add_tail(&(request->request.list), &(musb_ep->req_list));
+
+	/* if this is the head of the queue, start i/o ... */
+	if (!musb_ep->busy && &request->request.list == musb_ep->req_list.next)
+		musb_ep_restart(musb, request);
+
+cleanup:
+	spin_unlock_irqrestore(&musb->lock, lockflags);
+	return status;
+}
+
+static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
+{
+	struct musb_ep		*musb_ep = to_musb_ep(ep);
+	struct usb_request	*r;
+	unsigned long		flags;
+	int			status = 0;
+	struct musb		*musb = musb_ep->musb;
+
+	if (!ep || !request || to_musb_request(request)->ep != musb_ep)
+		return -EINVAL;
+
+	spin_lock_irqsave(&musb->lock, flags);
+
+	list_for_each_entry(r, &musb_ep->req_list, list) {
+		if (r == request)
+			break;
+	}
+	if (r != request) {
+		DBG(3, "request %p not queued to %s\n", request, ep->name);
+		status = -EINVAL;
+		goto done;
+	}
+
+	/* if the hardware doesn't have the request, easy ... */
+	if (musb_ep->req_list.next != &request->list || musb_ep->busy)
+		musb_g_giveback(musb_ep, request, -ECONNRESET);
+
+	/* ... else abort the dma transfer ... */
+	else if (is_dma_capable() && musb_ep->dma) {
+		struct dma_controller	*c = musb->dma_controller;
+
+		musb_ep_select(musb->mregs, musb_ep->current_epnum);
+		if (c->channel_abort)
+			status = c->channel_abort(musb_ep->dma);
+		else
+			status = -EBUSY;
+		if (status == 0)
+			musb_g_giveback(musb_ep, request, -ECONNRESET);
+	} else {
+		/* NOTE: by sticking to easily tested hardware/driver states,
+		 * we leave counting of in-flight packets imprecise.
+		 */
+		musb_g_giveback(musb_ep, request, -ECONNRESET);
+	}
+
+done:
+	spin_unlock_irqrestore(&musb->lock, flags);
+	return status;
+}
+
+/*
+ * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx any
+ * data but will queue requests.
+ *
+ * exported to ep0 code
+ */
+int musb_gadget_set_halt(struct usb_ep *ep, int value)
+{
+	struct musb_ep		*musb_ep = to_musb_ep(ep);
+	u8			epnum = musb_ep->current_epnum;
+	struct musb		*musb = musb_ep->musb;
+	void __iomem		*epio = musb->endpoints[epnum].regs;
+	void __iomem		*mbase;
+	unsigned long		flags;
+	u16			csr;
+	struct musb_request	*request = NULL;
+	int			status = 0;
+
+	if (!ep)
+		return -EINVAL;
+	mbase = musb->mregs;
+
+	spin_lock_irqsave(&musb->lock, flags);
+
+	if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) {
+		status = -EINVAL;
+		goto done;
+	}
+
+	musb_ep_select(mbase, epnum);
+
+	/* cannot portably stall with non-empty FIFO */
+	request = to_musb_request(next_request(musb_ep));
+	if (value && musb_ep->is_in) {
+		csr = musb_readw(epio, MUSB_TXCSR);
+		if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
+			DBG(3, "%s fifo busy, cannot halt\n", ep->name);
+			spin_unlock_irqrestore(&musb->lock, flags);
+			return -EAGAIN;
+		}
+
+	}
+
+	/* set/clear the stall and toggle bits */
+	DBG(2, "%s: %s stall\n", ep->name, value ? "set" : "clear");
+	if (musb_ep->is_in) {
+		csr = musb_readw(epio, MUSB_TXCSR);
+		if (csr & MUSB_TXCSR_FIFONOTEMPTY)
+			csr |= MUSB_TXCSR_FLUSHFIFO;
+		csr |= MUSB_TXCSR_P_WZC_BITS
+			| MUSB_TXCSR_CLRDATATOG;
+		if (value)
+			csr |= MUSB_TXCSR_P_SENDSTALL;
+		else
+			csr &= ~(MUSB_TXCSR_P_SENDSTALL
+				| MUSB_TXCSR_P_SENTSTALL);
+		csr &= ~MUSB_TXCSR_TXPKTRDY;
+		musb_writew(epio, MUSB_TXCSR, csr);
+	} else {
+		csr = musb_readw(epio, MUSB_RXCSR);
+		csr |= MUSB_RXCSR_P_WZC_BITS
+			| MUSB_RXCSR_FLUSHFIFO
+			| MUSB_RXCSR_CLRDATATOG;
+		if (value)
+			csr |= MUSB_RXCSR_P_SENDSTALL;
+		else
+			csr &= ~(MUSB_RXCSR_P_SENDSTALL
+				| MUSB_RXCSR_P_SENTSTALL);
+		musb_writew(epio, MUSB_RXCSR, csr);
+	}
+
+done:
+
+	/* maybe start the first request in the queue */
+	if (!musb_ep->busy && !value && request) {
+		DBG(3, "restarting the request\n");
+		musb_ep_restart(musb, request);
+	}
+
+	spin_unlock_irqrestore(&musb->lock, flags);
+	return status;
+}
+
+static int musb_gadget_fifo_status(struct usb_ep *ep)
+{
+	struct musb_ep		*musb_ep = to_musb_ep(ep);
+	void __iomem		*epio = musb_ep->hw_ep->regs;
+	int			retval = -EINVAL;
+
+	if (musb_ep->desc && !musb_ep->is_in) {
+		struct musb		*musb = musb_ep->musb;
+		int			epnum = musb_ep->current_epnum;
+		void __iomem		*mbase = musb->mregs;
+		unsigned long		flags;
+
+		spin_lock_irqsave(&musb->lock, flags);
+
+		musb_ep_select(mbase, epnum);
+		/* FIXME return zero unless RXPKTRDY is set */
+		retval = musb_readw(epio, MUSB_RXCOUNT);
+
+		spin_unlock_irqrestore(&musb->lock, flags);
+	}
+	return retval;
+}
+
+static void musb_gadget_fifo_flush(struct usb_ep *ep)
+{
+	struct musb_ep	*musb_ep = to_musb_ep(ep);
+	struct musb	*musb = musb_ep->musb;
+	u8		epnum = musb_ep->current_epnum;
+	void __iomem	*epio = musb->endpoints[epnum].regs;
+	void __iomem	*mbase;
+	unsigned long	flags;
+	u16		csr, int_txe;
+
+	mbase = musb->mregs;
+
+	spin_lock_irqsave(&musb->lock, flags);
+	musb_ep_select(mbase, (u8) epnum);
+
+	/* disable interrupts */
+	int_txe = musb_readw(mbase, MUSB_INTRTXE);
+	musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));
+
+	if (musb_ep->is_in) {
+		csr = musb_readw(epio, MUSB_TXCSR);
+		if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
+			csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
+			musb_writew(epio, MUSB_TXCSR, csr);
+			/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
+			musb_writew(epio, MUSB_TXCSR, csr);
+		}
+	} else {
+		csr = musb_readw(epio, MUSB_RXCSR);
+		csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
+		musb_writew(epio, MUSB_RXCSR, csr);
+		musb_writew(epio, MUSB_RXCSR, csr);
+	}
+
+	/* re-enable interrupt */
+	musb_writew(mbase, MUSB_INTRTXE, int_txe);
+	spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+static const struct usb_ep_ops musb_ep_ops = {
+	.enable		= musb_gadget_enable,
+	.disable	= musb_gadget_disable,
+	.alloc_request	= musb_alloc_request,
+	.free_request	= musb_free_request,
+	.queue		= musb_gadget_queue,
+	.dequeue	= musb_gadget_dequeue,
+	.set_halt	= musb_gadget_set_halt,
+	.fifo_status	= musb_gadget_fifo_status,
+	.fifo_flush	= musb_gadget_fifo_flush
+};
+
+/* ----------------------------------------------------------------------- */
+
+static int musb_gadget_get_frame(struct usb_gadget *gadget)
+{
+	struct musb	*musb = gadget_to_musb(gadget);
+
+	return (int)musb_readw(musb->mregs, MUSB_FRAME);
+}
+
+static int musb_gadget_wakeup(struct usb_gadget *gadget)
+{
+	struct musb	*musb = gadget_to_musb(gadget);
+	void __iomem	*mregs = musb->mregs;
+	unsigned long	flags;
+	int		status = -EINVAL;
+	u8		power, devctl;
+	int		retries;
+
+	spin_lock_irqsave(&musb->lock, flags);
+
+	switch (musb->xceiv.state) {
+	case OTG_STATE_B_PERIPHERAL:
+		/* NOTE:  OTG state machine doesn't include B_SUSPENDED;
+		 * that's part of the standard usb 1.1 state machine, and
+		 * doesn't affect OTG transitions.
+		 */
+		if (musb->may_wakeup && musb->is_suspended)
+			break;
+		goto done;
+	case OTG_STATE_B_IDLE:
+		/* Start SRP ... OTG not required. */
+		devctl = musb_readb(mregs, MUSB_DEVCTL);
+		DBG(2, "Sending SRP: devctl: %02x\n", devctl);
+		devctl |= MUSB_DEVCTL_SESSION;
+		musb_writeb(mregs, MUSB_DEVCTL, devctl);
+		devctl = musb_readb(mregs, MUSB_DEVCTL);
+		retries = 100;
+		while (!(devctl & MUSB_DEVCTL_SESSION)) {
+			devctl = musb_readb(mregs, MUSB_DEVCTL);
+			if (retries-- < 1)
+				break;
+		}
+		retries = 10000;
+		while (devctl & MUSB_DEVCTL_SESSION) {
+			devctl = musb_readb(mregs, MUSB_DEVCTL);
+			if (retries-- < 1)
+				break;
+		}
+
+		/* Block idling for at least 1s */
+		musb_platform_try_idle(musb,
+			jiffies + msecs_to_jiffies(1000));
+
+		status = 0;
+		goto done;
+	default:
+		DBG(2, "Unhandled wake: %s\n", otg_state_string(musb));
+		goto done;
+	}
+
+	status = 0;
+
+	power = musb_readb(mregs, MUSB_POWER);
+	power |= MUSB_POWER_RESUME;
+	musb_writeb(mregs, MUSB_POWER, power);
+	DBG(2, "issue wakeup\n");
+
+	/* FIXME do this next chunk in a timer callback, no udelay */
+	mdelay(2);
+
+	power = musb_readb(mregs, MUSB_POWER);
+	power &= ~MUSB_POWER_RESUME;
+	musb_writeb(mregs, MUSB_POWER, power);
+done:
+	spin_unlock_irqrestore(&musb->lock, flags);
+	return status;
+}
+
+static int
+musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
+{
+	struct musb	*musb = gadget_to_musb(gadget);
+
+	musb->is_self_powered = !!is_selfpowered;
+	return 0;
+}
+
+static void musb_pullup(struct musb *musb, int is_on)
+{
+	u8 power;
+
+	power = musb_readb(musb->mregs, MUSB_POWER);
+	if (is_on)
+		power |= MUSB_POWER_SOFTCONN;
+	else
+		power &= ~MUSB_POWER_SOFTCONN;
+
+	/* FIXME if on, HdrcStart; if off, HdrcStop */
+
+	DBG(3, "gadget %s D+ pullup %s\n",
+		musb->gadget_driver->function, is_on ? "on" : "off");
+	musb_writeb(musb->mregs, MUSB_POWER, power);
+}
+
+#if 0
+static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
+{
+	DBG(2, "<= %s =>\n", __func__);
+
+	/*
+	 * FIXME iff driver's softconnect flag is set (as it is during probe,
+	 * though that can clear it), just musb_pullup().
+	 */
+
+	return -EINVAL;
+}
+#endif
+
+static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
+{
+	struct musb	*musb = gadget_to_musb(gadget);
+
+	if (!musb->xceiv.set_power)
+		return -EOPNOTSUPP;
+	return otg_set_power(&musb->xceiv, mA);
+}
+
+static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
+{
+	struct musb	*musb = gadget_to_musb(gadget);
+	unsigned long	flags;
+
+	is_on = !!is_on;
+
+	/* NOTE: this assumes we are sensing vbus; we'd rather
+	 * not pullup unless the B-session is active.
+	 */
+	spin_lock_irqsave(&musb->lock, flags);
+	if (is_on != musb->softconnect) {
+		musb->softconnect = is_on;
+		musb_pullup(musb, is_on);
+	}
+	spin_unlock_irqrestore(&musb->lock, flags);
+	return 0;
+}
+
+static const struct usb_gadget_ops musb_gadget_operations = {
+	.get_frame		= musb_gadget_get_frame,
+	.wakeup			= musb_gadget_wakeup,
+	.set_selfpowered	= musb_gadget_set_self_powered,
+	/* .vbus_session		= musb_gadget_vbus_session, */
+	.vbus_draw		= musb_gadget_vbus_draw,
+	.pullup			= musb_gadget_pullup,
+};
+
+/* ----------------------------------------------------------------------- */
+
+/* Registration */
+
+/* Only this registration code "knows" the rule (from USB standards)
+ * about there being only one external upstream port.  It assumes
+ * all peripheral ports are external...
+ */
+static struct musb *the_gadget;
+
+static void musb_gadget_release(struct device *dev)
+{
+	/* kref_put(WHAT) */
+	dev_dbg(dev, "%s\n", __func__);
+}
+
+static void __init
+init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
+{
+	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
+
+	memset(ep, 0, sizeof *ep);
+
+	ep->current_epnum = epnum;
+	ep->musb = musb;
+	ep->hw_ep = hw_ep;
+	ep->is_in = is_in;
+
+	INIT_LIST_HEAD(&ep->req_list);
+
+	sprintf(ep->name, "ep%d%s", epnum,
+			(!epnum || hw_ep->is_shared_fifo) ? "" : (
+				is_in ? "in" : "out"));
+	ep->end_point.name = ep->name;
+	INIT_LIST_HEAD(&ep->end_point.ep_list);
+	if (!epnum) {
+		ep->end_point.maxpacket = 64;
+		ep->end_point.ops = &musb_g_ep0_ops;
+		musb->g.ep0 = &ep->end_point;
+	} else {
+		if (is_in)
+			ep->end_point.maxpacket = hw_ep->max_packet_sz_tx;
+		else
+			ep->end_point.maxpacket = hw_ep->max_packet_sz_rx;
+		ep->end_point.ops = &musb_ep_ops;
+		list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
+	}
+}
+
+/*
+ * Initialize the endpoints exposed to peripheral drivers, with backlinks
+ * to the rest of the driver state.
+ */
+static inline void __init musb_g_init_endpoints(struct musb *musb)
+{
+	u8			epnum;
+	struct musb_hw_ep	*hw_ep;
+	unsigned		count = 0;
+
+	/* initialize endpoint list just once */
+	INIT_LIST_HEAD(&(musb->g.ep_list));
+
+	for (epnum = 0, hw_ep = musb->endpoints;
+			epnum < musb->nr_endpoints;
+			epnum++, hw_ep++) {
+		if (hw_ep->is_shared_fifo /* || !epnum */) {
+			init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
+			count++;
+		} else {
+			if (hw_ep->max_packet_sz_tx) {
+				init_peripheral_ep(musb, &hw_ep->ep_in,
+							epnum, 1);
+				count++;
+			}
+			if (hw_ep->max_packet_sz_rx) {
+				init_peripheral_ep(musb, &hw_ep->ep_out,
+							epnum, 0);
+				count++;
+			}
+		}
+	}
+}
+
+/* called once during driver setup to initialize and link into
+ * the driver model; memory is zeroed.
+ */
+int __init musb_gadget_setup(struct musb *musb)
+{
+	int status;
+
+	/* REVISIT minor race:  if (erroneously) setting up two
+	 * musb peripherals at the same time, only the bus lock
+	 * is probably held.
+	 */
+	if (the_gadget)
+		return -EBUSY;
+	the_gadget = musb;
+
+	musb->g.ops = &musb_gadget_operations;
+	musb->g.is_dualspeed = 1;
+	musb->g.speed = USB_SPEED_UNKNOWN;
+
+	/* this "gadget" abstracts/virtualizes the controller */
+	strcpy(musb->g.dev.bus_id, "gadget");
+	musb->g.dev.parent = musb->controller;
+	musb->g.dev.dma_mask = musb->controller->dma_mask;
+	musb->g.dev.release = musb_gadget_release;
+	musb->g.name = musb_driver_name;
+
+	if (is_otg_enabled(musb))
+		musb->g.is_otg = 1;
+
+	musb_g_init_endpoints(musb);
+
+	musb->is_active = 0;
+	musb_platform_try_idle(musb, 0);
+
+	status = device_register(&musb->g.dev);
+	if (status != 0)
+		the_gadget = NULL;
+	return status;
+}
+
+void musb_gadget_cleanup(struct musb *musb)
+{
+	if (musb != the_gadget)
+		return;
+
+	device_unregister(&musb->g.dev);
+	the_gadget = NULL;
+}
+
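+/* Illustrative usage sketch, with hypothetical names (not part of this
+ * driver): a function driver binds to this controller like
+ *
+ *	static struct usb_gadget_driver my_driver = {
+ *		.function	= "my_function",
+ *		.speed		= USB_SPEED_HIGH,
+ *		.bind		= my_bind,
+ *		.setup		= my_setup,
+ *		.disconnect	= my_disconnect,
+ *		.driver		= { .name = "my_function" },
+ *	};
+ *
+ *	status = usb_gadget_register_driver(&my_driver);
+ *
+ * The checks below reject drivers lacking bind/setup or high speed support.
+ */
+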
+/*
+ * Register the gadget driver. Used by gadget drivers when
+ * registering themselves with the controller.
+ *
+ * -EINVAL invalid parameters (e.g. an incomplete driver)
+ * -EBUSY another gadget is already using the controller
+ * -ENOMEM no memory to perform the operation
+ *
+ * @param driver the gadget driver
+ * @return <0 if error, 0 if everything is fine
+ */
+int usb_gadget_register_driver(struct usb_gadget_driver *driver)
+{
+	int retval;
+	unsigned long flags;
+	struct musb *musb = the_gadget;
+
+	if (!driver
+			|| driver->speed != USB_SPEED_HIGH
+			|| !driver->bind
+			|| !driver->setup)
+		return -EINVAL;
+
+	/* driver must be initialized to support peripheral mode */
+	if (!musb || !(musb->board_mode == MUSB_OTG
+				|| musb->board_mode == MUSB_PERIPHERAL)) {
+		DBG(1, "%s, no dev??\n", __func__);
+		return -ENODEV;
+	}
+
+	DBG(3, "registering driver %s\n", driver->function);
+	spin_lock_irqsave(&musb->lock, flags);
+
+	if (musb->gadget_driver) {
+		DBG(1, "%s is already bound to %s\n",
+				musb_driver_name,
+				musb->gadget_driver->driver.name);
+		retval = -EBUSY;
+	} else {
+		musb->gadget_driver = driver;
+		musb->g.dev.driver = &driver->driver;
+		driver->driver.bus = NULL;
+		musb->softconnect = 1;
+		retval = 0;
+	}
+
+	spin_unlock_irqrestore(&musb->lock, flags);
+
+	if (retval == 0) {
+		retval = driver->bind(&musb->g);
+		if (retval != 0) {
+			DBG(3, "bind to driver %s failed --> %d\n",
+					driver->driver.name, retval);
+			musb->gadget_driver = NULL;
+			musb->g.dev.driver = NULL;
+		}
+
+		spin_lock_irqsave(&musb->lock, flags);
+
+		/* REVISIT always use otg_set_peripheral(), handling
+		 * issues including the root hub one below ...
+		 */
+		musb->xceiv.gadget = &musb->g;
+		musb->xceiv.state = OTG_STATE_B_IDLE;
+		musb->is_active = 1;
+
+		/* FIXME this ignores the softconnect flag.  Drivers are
+		 * allowed to hold the peripheral inactive until, for example,
+		 * userspace hooks up printer hardware or DSP codecs, so
+		 * hosts only see fully functional devices.
+		 */
+
+		if (!is_otg_enabled(musb))
+			musb_start(musb);
+
+		spin_unlock_irqrestore(&musb->lock, flags);
+
+		if (is_otg_enabled(musb)) {
+			DBG(3, "OTG startup...\n");
+
+			/* REVISIT:  funcall to other code, which also
+			 * handles power budgeting ... this way also
+			 * ensures HdrcStart is indirectly called.
+			 */
+			retval = usb_add_hcd(musb_to_hcd(musb), -1, 0);
+			if (retval < 0) {
+				DBG(1, "add_hcd failed, %d\n", retval);
+				spin_lock_irqsave(&musb->lock, flags);
+				musb->xceiv.gadget = NULL;
+				musb->xceiv.state = OTG_STATE_UNDEFINED;
+				musb->gadget_driver = NULL;
+				musb->g.dev.driver = NULL;
+				spin_unlock_irqrestore(&musb->lock, flags);
+			}
+		}
+	}
+
+	return retval;
+}
+EXPORT_SYMBOL(usb_gadget_register_driver);
+
+static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
+{
+	int			i;
+	struct musb_hw_ep	*hw_ep;
+
+	/* don't disconnect if it's not connected */
+	if (musb->g.speed == USB_SPEED_UNKNOWN)
+		driver = NULL;
+	else
+		musb->g.speed = USB_SPEED_UNKNOWN;
+
+	/* deactivate the hardware */
+	if (musb->softconnect) {
+		musb->softconnect = 0;
+		musb_pullup(musb, 0);
+	}
+	musb_stop(musb);
+
+	/* killing any outstanding requests will quiesce the driver;
+	 * then report disconnect
+	 */
+	if (driver) {
+		for (i = 0, hw_ep = musb->endpoints;
+				i < musb->nr_endpoints;
+				i++, hw_ep++) {
+			musb_ep_select(musb->mregs, i);
+			if (hw_ep->is_shared_fifo /* || !epnum */) {
+				nuke(&hw_ep->ep_in, -ESHUTDOWN);
+			} else {
+				if (hw_ep->max_packet_sz_tx)
+					nuke(&hw_ep->ep_in, -ESHUTDOWN);
+				if (hw_ep->max_packet_sz_rx)
+					nuke(&hw_ep->ep_out, -ESHUTDOWN);
+			}
+		}
+
+		spin_unlock(&musb->lock);
+		driver->disconnect(&musb->g);
+		spin_lock(&musb->lock);
+	}
+}
+
+/*
+ * Unregister the gadget driver. Used by gadget drivers when
+ * unregistering themselves from the controller.
+ *
+ * @param driver the gadget driver to unregister
+ */
+int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+{
+	unsigned long	flags;
+	int		retval = 0;
+	struct musb	*musb = the_gadget;
+
+	if (!driver || !driver->unbind || !musb)
+		return -EINVAL;
+
+	/* REVISIT always use otg_set_peripheral() here too;
+	 * this needs to shut down the OTG engine.
+	 */
+
+	spin_lock_irqsave(&musb->lock, flags);
+
+#ifdef	CONFIG_USB_MUSB_OTG
+	musb_hnp_stop(musb);
+#endif
+
+	if (musb->gadget_driver == driver) {
+
+		(void) musb_gadget_vbus_draw(&musb->g, 0);
+
+		musb->xceiv.state = OTG_STATE_UNDEFINED;
+		stop_activity(musb, driver);
+
+		DBG(3, "unregistering driver %s\n", driver->function);
+		spin_unlock_irqrestore(&musb->lock, flags);
+		driver->unbind(&musb->g);
+		spin_lock_irqsave(&musb->lock, flags);
+
+		musb->gadget_driver = NULL;
+		musb->g.dev.driver = NULL;
+
+		musb->is_active = 0;
+		musb_platform_try_idle(musb, 0);
+	} else
+		retval = -EINVAL;
+	spin_unlock_irqrestore(&musb->lock, flags);
+
+	if (is_otg_enabled(musb) && retval == 0) {
+		usb_remove_hcd(musb_to_hcd(musb));
+		/* FIXME we need to be able to register another
+		 * gadget driver here and have everything work;
+		 * that currently misbehaves.
+		 */
+	}
+
+	return retval;
+}
+EXPORT_SYMBOL(usb_gadget_unregister_driver);
+
+/* ----------------------------------------------------------------------- */
+
+/* lifecycle operations called through plat_uds.c */
+
+void musb_g_resume(struct musb *musb)
+{
+	musb->is_suspended = 0;
+	switch (musb->xceiv.state) {
+	case OTG_STATE_B_IDLE:
+		break;
+	case OTG_STATE_B_WAIT_ACON:
+	case OTG_STATE_B_PERIPHERAL:
+		musb->is_active = 1;
+		if (musb->gadget_driver && musb->gadget_driver->resume) {
+			spin_unlock(&musb->lock);
+			musb->gadget_driver->resume(&musb->g);
+			spin_lock(&musb->lock);
+		}
+		break;
+	default:
+		WARNING("unhandled RESUME transition (%s)\n",
+				otg_state_string(musb));
+	}
+}
+
+/* called when SOF packets stop for 3+ msec */
+void musb_g_suspend(struct musb *musb)
+{
+	u8	devctl;
+
+	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+	DBG(3, "devctl %02x\n", devctl);
+
+	switch (musb->xceiv.state) {
+	case OTG_STATE_B_IDLE:
+		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
+			musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+		break;
+	case OTG_STATE_B_PERIPHERAL:
+		musb->is_suspended = 1;
+		if (musb->gadget_driver && musb->gadget_driver->suspend) {
+			spin_unlock(&musb->lock);
+			musb->gadget_driver->suspend(&musb->g);
+			spin_lock(&musb->lock);
+		}
+		break;
+	default:
+		/* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
+		 * A_PERIPHERAL may need care too
+		 */
+		WARNING("unhandled SUSPEND transition (%s)\n",
+				otg_state_string(musb));
+	}
+}
+
+/* Called during SRP */
+void musb_g_wakeup(struct musb *musb)
+{
+	musb_gadget_wakeup(&musb->g);
+}
+
+/* called when VBUS drops below session threshold, and in other cases */
+void musb_g_disconnect(struct musb *musb)
+{
+	void __iomem	*mregs = musb->mregs;
+	u8	devctl = musb_readb(mregs, MUSB_DEVCTL);
+
+	DBG(3, "devctl %02x\n", devctl);
+
+	/* clear HR */
+	musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION);
+
+	/* don't draw vbus until new b-default session */
+	(void) musb_gadget_vbus_draw(&musb->g, 0);
+
+	musb->g.speed = USB_SPEED_UNKNOWN;
+	if (musb->gadget_driver && musb->gadget_driver->disconnect) {
+		spin_unlock(&musb->lock);
+		musb->gadget_driver->disconnect(&musb->g);
+		spin_lock(&musb->lock);
+	}
+
+	switch (musb->xceiv.state) {
+	default:
+#ifdef	CONFIG_USB_MUSB_OTG
+		DBG(2, "Unhandled disconnect %s, setting a_idle\n",
+			otg_state_string(musb));
+		musb->xceiv.state = OTG_STATE_A_IDLE;
+		break;
+	case OTG_STATE_A_PERIPHERAL:
+		musb->xceiv.state = OTG_STATE_A_WAIT_VFALL;
+		break;
+	case OTG_STATE_B_WAIT_ACON:
+	case OTG_STATE_B_HOST:
+#endif
+	case OTG_STATE_B_PERIPHERAL:
+	case OTG_STATE_B_IDLE:
+		musb->xceiv.state = OTG_STATE_B_IDLE;
+		break;
+	case OTG_STATE_B_SRP_INIT:
+		break;
+	}
+
+	musb->is_active = 0;
+}
+
+void musb_g_reset(struct musb *musb)
+__releases(musb->lock)
+__acquires(musb->lock)
+{
+	void __iomem	*mbase = musb->mregs;
+	u8		devctl = musb_readb(mbase, MUSB_DEVCTL);
+	u8		power;
+
+	DBG(3, "<== %s addr=%x driver '%s'\n",
+			(devctl & MUSB_DEVCTL_BDEVICE)
+				? "B-Device" : "A-Device",
+			musb_readb(mbase, MUSB_FADDR),
+			musb->gadget_driver
+				? musb->gadget_driver->driver.name
+				: NULL
+			);
+
+	/* report disconnect, if we didn't already (flushing EP state) */
+	if (musb->g.speed != USB_SPEED_UNKNOWN)
+		musb_g_disconnect(musb);
+
+	/* clear HR */
+	else if (devctl & MUSB_DEVCTL_HR)
+		musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
+
+	/* what speed did we negotiate? */
+	power = musb_readb(mbase, MUSB_POWER);
+	musb->g.speed = (power & MUSB_POWER_HSMODE)
+			? USB_SPEED_HIGH : USB_SPEED_FULL;
+
+	/* start in USB_STATE_DEFAULT */
+	musb->is_active = 1;
+	musb->is_suspended = 0;
+	MUSB_DEV_MODE(musb);
+	musb->address = 0;
+	musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+
+	musb->may_wakeup = 0;
+	musb->g.b_hnp_enable = 0;
+	musb->g.a_alt_hnp_support = 0;
+	musb->g.a_hnp_support = 0;
+
+	/* Normal reset, as B-Device;
+	 * or else after HNP, as A-Device
+	 */
+	if (devctl & MUSB_DEVCTL_BDEVICE) {
+		musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+		musb->g.is_a_peripheral = 0;
+	} else if (is_otg_enabled(musb)) {
+		musb->xceiv.state = OTG_STATE_A_PERIPHERAL;
+		musb->g.is_a_peripheral = 1;
+	} else
+		WARN_ON(1);
+
+	/* start with default limits on VBUS power draw */
+	(void) musb_gadget_vbus_draw(&musb->g,
+			is_otg_enabled(musb) ? 8 : 100);
+}
diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h
new file mode 100644
index 0000000..59502da
--- /dev/null
+++ b/drivers/usb/musb/musb_gadget.h
@@ -0,0 +1,108 @@
+/*
+ * MUSB OTG driver peripheral defines
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MUSB_GADGET_H
+#define __MUSB_GADGET_H
+
+struct musb_request {
+	struct usb_request	request;
+	struct musb_ep		*ep;
+	struct musb		*musb;
+	u8 tx;			/* endpoint direction */
+	u8 epnum;
+	u8 mapped;
+};
+
+static inline struct musb_request *to_musb_request(struct usb_request *req)
+{
+	return req ? container_of(req, struct musb_request, request) : NULL;
+}
+
+extern struct usb_request *
+musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags);
+extern void musb_free_request(struct usb_ep *ep, struct usb_request *req);
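+
+/* Illustrative sketch (my_complete is a hypothetical callback): function
+ * drivers reach these through the generic endpoint API, e.g.
+ *
+ *	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+ *
+ *	req->buf = buf;
+ *	req->length = len;
+ *	req->complete = my_complete;
+ *	status = usb_ep_queue(ep, req, GFP_ATOMIC);
+ */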
+
+/*
+ * struct musb_ep - peripheral side view of endpoint rx or tx side
+ */
+struct musb_ep {
+	/* stuff towards the head is basically write-once. */
+	struct usb_ep			end_point;
+	char				name[12];
+	struct musb_hw_ep		*hw_ep;
+	struct musb			*musb;
+	u8				current_epnum;
+
+	/* ... when enabled/disabled ... */
+	u8				type;
+	u8				is_in;
+	u16				packet_sz;
+	const struct usb_endpoint_descriptor	*desc;
+	struct dma_channel		*dma;
+
+	/* later things are modified based on usage */
+	struct list_head		req_list;
+
+	/* true if lock must be dropped but req_list may not be advanced */
+	u8				busy;
+};
+
+static inline struct musb_ep *to_musb_ep(struct usb_ep *ep)
+{
+	return ep ? container_of(ep, struct musb_ep, end_point) : NULL;
+}
+
+static inline struct usb_request *next_request(struct musb_ep *ep)
+{
+	struct list_head	*queue = &ep->req_list;
+
+	if (list_empty(queue))
+		return NULL;
+	return container_of(queue->next, struct usb_request, list);
+}
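+
+/* Illustrative sketch: callers pair this with the container helpers above
+ * when (re)starting i/o, as the gadget code does:
+ *
+ *	struct musb_request *req = to_musb_request(next_request(musb_ep));
+ *
+ *	if (req)
+ *		musb_ep_restart(musb, req);
+ */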
+
+extern void musb_g_tx(struct musb *musb, u8 epnum);
+extern void musb_g_rx(struct musb *musb, u8 epnum);
+
+extern const struct usb_ep_ops musb_g_ep0_ops;
+
+extern int musb_gadget_setup(struct musb *);
+extern void musb_gadget_cleanup(struct musb *);
+
+extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int);
+
+extern int musb_gadget_set_halt(struct usb_ep *ep, int value);
+
+#endif		/* __MUSB_GADGET_H */
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
new file mode 100644
index 0000000..48d7d3c
--- /dev/null
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -0,0 +1,981 @@
+/*
+ * MUSB OTG peripheral driver ep0 handling
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/timer.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+
+#include "musb_core.h"
+
+/* ep0 is always musb->endpoints[0].ep_in */
+#define	next_ep0_request(musb)	next_in_request(&(musb)->endpoints[0])
+
+/*
+ * locking note:  we use only the controller lock, for simpler correctness.
+ * It's always held with IRQs blocked.
+ *
+ * It protects the ep0 request queue as well as ep0_state, not just the
+ * controller and indexed registers.  And that lock stays held unless it
+ * needs to be dropped to allow reentering this driver ... like upcalls to
+ * the gadget driver, or adjusting endpoint halt status.
+ */
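+
+/* A minimal sketch of that lock-drop pattern; it is what forward_to_driver()
+ * below does around gadget driver upcalls:
+ *
+ *	spin_unlock(&musb->lock);
+ *	retval = musb->gadget_driver->setup(&musb->g, ctrlrequest);
+ *	spin_lock(&musb->lock);
+ *
+ * The upcall may legally re-enter this driver (e.g. to queue an ep0
+ * request), which retakes the lock.
+ */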
+
+static char *decode_ep0stage(u8 stage)
+{
+	switch (stage) {
+	case MUSB_EP0_STAGE_SETUP:	return "idle";
+	case MUSB_EP0_STAGE_TX:		return "in";
+	case MUSB_EP0_STAGE_RX:		return "out";
+	case MUSB_EP0_STAGE_ACKWAIT:	return "wait";
+	case MUSB_EP0_STAGE_STATUSIN:	return "in/status";
+	case MUSB_EP0_STAGE_STATUSOUT:	return "out/status";
+	default:			return "?";
+	}
+}
+
+/* handle a standard GET_STATUS request
+ * Context:  caller holds controller lock
+ */
+static int service_tx_status_request(
+	struct musb *musb,
+	const struct usb_ctrlrequest *ctrlrequest)
+{
+	void __iomem	*mbase = musb->mregs;
+	int handled = 1;
+	u8 result[2], epnum = 0;
+	const u8 recip = ctrlrequest->bRequestType & USB_RECIP_MASK;
+
+	result[1] = 0;
+
+	switch (recip) {
+	case USB_RECIP_DEVICE:
+		result[0] = musb->is_self_powered << USB_DEVICE_SELF_POWERED;
+		result[0] |= musb->may_wakeup << USB_DEVICE_REMOTE_WAKEUP;
+#ifdef CONFIG_USB_MUSB_OTG
+		if (musb->g.is_otg) {
+			result[0] |= musb->g.b_hnp_enable
+				<< USB_DEVICE_B_HNP_ENABLE;
+			result[0] |= musb->g.a_alt_hnp_support
+				<< USB_DEVICE_A_ALT_HNP_SUPPORT;
+			result[0] |= musb->g.a_hnp_support
+				<< USB_DEVICE_A_HNP_SUPPORT;
+		}
+#endif
+		break;
+
+	case USB_RECIP_INTERFACE:
+		result[0] = 0;
+		break;
+
+	case USB_RECIP_ENDPOINT: {
+		int		is_in;
+		struct musb_ep	*ep;
+		u16		tmp;
+		void __iomem	*regs;
+
+		epnum = (u8) ctrlrequest->wIndex;
+		if (!epnum) {
+			result[0] = 0;
+			break;
+		}
+
+		/* mask and bounds-check before indexing the endpoint array */
+		is_in = epnum & USB_DIR_IN;
+		epnum &= 0x0f;
+		if (epnum >= MUSB_C_NUM_EPS) {
+			handled = -EINVAL;
+			break;
+		}
+
+		if (is_in)
+			ep = &musb->endpoints[epnum].ep_in;
+		else
+			ep = &musb->endpoints[epnum].ep_out;
+		regs = musb->endpoints[epnum].regs;
+
+		if (!ep->desc) {
+			handled = -EINVAL;
+			break;
+		}
+
+		musb_ep_select(mbase, epnum);
+		if (is_in)
+			tmp = musb_readw(regs, MUSB_TXCSR)
+						& MUSB_TXCSR_P_SENDSTALL;
+		else
+			tmp = musb_readw(regs, MUSB_RXCSR)
+						& MUSB_RXCSR_P_SENDSTALL;
+		musb_ep_select(mbase, 0);
+
+		result[0] = tmp ? 1 : 0;
+		} break;
+
+	default:
+		/* class, vendor, etc ... delegate */
+		handled = 0;
+		break;
+	}
+
+	/* fill up the fifo; caller updates csr0 */
+	if (handled > 0) {
+		u16	len = le16_to_cpu(ctrlrequest->wLength);
+
+		if (len > 2)
+			len = 2;
+		musb_write_fifo(&musb->endpoints[0], len, result);
+	}
+
+	return handled;
+}
+
+/*
+ * handle a control-IN request: the ep0 buffer contains the current request,
+ * which should be a standard control request.  Assumes the FIFO is at
+ * least 2 bytes long.
+ *
+ * @return 0 if the request was NOT HANDLED,
+ * < 0 when error
+ * > 0 when the request is processed
+ *
+ * Context:  caller holds controller lock
+ */
+static int
+service_in_request(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest)
+{
+	int handled = 0;	/* not handled */
+
+	if ((ctrlrequest->bRequestType & USB_TYPE_MASK)
+			== USB_TYPE_STANDARD) {
+		switch (ctrlrequest->bRequest) {
+		case USB_REQ_GET_STATUS:
+			handled = service_tx_status_request(musb,
+					ctrlrequest);
+			break;
+
+		/* case USB_REQ_SYNC_FRAME: */
+
+		default:
+			break;
+		}
+	}
+	return handled;
+}
+
+/*
+ * Context:  caller holds controller lock
+ */
+static void musb_g_ep0_giveback(struct musb *musb, struct usb_request *req)
+{
+	musb_g_giveback(&musb->endpoints[0].ep_in, req, 0);
+	musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+}
+
+/*
+ * Tries to start B-device HNP negotiation if enabled via sysfs
+ */
+static inline void musb_try_b_hnp_enable(struct musb *musb)
+{
+	void __iomem	*mbase = musb->mregs;
+	u8		devctl;
+
+	DBG(1, "HNP: Setting HR\n");
+	devctl = musb_readb(mbase, MUSB_DEVCTL);
+	musb_writeb(mbase, MUSB_DEVCTL, devctl | MUSB_DEVCTL_HR);
+}
+
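+/* For reference, the 8 SETUP bytes of one request handled below,
+ * SET_FEATURE(ENDPOINT_HALT) on ep1-OUT:
+ *
+ *	02 03 00 00 01 00 00 00
+ *
+ * bmRequestType 0x02 (standard, endpoint recipient), bRequest 0x03
+ * (SET_FEATURE), wValue 0 (ENDPOINT_HALT), wIndex 1 (ep1 OUT), wLength 0.
+ */
+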
+/*
+ * Handle all control requests with no DATA stage, including standard
+ * requests such as:
+ * USB_REQ_SET_CONFIGURATION, USB_REQ_SET_INTERFACE, unrecognized
+ *	always delegated to the gadget driver
+ * USB_REQ_SET_ADDRESS, USB_REQ_CLEAR_FEATURE, USB_REQ_SET_FEATURE
+ *	always handled here, except for class/vendor/... features
+ *
+ * Context:  caller holds controller lock
+ */
+static int
+service_zero_data_request(struct musb *musb,
+		struct usb_ctrlrequest *ctrlrequest)
+__releases(musb->lock)
+__acquires(musb->lock)
+{
+	int handled = -EINVAL;
+	void __iomem *mbase = musb->mregs;
+	const u8 recip = ctrlrequest->bRequestType & USB_RECIP_MASK;
+
+	/* the gadget driver handles everything except what we MUST handle */
+	if ((ctrlrequest->bRequestType & USB_TYPE_MASK)
+			== USB_TYPE_STANDARD) {
+		switch (ctrlrequest->bRequest) {
+		case USB_REQ_SET_ADDRESS:
+			/* change it after the status stage */
+			musb->set_address = true;
+			musb->address = (u8) (ctrlrequest->wValue & 0x7f);
+			handled = 1;
+			break;
+
+		case USB_REQ_CLEAR_FEATURE:
+			switch (recip) {
+			case USB_RECIP_DEVICE:
+				if (ctrlrequest->wValue
+						!= USB_DEVICE_REMOTE_WAKEUP)
+					break;
+				musb->may_wakeup = 0;
+				handled = 1;
+				break;
+			case USB_RECIP_INTERFACE:
+				break;
+			case USB_RECIP_ENDPOINT:{
+				const u8 num = ctrlrequest->wIndex & 0x0f;
+				struct musb_ep *musb_ep;
+
+				if (num == 0
+						|| num >= MUSB_C_NUM_EPS
+						|| ctrlrequest->wValue
+							!= USB_ENDPOINT_HALT)
+					break;
+
+				if (ctrlrequest->wIndex & USB_DIR_IN)
+					musb_ep = &musb->endpoints[num].ep_in;
+				else
+					musb_ep = &musb->endpoints[num].ep_out;
+				if (!musb_ep->desc)
+					break;
+
+				/* REVISIT do it directly, no locking games */
+				spin_unlock(&musb->lock);
+				musb_gadget_set_halt(&musb_ep->end_point, 0);
+				spin_lock(&musb->lock);
+
+				/* select ep0 again */
+				musb_ep_select(mbase, 0);
+				handled = 1;
+				} break;
+			default:
+				/* class, vendor, etc ... delegate */
+				handled = 0;
+				break;
+			}
+			break;
+
+		case USB_REQ_SET_FEATURE:
+			switch (recip) {
+			case USB_RECIP_DEVICE:
+				handled = 1;
+				switch (ctrlrequest->wValue) {
+				case USB_DEVICE_REMOTE_WAKEUP:
+					musb->may_wakeup = 1;
+					break;
+				case USB_DEVICE_TEST_MODE:
+					if (musb->g.speed != USB_SPEED_HIGH)
+						goto stall;
+					if (ctrlrequest->wIndex & 0xff)
+						goto stall;
+
+					switch (ctrlrequest->wIndex >> 8) {
+					case 1:
+						pr_debug("TEST_J\n");
+						/* TEST_J */
+						musb->test_mode_nr =
+							MUSB_TEST_J;
+						break;
+					case 2:
+						/* TEST_K */
+						pr_debug("TEST_K\n");
+						musb->test_mode_nr =
+							MUSB_TEST_K;
+						break;
+					case 3:
+						/* TEST_SE0_NAK */
+						pr_debug("TEST_SE0_NAK\n");
+						musb->test_mode_nr =
+							MUSB_TEST_SE0_NAK;
+						break;
+					case 4:
+						/* TEST_PACKET */
+						pr_debug("TEST_PACKET\n");
+						musb->test_mode_nr =
+							MUSB_TEST_PACKET;
+						break;
+					default:
+						goto stall;
+					}
+
+					/* enter test mode after irq */
+					if (handled > 0)
+						musb->test_mode = true;
+					break;
+#ifdef CONFIG_USB_MUSB_OTG
+				case USB_DEVICE_B_HNP_ENABLE:
+					if (!musb->g.is_otg)
+						goto stall;
+					musb->g.b_hnp_enable = 1;
+					musb_try_b_hnp_enable(musb);
+					break;
+				case USB_DEVICE_A_HNP_SUPPORT:
+					if (!musb->g.is_otg)
+						goto stall;
+					musb->g.a_hnp_support = 1;
+					break;
+				case USB_DEVICE_A_ALT_HNP_SUPPORT:
+					if (!musb->g.is_otg)
+						goto stall;
+					musb->g.a_alt_hnp_support = 1;
+					break;
+#endif
+stall:
+				default:
+					handled = -EINVAL;
+					break;
+				}
+				break;
+
+			case USB_RECIP_INTERFACE:
+				break;
+
+			case USB_RECIP_ENDPOINT:{
+				const u8		epnum =
+					ctrlrequest->wIndex & 0x0f;
+				struct musb_ep		*musb_ep;
+				struct musb_hw_ep	*ep;
+				void __iomem		*regs;
+				int			is_in;
+				u16			csr;
+
+				if (epnum == 0
+						|| epnum >= MUSB_C_NUM_EPS
+						|| ctrlrequest->wValue
+							!= USB_ENDPOINT_HALT)
+					break;
+
+				ep = musb->endpoints + epnum;
+				regs = ep->regs;
+				is_in = ctrlrequest->wIndex & USB_DIR_IN;
+				if (is_in)
+					musb_ep = &ep->ep_in;
+				else
+					musb_ep = &ep->ep_out;
+				if (!musb_ep->desc)
+					break;
+
+				musb_ep_select(mbase, epnum);
+				if (is_in) {
+					csr = musb_readw(regs,
+							MUSB_TXCSR);
+					if (csr & MUSB_TXCSR_FIFONOTEMPTY)
+						csr |= MUSB_TXCSR_FLUSHFIFO;
+					csr |= MUSB_TXCSR_P_SENDSTALL
+						| MUSB_TXCSR_CLRDATATOG
+						| MUSB_TXCSR_P_WZC_BITS;
+					musb_writew(regs, MUSB_TXCSR,
+							csr);
+				} else {
+					csr = musb_readw(regs,
+							MUSB_RXCSR);
+					csr |= MUSB_RXCSR_P_SENDSTALL
+						| MUSB_RXCSR_FLUSHFIFO
+						| MUSB_RXCSR_CLRDATATOG
+						| MUSB_RXCSR_P_WZC_BITS;
+					musb_writew(regs, MUSB_RXCSR,
+							csr);
+				}
+
+				/* select ep0 again */
+				musb_ep_select(mbase, 0);
+				handled = 1;
+				} break;
+
+			default:
+				/* class, vendor, etc ... delegate */
+				handled = 0;
+				break;
+			}
+			break;
+		default:
+			/* delegate SET_CONFIGURATION, etc */
+			handled = 0;
+		}
+	} else
+		handled = 0;
+	return handled;
+}
+
+/* we have an ep0out data packet
+ * Context:  caller holds controller lock
+ */
+static void ep0_rxstate(struct musb *musb)
+{
+	void __iomem		*regs = musb->control_ep->regs;
+	struct usb_request	*req;
+	u16			tmp;
+
+	req = next_ep0_request(musb);
+
+	/* read packet and ack; or stall because of gadget driver bug:
+	 * should have provided the rx buffer before setup() returned.
+	 */
+	if (req) {
+		void		*buf = req->buf + req->actual;
+		unsigned	len = req->length - req->actual;
+
+		/* read the buffer */
+		tmp = musb_readb(regs, MUSB_COUNT0);
+		if (tmp > len) {
+			req->status = -EOVERFLOW;
+			tmp = len;
+		}
+		musb_read_fifo(&musb->endpoints[0], tmp, buf);
+		req->actual += tmp;
+
+		/* a short packet, or a filled buffer, ends the OUT data
+		 * stage; test the count *before* reusing tmp for csr0 bits
+		 */
+		if (tmp < 64 || req->actual == req->length) {
+			musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
+			tmp = MUSB_CSR0_P_SVDRXPKTRDY | MUSB_CSR0_P_DATAEND;
+		} else {
+			tmp = MUSB_CSR0_P_SVDRXPKTRDY;
+			req = NULL;
+		}
+	} else
+		tmp = MUSB_CSR0_P_SVDRXPKTRDY | MUSB_CSR0_P_SENDSTALL;
+
+	/* Completion handler may choose to stall, e.g. because the
+	 * message just received holds invalid data.
+	 */
+	if (req) {
+		musb->ackpend = tmp;
+		musb_g_ep0_giveback(musb, req);
+		if (!musb->ackpend)
+			return;
+		musb->ackpend = 0;
+	}
+	musb_writew(regs, MUSB_CSR0, tmp);
+}
+
+/*
+ * transmitting to the host (IN), this code might be called from IRQ
+ * and from kernel thread.
+ *
+ * Context:  caller holds controller lock
+ */
+static void ep0_txstate(struct musb *musb)
+{
+	void __iomem		*regs = musb->control_ep->regs;
+	struct usb_request	*request = next_ep0_request(musb);
+	u16			csr = MUSB_CSR0_TXPKTRDY;
+	u8			*fifo_src;
+	u8			fifo_count;
+
+	if (!request) {
+		/* WARN_ON(1); */
+		DBG(2, "odd; csr0 %04x\n", musb_readw(regs, MUSB_CSR0));
+		return;
+	}
+
+	/* load the data */
+	fifo_src = (u8 *) request->buf + request->actual;
+	fifo_count = min((unsigned) MUSB_EP0_FIFOSIZE,
+		request->length - request->actual);
+	musb_write_fifo(&musb->endpoints[0], fifo_count, fifo_src);
+	request->actual += fifo_count;
+
+	/* update the flags */
+	if (fifo_count < MUSB_MAX_END0_PACKET
+			|| request->actual == request->length) {
+		musb->ep0_state = MUSB_EP0_STAGE_STATUSOUT;
+		csr |= MUSB_CSR0_P_DATAEND;
+	} else
+		request = NULL;
+
+	/* report completions as soon as the fifo's loaded; there's no
+	 * win in waiting till this last packet gets acked.  (other than
+	 * very precise fault reporting, needed by USB TMC; possible with
+	 * this hardware, but not usable from portable gadget drivers.)
+	 */
+	if (request) {
+		musb->ackpend = csr;
+		musb_g_ep0_giveback(musb, request);
+		if (!musb->ackpend)
+			return;
+		musb->ackpend = 0;
+	}
+
+	/* send it out, triggering a "txpktrdy cleared" irq */
+	musb_writew(regs, MUSB_CSR0, csr);
+}
+
+/*
+ * Read a SETUP packet (struct usb_ctrlrequest) from the hardware.
+ * Fields are left in USB byte-order.
+ *
+ * Context:  caller holds controller lock.
+ */
+static void
+musb_read_setup(struct musb *musb, struct usb_ctrlrequest *req)
+{
+	struct usb_request	*r;
+	void __iomem		*regs = musb->control_ep->regs;
+
+	musb_read_fifo(&musb->endpoints[0], sizeof *req, (u8 *)req);
+
+	/* NOTE:  earlier 2.6 versions changed setup packets to host
+	 * order, but now USB packets always stay in USB byte order.
+	 */
+	DBG(3, "SETUP req%02x.%02x v%04x i%04x l%d\n",
+		req->bRequestType,
+		req->bRequest,
+		le16_to_cpu(req->wValue),
+		le16_to_cpu(req->wIndex),
+		le16_to_cpu(req->wLength));
+
+	/* clean up any leftover transfers */
+	r = next_ep0_request(musb);
+	if (r)
+		musb_g_ep0_giveback(musb, r);
+
+	/* For zero-data requests we want to delay the STATUS stage to
+	 * avoid SETUPEND errors.  If we read data (OUT), delay accepting
+	 * packets until there's a buffer to store them in.
+	 *
+	 * If we write data, the controller acts happier if we enable
+	 * the TX FIFO right away, and give the controller a moment
+	 * to switch modes...
+	 */
+	musb->set_address = false;
+	musb->ackpend = MUSB_CSR0_P_SVDRXPKTRDY;
+	if (req->wLength == 0) {
+		if (req->bRequestType & USB_DIR_IN)
+			musb->ackpend |= MUSB_CSR0_TXPKTRDY;
+		musb->ep0_state = MUSB_EP0_STAGE_ACKWAIT;
+	} else if (req->bRequestType & USB_DIR_IN) {
+		musb->ep0_state = MUSB_EP0_STAGE_TX;
+		musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDRXPKTRDY);
+		while ((musb_readw(regs, MUSB_CSR0)
+				& MUSB_CSR0_RXPKTRDY) != 0)
+			cpu_relax();
+		musb->ackpend = 0;
+	} else
+		musb->ep0_state = MUSB_EP0_STAGE_RX;
+}
+
+static int
+forward_to_driver(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest)
+__releases(musb->lock)
+__acquires(musb->lock)
+{
+	int retval;
+	if (!musb->gadget_driver)
+		return -EOPNOTSUPP;
+	spin_unlock(&musb->lock);
+	retval = musb->gadget_driver->setup(&musb->g, ctrlrequest);
+	spin_lock(&musb->lock);
+	return retval;
+}
+
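+/* The three control transfer shapes tracked by the "sequence #" comments
+ * in the state machine below:
+ *
+ *	#1 IN data:   SETUP -> STAGE_TX      -> STAGE_STATUSOUT
+ *	#2 OUT data:  SETUP -> STAGE_RX      -> STAGE_STATUSIN
+ *	#3 no data:   SETUP -> STAGE_ACKWAIT -> STAGE_STATUSIN
+ */
+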
+/*
+ * Handle peripheral ep0 interrupt
+ *
+ * Context: irq handler; we won't re-enter the driver that way.
+ */
+irqreturn_t musb_g_ep0_irq(struct musb *musb)
+{
+	u16		csr;
+	u16		len;
+	void __iomem	*mbase = musb->mregs;
+	void __iomem	*regs = musb->endpoints[0].regs;
+	irqreturn_t	retval = IRQ_NONE;
+
+	musb_ep_select(mbase, 0);	/* select ep0 */
+	csr = musb_readw(regs, MUSB_CSR0);
+	len = musb_readb(regs, MUSB_COUNT0);
+
+	DBG(4, "csr %04x, count %d, myaddr %d, ep0stage %s\n",
+			csr, len,
+			musb_readb(mbase, MUSB_FADDR),
+			decode_ep0stage(musb->ep0_state));
+
+	/* we sent a stall ... acknowledge it now */
+	if (csr & MUSB_CSR0_P_SENTSTALL) {
+		musb_writew(regs, MUSB_CSR0,
+				csr & ~MUSB_CSR0_P_SENTSTALL);
+		retval = IRQ_HANDLED;
+		musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+		csr = musb_readw(regs, MUSB_CSR0);
+	}
+
+	/* request ended "early" */
+	if (csr & MUSB_CSR0_P_SETUPEND) {
+		musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDSETUPEND);
+		retval = IRQ_HANDLED;
+		musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+		csr = musb_readw(regs, MUSB_CSR0);
+		/* NOTE:  request may need completion */
+	}
+
+	/* docs from Mentor only describe tx, rx, and idle/setup states.
+	 * we need to handle nuances around status stages, and also the
+	 * case where status and setup stages come back-to-back ...
+	 */
+	switch (musb->ep0_state) {
+
+	case MUSB_EP0_STAGE_TX:
+		/* irq on clearing txpktrdy */
+		if ((csr & MUSB_CSR0_TXPKTRDY) == 0) {
+			ep0_txstate(musb);
+			retval = IRQ_HANDLED;
+		}
+		break;
+
+	case MUSB_EP0_STAGE_RX:
+		/* irq on set rxpktrdy */
+		if (csr & MUSB_CSR0_RXPKTRDY) {
+			ep0_rxstate(musb);
+			retval = IRQ_HANDLED;
+		}
+		break;
+
+	case MUSB_EP0_STAGE_STATUSIN:
+		/* end of sequence #2 (OUT/RX state) or #3 (no data) */
+
+		/* update address (if needed) only @ the end of the
+		 * status phase per usb spec, which also guarantees
+		 * we get 10 msec to receive this irq... until this
+		 * is done we won't see the next packet.
+		 */
+		if (musb->set_address) {
+			musb->set_address = false;
+			musb_writeb(mbase, MUSB_FADDR, musb->address);
+		}
+
+		/* enter test mode if needed (exit by reset) */
+		else if (musb->test_mode) {
+			DBG(1, "entering TESTMODE\n");
+
+			if (MUSB_TEST_PACKET == musb->test_mode_nr)
+				musb_load_testpacket(musb);
+
+			musb_writeb(mbase, MUSB_TESTMODE,
+					musb->test_mode_nr);
+		}
+		/* FALLTHROUGH */
+
+	case MUSB_EP0_STAGE_STATUSOUT:
+		/* end of sequence #1: write to host (TX state) */
+		{
+			struct usb_request	*req;
+
+			req = next_ep0_request(musb);
+			if (req)
+				musb_g_ep0_giveback(musb, req);
+		}
+		retval = IRQ_HANDLED;
+		musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+		/* FALLTHROUGH */
+
+	case MUSB_EP0_STAGE_SETUP:
+		if (csr & MUSB_CSR0_RXPKTRDY) {
+			struct usb_ctrlrequest	setup;
+			int			handled = 0;
+
+			if (len != 8) {
+				ERR("SETUP packet len %d != 8 ?\n", len);
+				break;
+			}
+			musb_read_setup(musb, &setup);
+			retval = IRQ_HANDLED;
+
+			/* sometimes the RESET won't be reported */
+			if (unlikely(musb->g.speed == USB_SPEED_UNKNOWN)) {
+				u8	power;
+
+				printk(KERN_NOTICE "%s: peripheral reset "
+						"irq lost!\n",
+						musb_driver_name);
+				power = musb_readb(mbase, MUSB_POWER);
+				musb->g.speed = (power & MUSB_POWER_HSMODE)
+					? USB_SPEED_HIGH : USB_SPEED_FULL;
+
+			}
+
+			switch (musb->ep0_state) {
+
+			/* sequence #3 (no data stage), includes requests
+			 * we can't forward (notably SET_ADDRESS and the
+			 * device/endpoint feature set/clear operations)
+			 * plus SET_CONFIGURATION and others we must
+			 */
+			case MUSB_EP0_STAGE_ACKWAIT:
+				handled = service_zero_data_request(
+						musb, &setup);
+
+				/* status stage might be immediate */
+				if (handled > 0) {
+					musb->ackpend |= MUSB_CSR0_P_DATAEND;
+					musb->ep0_state =
+						MUSB_EP0_STAGE_STATUSIN;
+				}
+				break;
+
+			/* sequence #1 (IN to host), includes GET_STATUS
+			 * requests that we can't forward, GET_DESCRIPTOR
+			 * and others that we must
+			 */
+			case MUSB_EP0_STAGE_TX:
+				handled = service_in_request(musb, &setup);
+				if (handled > 0) {
+					musb->ackpend = MUSB_CSR0_TXPKTRDY
+						| MUSB_CSR0_P_DATAEND;
+					musb->ep0_state =
+						MUSB_EP0_STAGE_STATUSOUT;
+				}
+				break;
+
+			/* sequence #2 (OUT from host), always forward */
+			default:		/* MUSB_EP0_STAGE_RX */
+				break;
+			}
+
+			DBG(3, "handled %d, csr %04x, ep0stage %s\n",
+				handled, csr,
+				decode_ep0stage(musb->ep0_state));
+
+			/* unless we need to delegate this to the gadget
+			 * driver, we know how to wrap this up:  csr0 has
+			 * not yet been written.
+			 */
+			if (handled < 0)
+				goto stall;
+			else if (handled > 0)
+				goto finish;
+
+			handled = forward_to_driver(musb, &setup);
+			if (handled < 0) {
+				musb_ep_select(mbase, 0);
+stall:
+				DBG(3, "stall (%d)\n", handled);
+				musb->ackpend |= MUSB_CSR0_P_SENDSTALL;
+				musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+finish:
+				musb_writew(regs, MUSB_CSR0,
+						musb->ackpend);
+				musb->ackpend = 0;
+			}
+		}
+		break;
+
+	case MUSB_EP0_STAGE_ACKWAIT:
+		/* This should not happen, but it does with tusb6010 running
+		 * g_file_storage at high speed.  Do nothing.
+		 */
+		retval = IRQ_HANDLED;
+		break;
+
+	default:
+		/* "can't happen" */
+		WARN_ON(1);
+		musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SENDSTALL);
+		musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+		break;
+	}
+
+	return retval;
+}
+
+static int
+musb_g_ep0_enable(struct usb_ep *ep, const struct usb_endpoint_descriptor *desc)
+{
+	/* always enabled */
+	return -EINVAL;
+}
+
+static int musb_g_ep0_disable(struct usb_ep *e)
+{
+	/* always enabled */
+	return -EINVAL;
+}
+
+static int
+musb_g_ep0_queue(struct usb_ep *e, struct usb_request *r, gfp_t gfp_flags)
+{
+	struct musb_ep		*ep;
+	struct musb_request	*req;
+	struct musb		*musb;
+	int			status;
+	unsigned long		lockflags;
+	void __iomem		*regs;
+
+	if (!e || !r)
+		return -EINVAL;
+
+	ep = to_musb_ep(e);
+	musb = ep->musb;
+	regs = musb->control_ep->regs;
+
+	req = to_musb_request(r);
+	req->musb = musb;
+	req->request.actual = 0;
+	req->request.status = -EINPROGRESS;
+	req->tx = ep->is_in;
+
+	spin_lock_irqsave(&musb->lock, lockflags);
+
+	if (!list_empty(&ep->req_list)) {
+		status = -EBUSY;
+		goto cleanup;
+	}
+
+	switch (musb->ep0_state) {
+	case MUSB_EP0_STAGE_RX:		/* control-OUT data */
+	case MUSB_EP0_STAGE_TX:		/* control-IN data */
+	case MUSB_EP0_STAGE_ACKWAIT:	/* zero-length data */
+		status = 0;
+		break;
+	default:
+		DBG(1, "ep0 request queued in state %d\n",
+				musb->ep0_state);
+		status = -EINVAL;
+		goto cleanup;
+	}
+
+	/* add request to the list */
+	list_add_tail(&(req->request.list), &(ep->req_list));
+
+	DBG(3, "queue to %s (%s), length=%d\n",
+			ep->name, ep->is_in ? "IN/TX" : "OUT/RX",
+			req->request.length);
+
+	musb_ep_select(musb->mregs, 0);
+
+	/* sequence #1, IN ... start writing the data */
+	if (musb->ep0_state == MUSB_EP0_STAGE_TX)
+		ep0_txstate(musb);
+
+	/* sequence #3, no-data ... issue IN status */
+	else if (musb->ep0_state == MUSB_EP0_STAGE_ACKWAIT) {
+		if (req->request.length)
+			status = -EINVAL;
+		else {
+			musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
+			musb_writew(regs, MUSB_CSR0,
+					musb->ackpend | MUSB_CSR0_P_DATAEND);
+			musb->ackpend = 0;
+			musb_g_ep0_giveback(ep->musb, r);
+		}
+
+	/* else for sequence #2 (OUT), caller provides a buffer
+	 * before the next packet arrives.  deferred responses
+	 * (after SETUP is acked) are racy.
+	 */
+	} else if (musb->ackpend) {
+		musb_writew(regs, MUSB_CSR0, musb->ackpend);
+		musb->ackpend = 0;
+	}
+
+cleanup:
+	spin_unlock_irqrestore(&musb->lock, lockflags);
+	return status;
+}
+
+static int musb_g_ep0_dequeue(struct usb_ep *ep, struct usb_request *req)
+{
+	/* we just won't support this */
+	return -EINVAL;
+}
+
+static int musb_g_ep0_halt(struct usb_ep *e, int value)
+{
+	struct musb_ep		*ep;
+	struct musb		*musb;
+	void __iomem		*base, *regs;
+	unsigned long		flags;
+	int			status;
+	u16			csr;
+
+	if (!e || !value)
+		return -EINVAL;
+
+	ep = to_musb_ep(e);
+	musb = ep->musb;
+	base = musb->mregs;
+	regs = musb->control_ep->regs;
+	status = 0;
+
+	spin_lock_irqsave(&musb->lock, flags);
+
+	if (!list_empty(&ep->req_list)) {
+		status = -EBUSY;
+		goto cleanup;
+	}
+
+	musb_ep_select(base, 0);
+	csr = musb->ackpend;
+
+	switch (musb->ep0_state) {
+
+	/* Stalls are usually issued after parsing SETUP packet, either
+	 * directly in irq context from setup() or else later.
+	 */
+	case MUSB_EP0_STAGE_TX:		/* control-IN data */
+	case MUSB_EP0_STAGE_ACKWAIT:	/* STALL for zero-length data */
+	case MUSB_EP0_STAGE_RX:		/* control-OUT data */
+		csr = musb_readw(regs, MUSB_CSR0);
+		/* FALLTHROUGH */
+
+	/* It's also OK to issue stalls during callbacks when a non-empty
+	 * DATA stage buffer has been read (or even written).
+	 */
+	case MUSB_EP0_STAGE_STATUSIN:	/* control-OUT status */
+	case MUSB_EP0_STAGE_STATUSOUT:	/* control-IN status */
+
+		csr |= MUSB_CSR0_P_SENDSTALL;
+		musb_writew(regs, MUSB_CSR0, csr);
+		musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+		musb->ackpend = 0;
+		break;
+	default:
+		DBG(1, "ep0 can't halt in state %d\n", musb->ep0_state);
+		status = -EINVAL;
+	}
+
+cleanup:
+	spin_unlock_irqrestore(&musb->lock, flags);
+	return status;
+}
+
+const struct usb_ep_ops musb_g_ep0_ops = {
+	.enable		= musb_g_ep0_enable,
+	.disable	= musb_g_ep0_disable,
+	.alloc_request	= musb_alloc_request,
+	.free_request	= musb_free_request,
+	.queue		= musb_g_ep0_queue,
+	.dequeue	= musb_g_ep0_dequeue,
+	.set_halt	= musb_g_ep0_halt,
+};
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
new file mode 100644
index 0000000..8b4be01
--- /dev/null
+++ b/drivers/usb/musb/musb_host.c
@@ -0,0 +1,2170 @@
+/*
+ * MUSB OTG driver host support
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/list.h>
+
+#include "musb_core.h"
+#include "musb_host.h"
+
+
+/* MUSB HOST status 22-mar-2006
+ *
+ * - There's still lots of partial code duplication for fault paths, so
+ *   they aren't handled as consistently as they need to be.
+ *
+ * - PIO mostly behaved when last tested.
+ *     + including ep0, with all usbtest cases 9, 10
+ *     + usbtest 14 (ep0out) doesn't seem to run at all
+ *     + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
+ *       configurations, but otherwise double buffering passes basic tests.
+ *     + for 2.6.N, for N > ~10, needs API changes for hcd framework.
+ *
+ * - DMA (CPPI) ... partially behaves, not currently recommended
+ *     + about 1/15 the speed of typical EHCI implementations (PCI)
+ *     + RX, all too often reqpkt seems to misbehave after tx
+ *     + TX, no known issues (other than evident silicon issue)
+ *
+ * - DMA (Mentor/OMAP) ...has at least toggle update problems
+ *
+ * - Still no traffic scheduling code, so NAKed bulk or control transfers
+ *   can starve other requests; nor is there efficient use of the hardware
+ *   for periodic transfers.  (Note that network drivers
+ *   commonly post bulk reads that stay pending for a long time; these
+ *   would make very visible trouble.)
+ *
+ * - Not tested with HNP, but some SRP paths seem to behave.
+ *
+ * NOTE 24-August-2006:
+ *
+ * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
+ *   extra endpoint for periodic use enabling hub + keybd + mouse.  That
+ *   mostly works, except that with "usbnet" it's easy to trigger cases
+ *   with "ping" where RX loses.  (a) ping to davinci, even "ping -f",
+ *   fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
+ *   although ARP RX wins.  (That test was done with a full speed link.)
+ */
+
+
+/*
+ * NOTE on endpoint usage:
+ *
+ * CONTROL transfers all go through ep0.  BULK ones go through dedicated IN
+ * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
+ *
+ * (Yes, bulk _could_ use more of the endpoints than that, and would even
+ * benefit from it ... one remote device may easily be NAKing while others
+ * need to perform transfers in that same direction.  The same thing could
+ * be done in software though, assuming dma cooperates.)
+ *
+ * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
+ * So far that scheduling is both dumb and optimistic:  the endpoint will be
+ * "claimed" until its software queue is no longer refilled.  No multiplexing
+ * of transfers between endpoints, or anything clever.
+ */
+
+
+static void musb_ep_program(struct musb *musb, u8 epnum,
+			struct urb *urb, unsigned int nOut,
+			u8 *buf, u32 len);
+
+/*
+ * Clear TX fifo. Needed to avoid BABBLE errors.
+ */
+static inline void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
+{
+	void __iomem	*epio = ep->regs;
+	u16		csr;
+	int		retries = 1000;
+
+	csr = musb_readw(epio, MUSB_TXCSR);
+	while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
+		DBG(5, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
+		csr |= MUSB_TXCSR_FLUSHFIFO;
+		musb_writew(epio, MUSB_TXCSR, csr);
+		csr = musb_readw(epio, MUSB_TXCSR);
+		if (retries-- < 1) {
+			ERR("Could not flush host TX fifo: csr: %04x\n", csr);
+			return;
+		}
+		mdelay(1);
+	}
+}
+
+/*
+ * Start transmit. Caller is responsible for locking shared resources.
+ * musb must be locked.
+ */
+static inline void musb_h_tx_start(struct musb_hw_ep *ep)
+{
+	u16	txcsr;
+
+	/* NOTE: no locks here; caller should lock and select EP */
+	if (ep->epnum) {
+		txcsr = musb_readw(ep->regs, MUSB_TXCSR);
+		txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
+		musb_writew(ep->regs, MUSB_TXCSR, txcsr);
+	} else {
+		txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
+		musb_writew(ep->regs, MUSB_CSR0, txcsr);
+	}
+}
+
+static inline void cppi_host_txdma_start(struct musb_hw_ep *ep)
+{
+	u16	txcsr;
+
+	/* NOTE: no locks here; caller should lock and select EP */
+	txcsr = musb_readw(ep->regs, MUSB_TXCSR);
+	txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
+	musb_writew(ep->regs, MUSB_TXCSR, txcsr);
+}
+
+/*
+ * Start the URB at the front of an endpoint's queue
+ * end must be claimed from the caller.
+ *
+ * Context: controller locked, irqs blocked
+ */
+static void
+musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
+{
+	u16			frame;
+	u32			len;
+	void			*buf;
+	void __iomem		*mbase = musb->mregs;
+	struct urb		*urb = next_urb(qh);
+	struct musb_hw_ep	*hw_ep = qh->hw_ep;
+	unsigned		pipe = urb->pipe;
+	u8			address = usb_pipedevice(pipe);
+	int			epnum = hw_ep->epnum;
+
+	/* initialize software qh state */
+	qh->offset = 0;
+	qh->segsize = 0;
+
+	/* gather right source of data */
+	switch (qh->type) {
+	case USB_ENDPOINT_XFER_CONTROL:
+		/* control transfers always start with SETUP */
+		is_in = 0;
+		hw_ep->out_qh = qh;
+		musb->ep0_stage = MUSB_EP0_START;
+		buf = urb->setup_packet;
+		len = 8;
+		break;
+	case USB_ENDPOINT_XFER_ISOC:
+		qh->iso_idx = 0;
+		qh->frame = 0;
+		buf = urb->transfer_buffer + urb->iso_frame_desc[0].offset;
+		len = urb->iso_frame_desc[0].length;
+		break;
+	default:		/* bulk, interrupt */
+		buf = urb->transfer_buffer;
+		len = urb->transfer_buffer_length;
+	}
+
+	DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
+			qh, urb, address, qh->epnum,
+			is_in ? "in" : "out",
+			({char *s; switch (qh->type) {
+			case USB_ENDPOINT_XFER_CONTROL:	s = ""; break;
+			case USB_ENDPOINT_XFER_BULK:	s = "-bulk"; break;
+			case USB_ENDPOINT_XFER_ISOC:	s = "-iso"; break;
+			default:			s = "-intr"; break;
+			}; s; }),
+			epnum, buf, len);
+
+	/* Configure endpoint */
+	if (is_in || hw_ep->is_shared_fifo)
+		hw_ep->in_qh = qh;
+	else
+		hw_ep->out_qh = qh;
+	musb_ep_program(musb, epnum, urb, !is_in, buf, len);
+
+	/* transmit may have more work: start it when it is time */
+	if (is_in)
+		return;
+
+	/* determine if the time is right for a periodic transfer */
+	switch (qh->type) {
+	case USB_ENDPOINT_XFER_ISOC:
+	case USB_ENDPOINT_XFER_INT:
+		DBG(3, "check whether there's still time for periodic Tx\n");
+		qh->iso_idx = 0;
+		frame = musb_readw(mbase, MUSB_FRAME);
+		/* FIXME this doesn't implement that scheduling policy ...
+		 * or handle framecounter wrapping
+		 */
+		if ((urb->transfer_flags & URB_ISO_ASAP)
+				|| (frame >= urb->start_frame)) {
+			/* REVISIT the SOF irq handler shouldn't duplicate
+			 * this code; and we don't init urb->start_frame...
+			 */
+			qh->frame = 0;
+			goto start;
+		} else {
+			qh->frame = urb->start_frame;
+			/* enable SOF interrupt so we can count down */
+			DBG(1, "SOF for %d\n", epnum);
+#if 1 /* ifndef	CONFIG_ARCH_DAVINCI */
+			musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
+#endif
+		}
+		break;
+	default:
+start:
+		DBG(4, "Start TX%d %s\n", epnum,
+			hw_ep->tx_channel ? "dma" : "pio");
+
+		if (!hw_ep->tx_channel)
+			musb_h_tx_start(hw_ep);
+		else if (is_cppi_enabled() || tusb_dma_omap())
+			cppi_host_txdma_start(hw_ep);
+	}
+}
+
+/* caller owns controller lock, irqs are blocked */
+static void
+__musb_giveback(struct musb *musb, struct urb *urb, int status)
+__releases(musb->lock)
+__acquires(musb->lock)
+{
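+	/* the ({ ... }) block is a GCC statement expression: it picks a
+	 * debug level inline, so common/boring faults log more quietly
+	 */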
+	DBG(({ int level; switch (urb->status) {
+				case 0:
+					level = 4;
+					break;
+				/* common/boring faults */
+				case -EREMOTEIO:
+				case -ESHUTDOWN:
+				case -ECONNRESET:
+				case -EPIPE:
+					level = 3;
+					break;
+				default:
+					level = 2;
+					break;
+				}; level; }),
+			"complete %p (%d), dev%d ep%d%s, %d/%d\n",
+			urb, urb->status,
+			usb_pipedevice(urb->pipe),
+			usb_pipeendpoint(urb->pipe),
+			usb_pipein(urb->pipe) ? "in" : "out",
+			urb->actual_length, urb->transfer_buffer_length
+			);
+
+	spin_unlock(&musb->lock);
+	usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status);
+	spin_lock(&musb->lock);
+}
+
+/* for bulk/interrupt endpoints only */
+static inline void
+musb_save_toggle(struct musb_hw_ep *ep, int is_in, struct urb *urb)
+{
+	struct usb_device	*udev = urb->dev;
+	u16			csr;
+	void __iomem		*epio = ep->regs;
+	struct musb_qh		*qh;
+
+	/* FIXME:  the current Mentor DMA code seems to have
+	 * problems getting toggle correct.
+	 */
+
+	if (is_in || ep->is_shared_fifo)
+		qh = ep->in_qh;
+	else
+		qh = ep->out_qh;
+
+	if (!is_in) {
+		csr = musb_readw(epio, MUSB_TXCSR);
+		usb_settoggle(udev, qh->epnum, 1,
+			(csr & MUSB_TXCSR_H_DATATOGGLE)
+				? 1 : 0);
+	} else {
+		csr = musb_readw(epio, MUSB_RXCSR);
+		usb_settoggle(udev, qh->epnum, 0,
+			(csr & MUSB_RXCSR_H_DATATOGGLE)
+				? 1 : 0);
+	}
+}
+
+/* caller owns controller lock, irqs are blocked */
+static struct musb_qh *
+musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
+{
+	int			is_in;
+	struct musb_hw_ep	*ep = qh->hw_ep;
+	struct musb		*musb = ep->musb;
+	int			ready = qh->is_ready;
+
+	if (ep->is_shared_fifo)
+		is_in = 1;
+	else
+		is_in = usb_pipein(urb->pipe);
+
+	/* save toggle eagerly, for paranoia */
+	switch (qh->type) {
+	case USB_ENDPOINT_XFER_BULK:
+	case USB_ENDPOINT_XFER_INT:
+		musb_save_toggle(ep, is_in, urb);
+		break;
+	case USB_ENDPOINT_XFER_ISOC:
+		if (status == 0 && urb->error_count)
+			status = -EXDEV;
+		break;
+	}
+
+	usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
+
+	qh->is_ready = 0;
+	__musb_giveback(musb, urb, status);
+	qh->is_ready = ready;
+
+	/* reclaim resources (and bandwidth) ASAP; deschedule it, and
+	 * invalidate qh as soon as list_empty(&hep->urb_list)
+	 */
+	if (list_empty(&qh->hep->urb_list)) {
+		struct list_head	*head;
+
+		if (is_in)
+			ep->rx_reinit = 1;
+		else
+			ep->tx_reinit = 1;
+
+		/* clobber old pointers to this qh */
+		if (is_in || ep->is_shared_fifo)
+			ep->in_qh = NULL;
+		else
+			ep->out_qh = NULL;
+		qh->hep->hcpriv = NULL;
+
+		switch (qh->type) {
+
+		case USB_ENDPOINT_XFER_ISOC:
+		case USB_ENDPOINT_XFER_INT:
+			/* this is where periodic bandwidth should be
+			 * de-allocated if it's tracked and allocated;
+			 * and where we'd update the schedule tree...
+			 */
+			musb->periodic[ep->epnum] = NULL;
+			kfree(qh);
+			qh = NULL;
+			break;
+
+		case USB_ENDPOINT_XFER_CONTROL:
+		case USB_ENDPOINT_XFER_BULK:
+			/* fifo policy for these lists, except that NAKing
+			 * should rotate a qh to the end (for fairness).
+			 */
+			head = qh->ring.prev;
+			list_del(&qh->ring);
+			kfree(qh);
+			qh = first_qh(head);
+			break;
+		}
+	}
+	return qh;
+}
+
+/*
+ * Advance this hardware endpoint's queue, completing the specified urb and
+ * advancing to either the next urb queued to that qh, or else invalidating
+ * that qh and advancing to the next qh scheduled after the current one.
+ *
+ * Context: caller owns controller lock, irqs are blocked
+ */
+static void
+musb_advance_schedule(struct musb *musb, struct urb *urb,
+		struct musb_hw_ep *hw_ep, int is_in)
+{
+	struct musb_qh	*qh;
+
+	if (is_in || hw_ep->is_shared_fifo)
+		qh = hw_ep->in_qh;
+	else
+		qh = hw_ep->out_qh;
+
+	if (urb->status == -EINPROGRESS)
+		qh = musb_giveback(qh, urb, 0);
+	else
+		qh = musb_giveback(qh, urb, urb->status);
+
+	if (qh && qh->is_ready && !list_empty(&qh->hep->urb_list)) {
+		DBG(4, "... next ep%d %cX urb %p\n",
+				hw_ep->epnum, is_in ? 'R' : 'T',
+				next_urb(qh));
+		musb_start_urb(musb, is_in, qh);
+	}
+}
+
+static inline u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
+{
+	/* we don't want fifo to fill itself again;
+	 * ignore dma (various models),
+	 * leave toggle alone (may not have been saved yet)
+	 */
+	csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
+	csr &= ~(MUSB_RXCSR_H_REQPKT
+		| MUSB_RXCSR_H_AUTOREQ
+		| MUSB_RXCSR_AUTOCLEAR);
+
+	/* write 2x to allow double buffering */
+	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
+	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
+
+	/* flush writebuffer */
+	return musb_readw(hw_ep->regs, MUSB_RXCSR);
+}
+
+/*
+ * PIO RX for a packet (or part of it).
+ */
+static bool
+musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
+{
+	u16			rx_count;
+	u8			*buf;
+	u16			csr;
+	bool			done = false;
+	u32			length;
+	int			do_flush = 0;
+	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
+	void __iomem		*epio = hw_ep->regs;
+	struct musb_qh		*qh = hw_ep->in_qh;
+	int			pipe = urb->pipe;
+	void			*buffer = urb->transfer_buffer;
+
+	/* musb_ep_select(mbase, epnum); -- caller has already selected it */
+	rx_count = musb_readw(epio, MUSB_RXCOUNT);
+	DBG(3, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
+			urb->transfer_buffer, qh->offset,
+			urb->transfer_buffer_length);
+
+	/* unload FIFO */
+	if (usb_pipeisoc(pipe)) {
+		int					status = 0;
+		struct usb_iso_packet_descriptor	*d;
+
+		if (iso_err) {
+			status = -EILSEQ;
+			urb->error_count++;
+		}
+
+		d = urb->iso_frame_desc + qh->iso_idx;
+		buf = buffer + d->offset;
+		length = d->length;
+		if (rx_count > length) {
+			if (status == 0) {
+				status = -EOVERFLOW;
+				urb->error_count++;
+			}
+			DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
+			do_flush = 1;
+		} else
+			length = rx_count;
+		urb->actual_length += length;
+		d->actual_length = length;
+
+		d->status = status;
+
+		/* see if we are done */
+		done = (++qh->iso_idx >= urb->number_of_packets);
+	} else {
+		/* non-isoch */
+		buf = buffer + qh->offset;
+		length = urb->transfer_buffer_length - qh->offset;
+		if (rx_count > length) {
+			if (urb->status == -EINPROGRESS)
+				urb->status = -EOVERFLOW;
+			DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
+			do_flush = 1;
+		} else
+			length = rx_count;
+		urb->actual_length += length;
+		qh->offset += length;
+
+		/* see if we are done */
+		done = (urb->actual_length == urb->transfer_buffer_length)
+			|| (rx_count < qh->maxpacket)
+			|| (urb->status != -EINPROGRESS);
+		if (done
+				&& (urb->status == -EINPROGRESS)
+				&& (urb->transfer_flags & URB_SHORT_NOT_OK)
+				&& (urb->actual_length
+					< urb->transfer_buffer_length))
+			urb->status = -EREMOTEIO;
+	}
+
+	musb_read_fifo(hw_ep, length, buf);
+
+	csr = musb_readw(epio, MUSB_RXCSR);
+	csr |= MUSB_RXCSR_H_WZC_BITS;
+	if (unlikely(do_flush))
+		musb_h_flush_rxfifo(hw_ep, csr);
+	else {
+		/* REVISIT this assumes AUTOCLEAR is never set */
+		csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
+		if (!done)
+			csr |= MUSB_RXCSR_H_REQPKT;
+		musb_writew(epio, MUSB_RXCSR, csr);
+	}
+
+	return done;
+}
+
+/* we don't always need to reinit a given side of an endpoint...
+ * when we do, use tx/rx reinit routine and then construct a new CSR
+ * to address data toggle, NYET, and DMA or PIO.
+ *
+ * it's possible that driver bugs (especially for DMA) or aborting a
+ * transfer might have left the endpoint busier than it should be.
+ * the busy/not-empty tests are basically paranoia.
+ */
+static void
+musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
+{
+	u16	csr;
+
+	/* NOTE:  we know the "rx" fifo reinit never triggers for ep0.
+	 * That always uses tx_reinit since ep0 repurposes TX register
+	 * offsets; the initial SETUP packet is also a kind of OUT.
+	 */
+
+	/* if programmed for Tx, put it in RX mode */
+	if (ep->is_shared_fifo) {
+		csr = musb_readw(ep->regs, MUSB_TXCSR);
+		if (csr & MUSB_TXCSR_MODE) {
+			musb_h_tx_flush_fifo(ep);
+			musb_writew(ep->regs, MUSB_TXCSR,
+					MUSB_TXCSR_FRCDATATOG);
+		}
+		/* clear mode (and everything else) to enable Rx */
+		musb_writew(ep->regs, MUSB_TXCSR, 0);
+
+	/* scrub all previous state, clearing toggle */
+	} else {
+		csr = musb_readw(ep->regs, MUSB_RXCSR);
+		if (csr & MUSB_RXCSR_RXPKTRDY)
+			WARNING("rx%d, packet/%d ready?\n", ep->epnum,
+				musb_readw(ep->regs, MUSB_RXCOUNT));
+
+		musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
+	}
+
+	/* target addr and (for multipoint) hub addr/port */
+	if (musb->is_multipoint) {
+		musb_writeb(ep->target_regs, MUSB_RXFUNCADDR,
+			qh->addr_reg);
+		musb_writeb(ep->target_regs, MUSB_RXHUBADDR,
+			qh->h_addr_reg);
+		musb_writeb(ep->target_regs, MUSB_RXHUBPORT,
+			qh->h_port_reg);
+	} else
+		musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);
+
+	/* protocol/endpoint, interval/NAKlimit, i/o size */
+	musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
+	musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
+	/* NOTE: bulk combining rewrites high bits of maxpacket */
+	musb_writew(ep->regs, MUSB_RXMAXP, qh->maxpacket);
+
+	ep->rx_reinit = 0;
+}
+
+
+/*
+ * Program an HDRC endpoint as per the given URB
+ * Context: irqs blocked, controller lock held
+ */
+static void musb_ep_program(struct musb *musb, u8 epnum,
+			struct urb *urb, unsigned int is_out,
+			u8 *buf, u32 len)
+{
+	struct dma_controller	*dma_controller;
+	struct dma_channel	*dma_channel;
+	u8			dma_ok;
+	void __iomem		*mbase = musb->mregs;
+	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
+	void __iomem		*epio = hw_ep->regs;
+	struct musb_qh		*qh;
+	u16			packet_sz;
+
+	if (!is_out || hw_ep->is_shared_fifo)
+		qh = hw_ep->in_qh;
+	else
+		qh = hw_ep->out_qh;
+
+	packet_sz = qh->maxpacket;
+
+	DBG(3, "%s hw%d urb %p spd%d dev%d ep%d%s "
+				"h_addr%02x h_port%02x bytes %d\n",
+			is_out ? "-->" : "<--",
+			epnum, urb, urb->dev->speed,
+			qh->addr_reg, qh->epnum, is_out ? "out" : "in",
+			qh->h_addr_reg, qh->h_port_reg,
+			len);
+
+	musb_ep_select(mbase, epnum);
+
+	/* candidate for DMA? */
+	dma_controller = musb->dma_controller;
+	if (is_dma_capable() && epnum && dma_controller) {
+		dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
+		if (!dma_channel) {
+			dma_channel = dma_controller->channel_alloc(
+					dma_controller, hw_ep, is_out);
+			if (is_out)
+				hw_ep->tx_channel = dma_channel;
+			else
+				hw_ep->rx_channel = dma_channel;
+		}
+	} else
+		dma_channel = NULL;
+
+	/* make sure we clear DMAEnab, autoSet bits from previous run */
+
+	/* OUT/transmit/EP0 or IN/receive? */
+	if (is_out) {
+		u16	csr;
+		u16	int_txe;
+		u16	load_count;
+
+		csr = musb_readw(epio, MUSB_TXCSR);
+
+		/* disable interrupt in case we flush */
+		int_txe = musb_readw(mbase, MUSB_INTRTXE);
+		musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));
+
+		/* general endpoint setup */
+		if (epnum) {
+			/* ASSERT:  TXCSR_DMAENAB was already cleared */
+
+			/* flush all old state, set default */
+			musb_h_tx_flush_fifo(hw_ep);
+			csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
+					| MUSB_TXCSR_DMAMODE
+					| MUSB_TXCSR_FRCDATATOG
+					| MUSB_TXCSR_H_RXSTALL
+					| MUSB_TXCSR_H_ERROR
+					| MUSB_TXCSR_TXPKTRDY
+					);
+			csr |= MUSB_TXCSR_MODE;
+
+			if (usb_gettoggle(urb->dev,
+					qh->epnum, 1))
+				csr |= MUSB_TXCSR_H_WR_DATATOGGLE
+					| MUSB_TXCSR_H_DATATOGGLE;
+			else
+				csr |= MUSB_TXCSR_CLRDATATOG;
+
+			/* twice in case of double packet buffering */
+			musb_writew(epio, MUSB_TXCSR, csr);
+			/* REVISIT may need to clear FLUSHFIFO ... */
+			musb_writew(epio, MUSB_TXCSR, csr);
+			csr = musb_readw(epio, MUSB_TXCSR);
+		} else {
+			/* endpoint 0: just flush */
+			musb_writew(epio, MUSB_CSR0,
+				csr | MUSB_CSR0_FLUSHFIFO);
+			musb_writew(epio, MUSB_CSR0,
+				csr | MUSB_CSR0_FLUSHFIFO);
+		}
+
+		/* target addr and (for multipoint) hub addr/port */
+		if (musb->is_multipoint) {
+			musb_writeb(mbase,
+				MUSB_BUSCTL_OFFSET(epnum, MUSB_TXFUNCADDR),
+				qh->addr_reg);
+			musb_writeb(mbase,
+				MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBADDR),
+				qh->h_addr_reg);
+			musb_writeb(mbase,
+				MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBPORT),
+				qh->h_port_reg);
+/* FIXME if !epnum, do the same for RX ... */
+		} else
+			musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);
+
+		/* protocol/endpoint/interval/NAKlimit */
+		if (epnum) {
+			musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
+			if (can_bulk_split(musb, qh->type))
+				musb_writew(epio, MUSB_TXMAXP,
+					packet_sz
+					| ((hw_ep->max_packet_sz_tx /
+						packet_sz) - 1) << 11);
+			else
+				musb_writew(epio, MUSB_TXMAXP,
+					packet_sz);
+			musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
+		} else {
+			musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
+			if (musb->is_multipoint)
+				musb_writeb(epio, MUSB_TYPE0,
+						qh->type_reg);
+		}
+
+		if (can_bulk_split(musb, qh->type))
+			load_count = min((u32) hw_ep->max_packet_sz_tx,
+						len);
+		else
+			load_count = min((u32) packet_sz, len);
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+		if (dma_channel) {
+
+			/* clear previous state */
+			csr = musb_readw(epio, MUSB_TXCSR);
+			csr &= ~(MUSB_TXCSR_AUTOSET
+				| MUSB_TXCSR_DMAMODE
+				| MUSB_TXCSR_DMAENAB);
+			csr |= MUSB_TXCSR_MODE;
+			musb_writew(epio, MUSB_TXCSR,
+				csr | MUSB_TXCSR_MODE);
+
+			qh->segsize = min(len, dma_channel->max_len);
+
+			if (qh->segsize <= packet_sz)
+				dma_channel->desired_mode = 0;
+			else
+				dma_channel->desired_mode = 1;
+
+
+			if (dma_channel->desired_mode == 0) {
+				csr &= ~(MUSB_TXCSR_AUTOSET
+					| MUSB_TXCSR_DMAMODE);
+				csr |= (MUSB_TXCSR_DMAENAB);
+					/* against programming guide */
+			} else
+				csr |= (MUSB_TXCSR_AUTOSET
+					| MUSB_TXCSR_DMAENAB
+					| MUSB_TXCSR_DMAMODE);
+
+			musb_writew(epio, MUSB_TXCSR, csr);
+
+			dma_ok = dma_controller->channel_program(
+					dma_channel, packet_sz,
+					dma_channel->desired_mode,
+					urb->transfer_dma,
+					qh->segsize);
+			if (dma_ok) {
+				load_count = 0;
+			} else {
+				dma_controller->channel_release(dma_channel);
+				if (is_out)
+					hw_ep->tx_channel = NULL;
+				else
+					hw_ep->rx_channel = NULL;
+				dma_channel = NULL;
+			}
+		}
+#endif
+
+		/* candidate for DMA */
+		if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
+
+			/* program endpoint CSRs first, then setup DMA.
+			 * assume CPPI setup succeeds.
+			 * defer enabling dma.
+			 */
+			csr = musb_readw(epio, MUSB_TXCSR);
+			csr &= ~(MUSB_TXCSR_AUTOSET
+					| MUSB_TXCSR_DMAMODE
+					| MUSB_TXCSR_DMAENAB);
+			csr |= MUSB_TXCSR_MODE;
+			musb_writew(epio, MUSB_TXCSR,
+				csr | MUSB_TXCSR_MODE);
+
+			dma_channel->actual_len = 0L;
+			qh->segsize = len;
+
+			/* TX uses "rndis" mode automatically, but needs help
+			 * to identify the zero-length-final-packet case.
+			 */
+			dma_ok = dma_controller->channel_program(
+					dma_channel, packet_sz,
+					(urb->transfer_flags
+							& URB_ZERO_PACKET)
+						== URB_ZERO_PACKET,
+					urb->transfer_dma,
+					qh->segsize);
+			if (dma_ok) {
+				load_count = 0;
+			} else {
+				dma_controller->channel_release(dma_channel);
+				hw_ep->tx_channel = NULL;
+				dma_channel = NULL;
+
+				/* REVISIT there's an error path here that
+				 * needs handling:  can't do dma, but
+				 * there's no pio buffer address...
+				 */
+			}
+		}
+
+		if (load_count) {
+			/* ASSERT:  TXCSR_DMAENAB was already cleared */
+
+			/* PIO to load FIFO */
+			qh->segsize = load_count;
+			musb_write_fifo(hw_ep, load_count, buf);
+			csr = musb_readw(epio, MUSB_TXCSR);
+			csr &= ~(MUSB_TXCSR_DMAENAB
+				| MUSB_TXCSR_DMAMODE
+				| MUSB_TXCSR_AUTOSET);
+			/* write CSR */
+			csr |= MUSB_TXCSR_MODE;
+
+			if (epnum)
+				musb_writew(epio, MUSB_TXCSR, csr);
+		}
+
+		/* re-enable interrupt */
+		musb_writew(mbase, MUSB_INTRTXE, int_txe);
+
+	/* IN/receive */
+	} else {
+		u16	csr;
+
+		if (hw_ep->rx_reinit) {
+			musb_rx_reinit(musb, qh, hw_ep);
+
+			/* init new state: toggle and NYET, maybe DMA later */
+			if (usb_gettoggle(urb->dev, qh->epnum, 0))
+				csr = MUSB_RXCSR_H_WR_DATATOGGLE
+					| MUSB_RXCSR_H_DATATOGGLE;
+			else
+				csr = 0;
+			if (qh->type == USB_ENDPOINT_XFER_INT)
+				csr |= MUSB_RXCSR_DISNYET;
+
+		} else {
+			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
+
+			if (csr & (MUSB_RXCSR_RXPKTRDY
+					| MUSB_RXCSR_DMAENAB
+					| MUSB_RXCSR_H_REQPKT))
+				ERR("broken !rx_reinit, ep%d csr %04x\n",
+						hw_ep->epnum, csr);
+
+			/* scrub any stale state, leaving toggle alone */
+			csr &= MUSB_RXCSR_DISNYET;
+		}
+
+		/* kick things off */
+
+		if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
+			/* candidate for DMA */
+			if (dma_channel) {
+				dma_channel->actual_len = 0L;
+				qh->segsize = len;
+
+				/* AUTOREQ is in a DMA register */
+				musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
+				csr = musb_readw(hw_ep->regs,
+						MUSB_RXCSR);
+
+				/* unless caller treats short rx transfers as
+				 * errors, we dare not queue multiple transfers.
+				 */
+				dma_ok = dma_controller->channel_program(
+						dma_channel, packet_sz,
+						!(urb->transfer_flags
+							& URB_SHORT_NOT_OK),
+						urb->transfer_dma,
+						qh->segsize);
+				if (!dma_ok) {
+					dma_controller->channel_release(
+							dma_channel);
+					hw_ep->rx_channel = NULL;
+					dma_channel = NULL;
+				} else
+					csr |= MUSB_RXCSR_DMAENAB;
+			}
+		}
+
+		csr |= MUSB_RXCSR_H_REQPKT;
+		DBG(7, "RXCSR%d := %04x\n", epnum, csr);
+		musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
+		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
+	}
+}
+
+
+/*
+ * Service the default endpoint (ep0) as host.
+ * Return true until it's time to start the status stage.
+ */
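+/*
+ * The resulting ep0 state walk is MUSB_EP0_START, then one IN or OUT
+ * data packet per call while this returns true, then MUSB_EP0_STATUS
+ * and MUSB_EP0_IDLE (both handled in musb_h_ep0_irq() below).
+ */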
+static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
+{
+	bool			 more = false;
+	u8			*fifo_dest = NULL;
+	u16			fifo_count = 0;
+	struct musb_hw_ep	*hw_ep = musb->control_ep;
+	struct musb_qh		*qh = hw_ep->in_qh;
+	struct usb_ctrlrequest	*request;
+
+	switch (musb->ep0_stage) {
+	case MUSB_EP0_IN:
+		fifo_dest = urb->transfer_buffer + urb->actual_length;
+		fifo_count = min(len, ((u16) (urb->transfer_buffer_length
+					- urb->actual_length)));
+		if (fifo_count < len)
+			urb->status = -EOVERFLOW;
+
+		musb_read_fifo(hw_ep, fifo_count, fifo_dest);
+
+		urb->actual_length += fifo_count;
+		if (len < qh->maxpacket) {
+			/* always terminate on short read; it's
+			 * rarely reported as an error.
+			 */
+		} else if (urb->actual_length <
+				urb->transfer_buffer_length)
+			more = true;
+		break;
+	case MUSB_EP0_START:
+		request = (struct usb_ctrlrequest *) urb->setup_packet;
+
+		if (!request->wLength) {
+			DBG(4, "start no-DATA\n");
+			break;
+		} else if (request->bRequestType & USB_DIR_IN) {
+			DBG(4, "start IN-DATA\n");
+			musb->ep0_stage = MUSB_EP0_IN;
+			more = true;
+			break;
+		} else {
+			DBG(4, "start OUT-DATA\n");
+			musb->ep0_stage = MUSB_EP0_OUT;
+			more = true;
+		}
+		/* FALLTHROUGH */
+	case MUSB_EP0_OUT:
+		fifo_count = min(qh->maxpacket, ((u16)
+				(urb->transfer_buffer_length
+				- urb->actual_length)));
+
+		if (fifo_count) {
+			fifo_dest = (u8 *) (urb->transfer_buffer
+					+ urb->actual_length);
+			DBG(3, "Sending %d bytes to %p\n",
+					fifo_count, fifo_dest);
+			musb_write_fifo(hw_ep, fifo_count, fifo_dest);
+
+			urb->actual_length += fifo_count;
+			more = true;
+		}
+		break;
+	default:
+		ERR("bogus ep0 stage %d\n", musb->ep0_stage);
+		break;
+	}
+
+	return more;
+}
+
+/*
+ * Handle the default endpoint (ep0) interrupt as host. Called only at
+ * IRQ time, from the interrupt service routine.
+ *
+ * called with controller irqlocked
+ */
+irqreturn_t musb_h_ep0_irq(struct musb *musb)
+{
+	struct urb		*urb;
+	u16			csr, len;
+	int			status = 0;
+	void __iomem		*mbase = musb->mregs;
+	struct musb_hw_ep	*hw_ep = musb->control_ep;
+	void __iomem		*epio = hw_ep->regs;
+	struct musb_qh		*qh = hw_ep->in_qh;
+	bool			complete = false;
+	irqreturn_t		retval = IRQ_NONE;
+
+	/* ep0 only has one queue, "in" */
+	urb = next_urb(qh);
+
+	musb_ep_select(mbase, 0);
+	csr = musb_readw(epio, MUSB_CSR0);
+	len = (csr & MUSB_CSR0_RXPKTRDY)
+			? musb_readb(epio, MUSB_COUNT0)
+			: 0;
+
+	DBG(4, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
+		csr, qh, len, urb, musb->ep0_stage);
+
+	/* if we just did status stage, we are done */
+	if (MUSB_EP0_STATUS == musb->ep0_stage) {
+		retval = IRQ_HANDLED;
+		complete = true;
+	}
+
+	/* prepare status */
+	if (csr & MUSB_CSR0_H_RXSTALL) {
+		DBG(6, "STALLING ENDPOINT\n");
+		status = -EPIPE;
+
+	} else if (csr & MUSB_CSR0_H_ERROR) {
+		DBG(2, "no response, csr0 %04x\n", csr);
+		status = -EPROTO;
+
+	} else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
+		DBG(2, "control NAK timeout\n");
+
+		/* NOTE:  this code path would be a good place to PAUSE a
+		 * control transfer, if another one is queued, so that
+		 * ep0 is more likely to stay busy.
+		 *
+		 * if (qh->ring.next != &musb->control), then
+		 * we have a candidate... NAKing is *NOT* an error
+		 */
+		musb_writew(epio, MUSB_CSR0, 0);
+		retval = IRQ_HANDLED;
+	}
+
+	if (status) {
+		DBG(6, "aborting\n");
+		retval = IRQ_HANDLED;
+		if (urb)
+			urb->status = status;
+		complete = true;
+
+		/* use the proper sequence to abort the transfer */
+		if (csr & MUSB_CSR0_H_REQPKT) {
+			csr &= ~MUSB_CSR0_H_REQPKT;
+			musb_writew(epio, MUSB_CSR0, csr);
+			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
+			musb_writew(epio, MUSB_CSR0, csr);
+		} else {
+			csr |= MUSB_CSR0_FLUSHFIFO;
+			musb_writew(epio, MUSB_CSR0, csr);
+			musb_writew(epio, MUSB_CSR0, csr);
+			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
+			musb_writew(epio, MUSB_CSR0, csr);
+		}
+
+		musb_writeb(epio, MUSB_NAKLIMIT0, 0);
+
+		/* clear it */
+		musb_writew(epio, MUSB_CSR0, 0);
+	}
+
+	if (unlikely(!urb)) {
+		/* stop endpoint since we have no place for its data -- this
+		 * SHOULD NEVER HAPPEN! */
+		ERR("no URB for end 0\n");
+
+		musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO);
+		musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO);
+		musb_writew(epio, MUSB_CSR0, 0);
+
+		goto done;
+	}
+
+	if (!complete) {
+		/* call common logic and prepare response */
+		if (musb_h_ep0_continue(musb, len, urb)) {
+			/* more packets required */
+			csr = (MUSB_EP0_IN == musb->ep0_stage)
+				?  MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
+		} else {
+			/* data transfer complete; perform status phase */
+			if (usb_pipeout(urb->pipe)
+					|| !urb->transfer_buffer_length)
+				csr = MUSB_CSR0_H_STATUSPKT
+					| MUSB_CSR0_H_REQPKT;
+			else
+				csr = MUSB_CSR0_H_STATUSPKT
+					| MUSB_CSR0_TXPKTRDY;
+
+			/* flag status stage */
+			musb->ep0_stage = MUSB_EP0_STATUS;
+
+			DBG(5, "ep0 STATUS, csr %04x\n", csr);
+
+		}
+		musb_writew(epio, MUSB_CSR0, csr);
+		retval = IRQ_HANDLED;
+	} else
+		musb->ep0_stage = MUSB_EP0_IDLE;
+
+	/* call completion handler if done */
+	if (complete)
+		musb_advance_schedule(musb, urb, hw_ep, 1);
+done:
+	return retval;
+}
+
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+
+/* Host side TX (OUT) using Mentor DMA works as follows:
+	submit_urb ->
+		- if queue was empty, Program Endpoint
+		- ... which starts DMA to fifo in mode 1 or 0
+
+	DMA Isr (transfer complete) -> TxAvail()
+		- Stop DMA (~DmaEnab)	(<--- Alert ... currently happens
+					only in musb_cleanup_urb)
+		- TxPktRdy has to be set in mode 0 or for
+			short packets in mode 1.
+*/
+
+#endif
+
+/* Service a Tx-Available or dma completion irq for the endpoint */
+void musb_host_tx(struct musb *musb, u8 epnum)
+{
+	int			pipe;
+	bool			done = false;
+	u16			tx_csr;
+	size_t			wLength = 0;
+	u8			*buf = NULL;
+	struct urb		*urb;
+	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
+	void __iomem		*epio = hw_ep->regs;
+	struct musb_qh		*qh = hw_ep->out_qh;
+	u32			status = 0;
+	void __iomem		*mbase = musb->mregs;
+	struct dma_channel	*dma;
+
+	urb = next_urb(qh);
+
+	musb_ep_select(mbase, epnum);
+	tx_csr = musb_readw(epio, MUSB_TXCSR);
+
+	/* with CPPI, DMA sometimes triggers "extra" irqs */
+	if (!urb) {
+		DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
+		goto finish;
+	}
+
+	pipe = urb->pipe;
+	dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
+	DBG(4, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr,
+			dma ? ", dma" : "");
+
+	/* check for errors */
+	if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
+		/* dma was disabled, fifo flushed */
+		DBG(3, "TX end %d stall\n", epnum);
+
+		/* stall; record URB status */
+		status = -EPIPE;
+
+	} else if (tx_csr & MUSB_TXCSR_H_ERROR) {
+		/* (NON-ISO) dma was disabled, fifo flushed */
+		DBG(3, "TX 3strikes on ep=%d\n", epnum);
+
+		status = -ETIMEDOUT;
+
+	} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
+		DBG(6, "TX end=%d device not responding\n", epnum);
+
+		/* NOTE:  this code path would be a good place to PAUSE a
+		 * transfer, if there's some other (nonperiodic) tx urb
+		 * that could use this fifo.  (dma complicates it...)
+		 *
+		 * if (bulk && qh->ring.next != &musb->out_bulk), then
+		 * we have a candidate... NAKing is *NOT* an error
+		 */
+		musb_ep_select(mbase, epnum);
+		musb_writew(epio, MUSB_TXCSR,
+				MUSB_TXCSR_H_WZC_BITS
+				| MUSB_TXCSR_TXPKTRDY);
+		goto finish;
+	}
+
+	if (status) {
+		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
+			(void) musb->dma_controller->channel_abort(dma);
+		}
+
+		/* do the proper sequence to abort the transfer in the
+		 * usb core; the dma engine should already be stopped.
+		 */
+		musb_h_tx_flush_fifo(hw_ep);
+		tx_csr &= ~(MUSB_TXCSR_AUTOSET
+				| MUSB_TXCSR_DMAENAB
+				| MUSB_TXCSR_H_ERROR
+				| MUSB_TXCSR_H_RXSTALL
+				| MUSB_TXCSR_H_NAKTIMEOUT
+				);
+
+		musb_ep_select(mbase, epnum);
+		musb_writew(epio, MUSB_TXCSR, tx_csr);
+		/* REVISIT may need to clear FLUSHFIFO ... */
+		musb_writew(epio, MUSB_TXCSR, tx_csr);
+		musb_writeb(epio, MUSB_TXINTERVAL, 0);
+
+		done = true;
+	}
+
+	/* second cppi case */
+	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+		DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
+		goto finish;
+	}
+
+	/* REVISIT this looks wrong... */
+	if (!status || dma || usb_pipeisoc(pipe)) {
+		if (dma)
+			wLength = dma->actual_len;
+		else
+			wLength = qh->segsize;
+		qh->offset += wLength;
+
+		if (usb_pipeisoc(pipe)) {
+			struct usb_iso_packet_descriptor	*d;
+
+			d = urb->iso_frame_desc + qh->iso_idx;
+			d->actual_length = qh->segsize;
+			if (++qh->iso_idx >= urb->number_of_packets) {
+				done = true;
+			} else {
+				d++;
+				buf = urb->transfer_buffer + d->offset;
+				wLength = d->length;
+			}
+		} else if (dma) {
+			done = true;
+		} else {
+			/* see if we need to send more data, or ZLP */
+			if (qh->segsize < qh->maxpacket)
+				done = true;
+			else if (qh->offset == urb->transfer_buffer_length
+					&& !(urb->transfer_flags
+						& URB_ZERO_PACKET))
+				done = true;
+			if (!done) {
+				buf = urb->transfer_buffer
+						+ qh->offset;
+				wLength = urb->transfer_buffer_length
+						- qh->offset;
+			}
+		}
+	}
+
+	/* urb->status != -EINPROGRESS means request has been faulted,
+	 * so we must abort this transfer after cleanup
+	 */
+	if (urb->status != -EINPROGRESS) {
+		done = true;
+		if (status == 0)
+			status = urb->status;
+	}
+
+	if (done) {
+		/* set status */
+		urb->status = status;
+		urb->actual_length = qh->offset;
+		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
+
+	} else if (!(tx_csr & MUSB_TXCSR_DMAENAB)) {
+		/* WARN_ON(!buf); */
+
+		/* REVISIT:  some docs say that when hw_ep->tx_double_buffered,
+		 * (and presumably, fifo is not half-full) we should write TWO
+		 * packets before updating TXCSR ... other docs disagree ...
+		 */
+		/* PIO:  start next packet in this URB */
+		wLength = min(qh->maxpacket, (u16) wLength);
+		musb_write_fifo(hw_ep, wLength, buf);
+		qh->segsize = wLength;
+
+		musb_ep_select(mbase, epnum);
+		musb_writew(epio, MUSB_TXCSR,
+				MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
+	} else
+		DBG(1, "not complete, but dma enabled?\n");
+
+finish:
+	return;
+}
+
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+
+/* Host side RX (IN) using Mentor DMA works as follows:
+	submit_urb ->
+		- if queue was empty, ProgramEndpoint
+		- first IN token is sent out (by setting ReqPkt)
+	LinuxIsr -> RxReady()
+	/\	=> first packet is received
+	|	- Set in mode 0 (DmaEnab, ~ReqPkt)
+	|		-> DMA Isr (transfer complete) -> RxReady()
+	|		    - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
+	|		    - if urb not complete, send next IN token (ReqPkt)
+	|			   |		else complete urb.
+	|			   |
+	---------------------------
+ *
+ * Nuances of mode 1:
+ *	For short packets, no ack (+RxPktRdy) is sent automatically
+ *	(even if AutoClear is ON)
+ *	For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent
+ *	automatically => major problem, as collecting the next packet becomes
+ *	difficult. Hence mode 1 is not used.
+ *
+ * REVISIT
+ *	All we care about at this driver level is that
+ *       (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
+ *       (b) termination conditions are: short RX, or buffer full;
+ *       (c) fault modes include
+ *           - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
+ *             (and that endpoint's dma queue stops immediately)
+ *           - overflow (full, PLUS more bytes in the terminal packet)
+ *
+ *	So for example, usb-storage sets URB_SHORT_NOT_OK, and would
+ *	thus be a great candidate for using mode 1 ... for all but the
+ *	last packet of one URB's transfer.
+ */
+
+#endif
+
+/*
+ * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
+ * and high-bandwidth IN transfer cases.
+ */
+void musb_host_rx(struct musb *musb, u8 epnum)
+{
+	struct urb		*urb;
+	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
+	void __iomem		*epio = hw_ep->regs;
+	struct musb_qh		*qh = hw_ep->in_qh;
+	size_t			xfer_len;
+	void __iomem		*mbase = musb->mregs;
+	int			pipe;
+	u16			rx_csr, val;
+	bool			iso_err = false;
+	bool			done = false;
+	u32			status;
+	struct dma_channel	*dma;
+
+	musb_ep_select(mbase, epnum);
+
+	urb = next_urb(qh);
+	dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
+	status = 0;
+	xfer_len = 0;
+
+	rx_csr = musb_readw(epio, MUSB_RXCSR);
+	val = rx_csr;
+
+	if (unlikely(!urb)) {
+		/* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
+		 * usbtest #11 (unlinks) triggers it regularly, sometimes
+		 * with fifo full.  (Only with DMA??)
+		 */
+		DBG(3, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
+			musb_readw(epio, MUSB_RXCOUNT));
+		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
+		return;
+	}
+
+	pipe = urb->pipe;
+
+	DBG(5, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
+		epnum, rx_csr, urb->actual_length,
+		dma ? dma->actual_len : 0);
+
+	/* check for errors, concurrent stall & unlink is not really
+	 * handled yet! */
+	if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
+		DBG(3, "RX end %d STALL\n", epnum);
+
+		/* stall; record URB status */
+		status = -EPIPE;
+
+	} else if (rx_csr & MUSB_RXCSR_H_ERROR) {
+		DBG(3, "end %d RX proto error\n", epnum);
+
+		status = -EPROTO;
+		musb_writeb(epio, MUSB_RXINTERVAL, 0);
+
+	} else if (rx_csr & MUSB_RXCSR_DATAERROR) {
+
+		if (USB_ENDPOINT_XFER_ISOC != qh->type) {
+			/* NOTE this code path would be a good place to PAUSE a
+			 * transfer, if there's some other (nonperiodic) rx urb
+			 * that could use this fifo.  (dma complicates it...)
+			 *
+			 * if (bulk && qh->ring.next != &musb->in_bulk), then
+			 * we have a candidate... NAKing is *NOT* an error
+			 */
+			DBG(6, "RX end %d NAK timeout\n", epnum);
+			musb_ep_select(mbase, epnum);
+			musb_writew(epio, MUSB_RXCSR,
+					MUSB_RXCSR_H_WZC_BITS
+					| MUSB_RXCSR_H_REQPKT);
+
+			goto finish;
+		} else {
+			DBG(4, "RX end %d ISO data error\n", epnum);
+			/* packet error reported later */
+			iso_err = true;
+		}
+	}
+
+	/* faults abort the transfer */
+	if (status) {
+		/* clean up dma and collect transfer count */
+		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
+			(void) musb->dma_controller->channel_abort(dma);
+			xfer_len = dma->actual_len;
+		}
+		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
+		musb_writeb(epio, MUSB_RXINTERVAL, 0);
+		done = true;
+		goto finish;
+	}
+
+	if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
+		/* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
+		ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
+		goto finish;
+	}
+
+	/* thorough shutdown for now ... given more precise fault handling
+	 * and better queueing support, we might keep a DMA pipeline going
+	 * while processing this irq for earlier completions.
+	 */
+
+	/* FIXME this is _way_ too much in-line logic for Mentor DMA */
+
+#ifndef CONFIG_USB_INVENTRA_DMA
+	if (rx_csr & MUSB_RXCSR_H_REQPKT)  {
+		/* REVISIT this happened for a while on some short reads...
+		 * the cleanup still needs investigation... looks bad...
+		 * and also duplicates dma cleanup code above ... plus,
+		 * shouldn't this be the "half full" double buffer case?
+		 */
+		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
+			(void) musb->dma_controller->channel_abort(dma);
+			xfer_len = dma->actual_len;
+			done = true;
+		}
+
+		DBG(2, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr,
+				xfer_len, dma ? ", dma" : "");
+		rx_csr &= ~MUSB_RXCSR_H_REQPKT;
+
+		musb_ep_select(mbase, epnum);
+		musb_writew(epio, MUSB_RXCSR,
+				MUSB_RXCSR_H_WZC_BITS | rx_csr);
+	}
+#endif
+	if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
+		xfer_len = dma->actual_len;
+
+		val &= ~(MUSB_RXCSR_DMAENAB
+			| MUSB_RXCSR_H_AUTOREQ
+			| MUSB_RXCSR_AUTOCLEAR
+			| MUSB_RXCSR_RXPKTRDY);
+		musb_writew(hw_ep->regs, MUSB_RXCSR, val);
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+		/* done if urb buffer is full or a short packet is received */
+		done = (urb->actual_length + xfer_len >=
+				urb->transfer_buffer_length
+			|| dma->actual_len < qh->maxpacket);
+
+		/* send IN token for next packet, without AUTOREQ */
+		if (!done) {
+			val |= MUSB_RXCSR_H_REQPKT;
+			musb_writew(epio, MUSB_RXCSR,
+				MUSB_RXCSR_H_WZC_BITS | val);
+		}
+
+		DBG(4, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum,
+			done ? "off" : "reset",
+			musb_readw(epio, MUSB_RXCSR),
+			musb_readw(epio, MUSB_RXCOUNT));
+#else
+		done = true;
+#endif
+	} else if (urb->status == -EINPROGRESS) {
+		/* if no errors, be sure a packet is ready for unloading */
+		if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
+			status = -EPROTO;
+			ERR("Rx interrupt with no errors or packet!\n");
+
+			/* FIXME this is another "SHOULD NEVER HAPPEN" */
+
+/* SCRUB (RX) */
+			/* do the proper sequence to abort the transfer */
+			musb_ep_select(mbase, epnum);
+			val &= ~MUSB_RXCSR_H_REQPKT;
+			musb_writew(epio, MUSB_RXCSR, val);
+			goto finish;
+		}
+
+		/* we are expecting IN packets */
+#ifdef CONFIG_USB_INVENTRA_DMA
+		if (dma) {
+			struct dma_controller	*c;
+			u16			rx_count;
+			int			ret;
+
+			rx_count = musb_readw(epio, MUSB_RXCOUNT);
+
+			DBG(2, "RX%d count %d, buffer 0x%x len %d/%d\n",
+					epnum, rx_count,
+					urb->transfer_dma
+						+ urb->actual_length,
+					qh->offset,
+					urb->transfer_buffer_length);
+
+			c = musb->dma_controller;
+
+			dma->desired_mode = 0;
+#ifdef USE_MODE1
+			/* because of the issue below, mode 1 will
+			 * only rarely behave with correct semantics.
+			 */
+			if ((urb->transfer_flags &
+						URB_SHORT_NOT_OK)
+				&& (urb->transfer_buffer_length -
+						urb->actual_length)
+					> qh->maxpacket)
+				dma->desired_mode = 1;
+#endif
+
+/* Disadvantage of using mode 1:
+ *	It's basically usable only for the mass storage class; essentially
+ *	all other protocols also terminate transfers on short packets.
+ *
+ * Details:
+ *	An extra IN token is sent at the end of the transfer (due to AUTOREQ).
+ *	If you try to use mode 1 for (transfer_buffer_length - 512), and try
+ *	to use that extra IN token to grab the last packet using mode 0, the
+ *	problem is that you cannot be sure when the device will send the
+ *	last packet and set RxPktRdy. Sometimes the packet arrives too soon,
+ *	so it is lost when RxCSR is re-programmed at the end of the mode 1
+ *	transfer; sometimes it arrives just a little late, so configuring
+ *	for mode 0 right after the mode 1 transfer completes finds rxcount 0.
+ *	You might think you could simply wait for an interrupt when the
+ *	packet arrives -- but you won't get one!
+ */
+
+			val = musb_readw(epio, MUSB_RXCSR);
+			val &= ~MUSB_RXCSR_H_REQPKT;
+
+			if (dma->desired_mode == 0)
+				val &= ~MUSB_RXCSR_H_AUTOREQ;
+			else
+				val |= MUSB_RXCSR_H_AUTOREQ;
+			val |= MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAENAB;
+
+			musb_writew(epio, MUSB_RXCSR,
+				MUSB_RXCSR_H_WZC_BITS | val);
+
+			/* REVISIT: if/when actual_length != 0,
+			 * transfer_buffer_length needs to be
+			 * adjusted first...
+			 */
+			ret = c->channel_program(
+				dma, qh->maxpacket,
+				dma->desired_mode,
+				urb->transfer_dma
+					+ urb->actual_length,
+				(dma->desired_mode == 0)
+					? rx_count
+					: urb->transfer_buffer_length);
+
+			if (!ret) {
+				c->channel_release(dma);
+				hw_ep->rx_channel = NULL;
+				dma = NULL;
+				/* REVISIT reset CSR */
+			}
+		}
+#endif	/* Mentor DMA */
+
+		if (!dma) {
+			done = musb_host_packet_rx(musb, urb,
+					epnum, iso_err);
+			DBG(6, "read %spacket\n", done ? "last " : "");
+		}
+	}
+
+	if (dma && usb_pipeisoc(pipe)) {
+		struct usb_iso_packet_descriptor	*d;
+		int					iso_stat = status;
+
+		d = urb->iso_frame_desc + qh->iso_idx;
+		d->actual_length += xfer_len;
+		if (iso_err) {
+			iso_stat = -EILSEQ;
+			urb->error_count++;
+		}
+		d->status = iso_stat;
+	}
+
+finish:
+	urb->actual_length += xfer_len;
+	qh->offset += xfer_len;
+	if (done) {
+		if (urb->status == -EINPROGRESS)
+			urb->status = status;
+		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
+	}
+}
+
+/* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
+ * the software schedule associates multiple such nodes with a given
+ * host side hardware endpoint + direction; scheduling may activate
+ * that hardware endpoint.
+ */
+static int musb_schedule(
+	struct musb		*musb,
+	struct musb_qh		*qh,
+	int			is_in)
+{
+	int			idle;
+	int			best_diff;
+	int			best_end, epnum;
+	struct musb_hw_ep	*hw_ep = NULL;
+	struct list_head	*head = NULL;
+
+	/* use fixed hardware for control and bulk */
+	switch (qh->type) {
+	case USB_ENDPOINT_XFER_CONTROL:
+		head = &musb->control;
+		hw_ep = musb->control_ep;
+		break;
+	case USB_ENDPOINT_XFER_BULK:
+		hw_ep = musb->bulk_ep;
+		if (is_in)
+			head = &musb->in_bulk;
+		else
+			head = &musb->out_bulk;
+		break;
+	}
+	if (head) {
+		idle = list_empty(head);
+		list_add_tail(&qh->ring, head);
+		goto success;
+	}
+
+	/* else, periodic transfers get muxed to other endpoints */
+
+	/* FIXME this doesn't consider direction, so it can only
+	 * work for one half of the endpoint hardware, and assumes
+	 * the previous cases handled all non-shared endpoints...
+	 */
+
+	/* we know this qh hasn't been scheduled, so all we need to do
+	 * is choose which hardware endpoint to put it on ...
+	 *
+	 * REVISIT what we really want here is a regular schedule tree
+	 * like e.g. OHCI uses, but for now musb->periodic is just an
+	 * array of the _single_ logical endpoint associated with a
+	 * given physical one (identity mapping logical->physical).
+	 *
+	 * that simplistic approach makes TT scheduling a lot simpler;
+	 * there is none, and thus none of its complexity...
+	 */
+	best_diff = 4096;
+	best_end = -1;
+
+	for (epnum = 1; epnum < musb->nr_endpoints; epnum++) {
+		int	diff;
+
+		if (musb->periodic[epnum])
+			continue;
+		hw_ep = &musb->endpoints[epnum];
+		if (hw_ep == musb->bulk_ep)
+			continue;
+
+		if (is_in)
+			diff = hw_ep->max_packet_sz_rx - qh->maxpacket;
+		else
+			diff = hw_ep->max_packet_sz_tx - qh->maxpacket;
+
+		if (diff > 0 && best_diff > diff) {
+			best_diff = diff;
+			best_end = epnum;
+		}
+	}
+	if (best_end < 0)
+		return -ENOSPC;
+
+	idle = 1;
+	hw_ep = musb->endpoints + best_end;
+	musb->periodic[best_end] = qh;
+	DBG(4, "qh %p periodic slot %d\n", qh, best_end);
+success:
+	qh->hw_ep = hw_ep;
+	qh->hep->hcpriv = qh;
+	if (idle)
+		musb_start_urb(musb, is_in, qh);
+	return 0;
+}
+
+static int musb_urb_enqueue(
+	struct usb_hcd			*hcd,
+	struct urb			*urb,
+	gfp_t				mem_flags)
+{
+	unsigned long			flags;
+	struct musb			*musb = hcd_to_musb(hcd);
+	struct usb_host_endpoint	*hep = urb->ep;
+	struct musb_qh			*qh = hep->hcpriv;
+	struct usb_endpoint_descriptor	*epd = &hep->desc;
+	int				ret;
+	unsigned			type_reg;
+	unsigned			interval;
+
+	/* host role must be active */
+	if (!is_host_active(musb) || !musb->is_active)
+		return -ENODEV;
+
+	spin_lock_irqsave(&musb->lock, flags);
+	ret = usb_hcd_link_urb_to_ep(hcd, urb);
+	spin_unlock_irqrestore(&musb->lock, flags);
+	if (ret)
+		return ret;
+
+	/* DMA mapping was already done, if needed, and this urb is on
+	 * hep->urb_list ... so there's little to do unless hep wasn't
+	 * yet scheduled onto a live qh.
+	 *
+	 * REVISIT best to keep hep->hcpriv valid until the endpoint gets
+	 * disabled, testing for empty qh->ring and avoiding qh setup costs
+	 * except for the first urb queued after a config change.
+	 */
+	if (qh) {
+		urb->hcpriv = qh;
+		return 0;
+	}
+
+	/* Allocate and initialize qh, minimizing the work done each time
+	 * hw_ep gets reprogrammed, or with irqs blocked.  Then schedule it.
+	 *
+	 * REVISIT consider a dedicated qh kmem_cache, so it's harder
+	 * for bugs in other kernel code to break this driver...
+	 */
+	qh = kzalloc(sizeof *qh, mem_flags);
+	if (!qh) {
+		usb_hcd_unlink_urb_from_ep(hcd, urb);
+		return -ENOMEM;
+	}
+
+	qh->hep = hep;
+	qh->dev = urb->dev;
+	INIT_LIST_HEAD(&qh->ring);
+	qh->is_ready = 1;
+
+	qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize);
+
+	/* no high bandwidth support yet */
+	if (qh->maxpacket & ~0x7ff) {
+		ret = -EMSGSIZE;
+		goto done;
+	}
+
+	qh->epnum = epd->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
+	qh->type = epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+
+	/* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
+	qh->addr_reg = (u8) usb_pipedevice(urb->pipe);
+
+	/* precompute rxtype/txtype/type0 register */
+	type_reg = (qh->type << 4) | qh->epnum;
+	switch (urb->dev->speed) {
+	case USB_SPEED_LOW:
+		type_reg |= 0xc0;
+		break;
+	case USB_SPEED_FULL:
+		type_reg |= 0x80;
+		break;
+	default:
+		type_reg |= 0x40;
+	}
+	qh->type_reg = type_reg;
+
+	/* precompute rxinterval/txinterval register */
+	interval = min((u8)16, epd->bInterval);	/* log encoding */
+	switch (qh->type) {
+	case USB_ENDPOINT_XFER_INT:
+		/* fullspeed uses linear encoding */
+		if (USB_SPEED_FULL == urb->dev->speed) {
+			interval = epd->bInterval;
+			if (!interval)
+				interval = 1;
+		}
+		/* FALLTHROUGH */
+	case USB_ENDPOINT_XFER_ISOC:
+		/* iso always uses log encoding */
+		break;
+	default:
+		/* REVISIT we actually want to use NAK limits, hinting to the
+		 * transfer scheduling logic to try some other qh, e.g. try
+		 * for 2 msec first:
+		 *
+		 * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
+		 *
+		 * The downside of disabling this is that transfer scheduling
+		 * gets VERY unfair for nonperiodic transfers; a misbehaving
+		 * peripheral could make that hurt.  Or for reads, one that's
+		 * perfectly normal:  network and other drivers keep reads
+		 * posted at all times; having one pending for a week should
+		 * be perfectly safe.
+		 *
+		 * The upside of disabling it is that we avoid needing
+		 * transfer scheduling code to set this aside for a while.
+		 */
+		interval = 0;
+	}
+	qh->intv_reg = interval;
+
+	/* precompute addressing for external hub/tt ports */
+	if (musb->is_multipoint) {
+		struct usb_device	*parent = urb->dev->parent;
+
+		if (parent != hcd->self.root_hub) {
+			qh->h_addr_reg = (u8) parent->devnum;
+
+			/* set up tt info if needed */
+			if (urb->dev->tt) {
+				qh->h_port_reg = (u8) urb->dev->ttport;
+				qh->h_addr_reg |= 0x80;
+			}
+		}
+	}
+
+	/* invariant: hep->hcpriv is null OR the qh that's already scheduled.
+	 * until we get real dma queues (with an entry for each urb/buffer),
+	 * we only have work to do in the former case.
+	 */
+	spin_lock_irqsave(&musb->lock, flags);
+	if (hep->hcpriv) {
+		/* some concurrent activity submitted another urb to hep...
+		 * odd, rare, error prone, but legal.
+		 */
+		kfree(qh);
+		ret = 0;
+	} else
+		ret = musb_schedule(musb, qh,
+				epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);
+
+	if (ret == 0) {
+		urb->hcpriv = qh;
+		/* FIXME set urb->start_frame for iso/intr, it's tested in
+		 * musb_start_urb(), but otherwise only konicawc cares ...
+		 */
+	}
+	spin_unlock_irqrestore(&musb->lock, flags);
+
+done:
+	if (ret != 0) {
+		usb_hcd_unlink_urb_from_ep(hcd, urb);
+		kfree(qh);
+	}
+	return ret;
+}
+
+
+/*
+ * abort a transfer that's at the head of a hardware queue.
+ * called with controller locked, irqs blocked
+ * that hardware queue advances to the next transfer, unless prevented
+ */
+static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in)
+{
+	struct musb_hw_ep	*ep = qh->hw_ep;
+	void __iomem		*epio = ep->regs;
+	unsigned		hw_end = ep->epnum;
+	void __iomem		*regs = ep->musb->mregs;
+	u16			csr;
+	int			status = 0;
+
+	musb_ep_select(regs, hw_end);
+
+	if (is_dma_capable()) {
+		struct dma_channel	*dma;
+
+		dma = is_in ? ep->rx_channel : ep->tx_channel;
+		if (dma) {
+			status = ep->musb->dma_controller->channel_abort(dma);
+			DBG(status ? 1 : 3,
+				"abort %cX%d DMA for urb %p --> %d\n",
+				is_in ? 'R' : 'T', ep->epnum,
+				urb, status);
+			urb->actual_length += dma->actual_len;
+		}
+	}
+
+	/* turn off DMA requests, discard state, stop polling ... */
+	if (is_in) {
+		/* giveback saves bulk toggle */
+		csr = musb_h_flush_rxfifo(ep, 0);
+
+		/* REVISIT we still get an irq; should likely clear the
+		 * endpoint's irq status here to avoid bogus irqs.
+		 * clearing that status is platform-specific...
+		 */
+	} else {
+		musb_h_tx_flush_fifo(ep);
+		csr = musb_readw(epio, MUSB_TXCSR);
+		csr &= ~(MUSB_TXCSR_AUTOSET
+			| MUSB_TXCSR_DMAENAB
+			| MUSB_TXCSR_H_RXSTALL
+			| MUSB_TXCSR_H_NAKTIMEOUT
+			| MUSB_TXCSR_H_ERROR
+			| MUSB_TXCSR_TXPKTRDY);
+		musb_writew(epio, MUSB_TXCSR, csr);
+		/* REVISIT may need to clear FLUSHFIFO ... */
+		musb_writew(epio, MUSB_TXCSR, csr);
+		/* flush cpu writebuffer */
+		csr = musb_readw(epio, MUSB_TXCSR);
+	}
+	if (status == 0)
+		musb_advance_schedule(ep->musb, urb, ep, is_in);
+	return status;
+}
+
+static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+	struct musb		*musb = hcd_to_musb(hcd);
+	struct musb_qh		*qh;
+	struct list_head	*sched;
+	unsigned long		flags;
+	int			ret;
+
+	DBG(4, "urb=%p, dev%d ep%d%s\n", urb,
+			usb_pipedevice(urb->pipe),
+			usb_pipeendpoint(urb->pipe),
+			usb_pipein(urb->pipe) ? "in" : "out");
+
+	spin_lock_irqsave(&musb->lock, flags);
+	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
+	if (ret)
+		goto done;
+
+	qh = urb->hcpriv;
+	if (!qh)
+		goto done;
+
+	/* Any URB not actively programmed into endpoint hardware can be
+	 * immediately given back.  Such an URB must be at the head of its
+	 * endpoint queue, unless someday we get real DMA queues.  And even
+	 * then, it might not be known to the hardware...
+	 *
+	 * Otherwise abort current transfer, pending dma, etc.; urb->status
+	 * has already been updated.  This is a synchronous abort; it'd be
+	 * OK to hold off until after some IRQ, though.
+	 */
+	if (!qh->is_ready || urb->urb_list.prev != &qh->hep->urb_list)
+		ret = -EINPROGRESS;
+	else {
+		switch (qh->type) {
+		case USB_ENDPOINT_XFER_CONTROL:
+			sched = &musb->control;
+			break;
+		case USB_ENDPOINT_XFER_BULK:
+			if (usb_pipein(urb->pipe))
+				sched = &musb->in_bulk;
+			else
+				sched = &musb->out_bulk;
+			break;
+		default:
+			/* REVISIT when we get a schedule tree, periodic
+			 * transfers won't always be at the head of a
+			 * singleton queue...
+			 */
+			sched = NULL;
+			break;
+		}
+	}
+
+	/* NOTE:  qh is invalid unless !list_empty(&hep->urb_list) */
+	if (ret < 0 || (sched && qh != first_qh(sched))) {
+		int	ready = qh->is_ready;
+
+		ret = 0;
+		qh->is_ready = 0;
+		__musb_giveback(musb, urb, 0);
+		qh->is_ready = ready;
+	} else
+		ret = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
+done:
+	spin_unlock_irqrestore(&musb->lock, flags);
+	return ret;
+}
+
+/* disable an endpoint */
+static void
+musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
+{
+	u8			epnum = hep->desc.bEndpointAddress;
+	unsigned long		flags;
+	struct musb		*musb = hcd_to_musb(hcd);
+	u8			is_in = epnum & USB_DIR_IN;
+	struct musb_qh		*qh = hep->hcpriv;
+	struct urb		*urb, *tmp;
+	struct list_head	*sched;
+
+	if (!qh)
+		return;
+
+	spin_lock_irqsave(&musb->lock, flags);
+
+	switch (qh->type) {
+	case USB_ENDPOINT_XFER_CONTROL:
+		sched = &musb->control;
+		break;
+	case USB_ENDPOINT_XFER_BULK:
+		if (is_in)
+			sched = &musb->in_bulk;
+		else
+			sched = &musb->out_bulk;
+		break;
+	default:
+		/* REVISIT when we get a schedule tree, periodic transfers
+		 * won't always be at the head of a singleton queue...
+		 */
+		sched = NULL;
+		break;
+	}
+
+	/* NOTE:  qh is invalid unless !list_empty(&hep->urb_list) */
+
+	/* kick first urb off the hardware, if needed */
+	qh->is_ready = 0;
+	if (!sched || qh == first_qh(sched)) {
+		urb = next_urb(qh);
+
+		/* make software (then hardware) stop ASAP */
+		if (!urb->unlinked)
+			urb->status = -ESHUTDOWN;
+
+		/* cleanup */
+		musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
+	} else
+		urb = NULL;
+
+	/* then just nuke all the others */
+	list_for_each_entry_safe_from(urb, tmp, &hep->urb_list, urb_list)
+		musb_giveback(qh, urb, -ESHUTDOWN);
+
+	spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+static int musb_h_get_frame_number(struct usb_hcd *hcd)
+{
+	struct musb	*musb = hcd_to_musb(hcd);
+
+	return musb_readw(musb->mregs, MUSB_FRAME);
+}
+
+static int musb_h_start(struct usb_hcd *hcd)
+{
+	struct musb	*musb = hcd_to_musb(hcd);
+
+	/* NOTE: musb_start() is called when the hub driver turns
+	 * on port power, or when (OTG) peripheral starts.
+	 */
+	hcd->state = HC_STATE_RUNNING;
+	musb->port1_status = 0;
+	return 0;
+}
+
+static void musb_h_stop(struct usb_hcd *hcd)
+{
+	musb_stop(hcd_to_musb(hcd));
+	hcd->state = HC_STATE_HALT;
+}
+
+static int musb_bus_suspend(struct usb_hcd *hcd)
+{
+	struct musb	*musb = hcd_to_musb(hcd);
+
+	if (musb->xceiv.state == OTG_STATE_A_SUSPEND)
+		return 0;
+
+	if (is_host_active(musb) && musb->is_active) {
+		WARNING("trying to suspend as %s is_active=%i\n",
+			otg_state_string(musb), musb->is_active);
+		return -EBUSY;
+	} else
+		return 0;
+}
+
+static int musb_bus_resume(struct usb_hcd *hcd)
+{
+	/* resuming child port does the work */
+	return 0;
+}
+
+const struct hc_driver musb_hc_driver = {
+	.description		= "musb-hcd",
+	.product_desc		= "MUSB HDRC host driver",
+	.hcd_priv_size		= sizeof(struct musb),
+	.flags			= HCD_USB2 | HCD_MEMORY,
+
+	/* not using irq handler or reset hooks from usbcore, since
+	 * those must be shared with peripheral code for OTG configs
+	 */
+
+	.start			= musb_h_start,
+	.stop			= musb_h_stop,
+
+	.get_frame_number	= musb_h_get_frame_number,
+
+	.urb_enqueue		= musb_urb_enqueue,
+	.urb_dequeue		= musb_urb_dequeue,
+	.endpoint_disable	= musb_h_disable,
+
+	.hub_status_data	= musb_hub_status_data,
+	.hub_control		= musb_hub_control,
+	.bus_suspend		= musb_bus_suspend,
+	.bus_resume		= musb_bus_resume,
+	/* .start_port_reset	= NULL, */
+	/* .hub_irq_enable	= NULL, */
+};
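+
+/*
+ * Illustrative sketch only (not part of this interface): a bus glue
+ * layer would normally hand musb_hc_driver to usbcore roughly as
+ *
+ *	struct usb_hcd *hcd = usb_create_hcd(&musb_hc_driver, dev, "musb");
+ *	...
+ *	retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ *
+ * though here the musb core manages hcd setup itself, since the irq
+ * handler and reset hooks are shared with peripheral-side code.
+ */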
diff --git a/drivers/usb/musb/musb_host.h b/drivers/usb/musb/musb_host.h
new file mode 100644
index 0000000..77bcdb9
--- /dev/null
+++ b/drivers/usb/musb/musb_host.h
@@ -0,0 +1,110 @@
+/*
+ * MUSB OTG driver host defines
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef _MUSB_HOST_H
+#define _MUSB_HOST_H
+
+static inline struct usb_hcd *musb_to_hcd(struct musb *musb)
+{
+	return container_of((void *) musb, struct usb_hcd, hcd_priv);
+}
+
+static inline struct musb *hcd_to_musb(struct usb_hcd *hcd)
+{
+	return (struct musb *) (hcd->hcd_priv);
+}
+
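+/*
+ * These helpers invert each other: usbcore allocates hcd_priv_size
+ * (== sizeof(struct musb), see musb_hc_driver) bytes at hcd->hcd_priv,
+ * so the musb state lives inside the usb_hcd allocation.
+ */
+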
+/* stored in "usb_host_endpoint.hcpriv" for scheduled endpoints */
+struct musb_qh {
+	struct usb_host_endpoint *hep;		/* usbcore info */
+	struct usb_device	*dev;
+	struct musb_hw_ep	*hw_ep;		/* current binding */
+
+	struct list_head	ring;		/* of musb_qh */
+	/* struct musb_qh		*next; */	/* for periodic tree */
+
+	unsigned		offset;		/* in urb->transfer_buffer */
+	unsigned		segsize;	/* current xfer fragment */
+
+	u8			type_reg;	/* {rx,tx} type register */
+	u8			intv_reg;	/* {rx,tx} interval register */
+	u8			addr_reg;	/* device address register */
+	u8			h_addr_reg;	/* hub address register */
+	u8			h_port_reg;	/* hub port register */
+
+	u8			is_ready;	/* safe to modify hw_ep */
+	u8			type;		/* XFERTYPE_* */
+	u8			epnum;
+	u16			maxpacket;
+	u16			frame;		/* for periodic schedule */
+	unsigned		iso_idx;	/* in urb->iso_frame_desc[] */
+};
+
+/* map from control or bulk queue head to the first qh on that ring */
+static inline struct musb_qh *first_qh(struct list_head *q)
+{
+	if (list_empty(q))
+		return NULL;
+	return list_entry(q->next, struct musb_qh, ring);
+}
+
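+/* e.g. first_qh(&musb->in_bulk) is the bulk-IN qh currently owning the
+ * shared bulk endpoint; callers must handle the NULL (empty-ring) case.
+ */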
+
+extern void musb_root_disconnect(struct musb *musb);
+
+struct usb_hcd;
+
+extern int musb_hub_status_data(struct usb_hcd *hcd, char *buf);
+extern int musb_hub_control(struct usb_hcd *hcd,
+			u16 typeReq, u16 wValue, u16 wIndex,
+			char *buf, u16 wLength);
+
+extern const struct hc_driver musb_hc_driver;
+
+static inline struct urb *next_urb(struct musb_qh *qh)
+{
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+	struct list_head	*queue;
+
+	if (!qh)
+		return NULL;
+	queue = &qh->hep->urb_list;
+	if (list_empty(queue))
+		return NULL;
+	return list_entry(queue->next, struct urb, urb_list);
+#else
+	return NULL;
+#endif
+}
+
+#endif				/* _MUSB_HOST_H */
diff --git a/drivers/usb/musb/musb_io.h b/drivers/usb/musb/musb_io.h
new file mode 100644
index 0000000..6bbedae
--- /dev/null
+++ b/drivers/usb/musb/musb_io.h
@@ -0,0 +1,115 @@
+/*
+ * MUSB OTG driver register I/O
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MUSB_LINUX_PLATFORM_ARCH_H__
+#define __MUSB_LINUX_PLATFORM_ARCH_H__
+
+#include <linux/io.h>
+
+#ifndef	CONFIG_ARM
+static inline void readsl(const void __iomem *addr, void *buf, int len)
+	{ insl((unsigned long)addr, buf, len); }
+static inline void readsw(const void __iomem *addr, void *buf, int len)
+	{ insw((unsigned long)addr, buf, len); }
+static inline void readsb(const void __iomem *addr, void *buf, int len)
+	{ insb((unsigned long)addr, buf, len); }
+
+static inline void writesl(const void __iomem *addr, const void *buf, int len)
+	{ outsl((unsigned long)addr, buf, len); }
+static inline void writesw(const void __iomem *addr, const void *buf, int len)
+	{ outsw((unsigned long)addr, buf, len); }
+static inline void writesb(const void __iomem *addr, const void *buf, int len)
+	{ outsb((unsigned long)addr, buf, len); }
+
+#endif
+
+/* NOTE:  these offsets are all in bytes */
+
+static inline u16 musb_readw(const void __iomem *addr, unsigned offset)
+	{ return __raw_readw(addr + offset); }
+
+static inline u32 musb_readl(const void __iomem *addr, unsigned offset)
+	{ return __raw_readl(addr + offset); }
+
+
+static inline void musb_writew(void __iomem *addr, unsigned offset, u16 data)
+	{ __raw_writew(data, addr + offset); }
+
+static inline void musb_writel(void __iomem *addr, unsigned offset, u32 data)
+	{ __raw_writel(data, addr + offset); }
+
+
+#ifdef CONFIG_USB_TUSB6010
+
+/*
+ * TUSB6010 doesn't allow 8-bit access; 16-bit access is the minimum.
+ */
+static inline u8 musb_readb(const void __iomem *addr, unsigned offset)
+{
+	u16 tmp;
+	u8 val;
+
+	tmp = __raw_readw(addr + (offset & ~1));
+	if (offset & 1)
+		val = (tmp >> 8);
+	else
+		val = tmp & 0xff;
+
+	return val;
+}
+
+static inline void musb_writeb(void __iomem *addr, unsigned offset, u8 data)
+{
+	u16 tmp;
+
+	tmp = __raw_readw(addr + (offset & ~1));
+	if (offset & 1)
+		tmp = (data << 8) | (tmp & 0xff);
+	else
+		tmp = (tmp & 0xff00) | data;
+
+	__raw_writew(tmp, addr + (offset & ~1));
+}
+
+#else
+
+static inline u8 musb_readb(const void __iomem *addr, unsigned offset)
+	{ return __raw_readb(addr + offset); }
+
+static inline void musb_writeb(void __iomem *addr, unsigned offset, u8 data)
+	{ __raw_writeb(data, addr + offset); }
+
+#endif	/* CONFIG_USB_TUSB6010 */
+
+#endif
diff --git a/drivers/usb/musb/musb_procfs.c b/drivers/usb/musb/musb_procfs.c
new file mode 100644
index 0000000..55e6b78
--- /dev/null
+++ b/drivers/usb/musb/musb_procfs.c
@@ -0,0 +1,830 @@
+/*
+ * MUSB OTG driver debug support
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>	/* FIXME remove procfs writes */
+#include <asm/arch/hardware.h>
+
+#include "musb_core.h"
+
+#include "davinci.h"
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+
+static int dump_qh(struct musb_qh *qh, char *buf, unsigned max)
+{
+	int				count;
+	int				tmp;
+	struct usb_host_endpoint	*hep = qh->hep;
+	struct urb			*urb;
+
+	count = snprintf(buf, max, "    qh %p dev%d ep%d%s max%d\n",
+			qh, qh->dev->devnum, qh->epnum,
+			({ char *s; switch (qh->type) {
+			case USB_ENDPOINT_XFER_BULK:
+				s = "-bulk"; break;
+			case USB_ENDPOINT_XFER_INT:
+				s = "-int"; break;
+			case USB_ENDPOINT_XFER_CONTROL:
+				s = ""; break;
+			default:
+				s = "-iso"; break;
+			} s; }),
+			qh->maxpacket);
+	if (count <= 0)
+		return 0;
+	buf += count;
+	max -= count;
+
+	list_for_each_entry(urb, &hep->urb_list, urb_list) {
+		tmp = snprintf(buf, max, "\t%s urb %p %d/%d\n",
+				usb_pipein(urb->pipe) ? "in" : "out",
+				urb, urb->actual_length,
+				urb->transfer_buffer_length);
+		if (tmp <= 0)
+			break;
+		tmp = min(tmp, (int)max);
+		count += tmp;
+		buf += tmp;
+		max -= tmp;
+	}
+	return count;
+}
+
+static int
+dump_queue(struct list_head *q, char *buf, unsigned max)
+{
+	int		count = 0;
+	struct musb_qh	*qh;
+
+	list_for_each_entry(qh, q, ring) {
+		int	tmp;
+
+		tmp = dump_qh(qh, buf, max);
+		if (tmp <= 0)
+			break;
+		tmp = min(tmp, (int)max);
+		count += tmp;
+		buf += tmp;
+		max -= tmp;
+	}
+	return count;
+}
+
+#endif	/* HCD */
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+static int dump_ep(struct musb_ep *ep, char *buffer, unsigned max)
+{
+	char		*buf = buffer;
+	int		code = 0;
+	void __iomem	*regs = ep->hw_ep->regs;
+	char		*mode = "1buf";
+
+	if (ep->is_in) {
+		if (ep->hw_ep->tx_double_buffered)
+			mode = "2buf";
+	} else {
+		if (ep->hw_ep->rx_double_buffered)
+			mode = "2buf";
+	}
+
+	do {
+		struct usb_request	*req;
+
+		code = snprintf(buf, max,
+				"\n%s (hw%d): %s%s, csr %04x maxp %04x\n",
+				ep->name, ep->current_epnum,
+				mode, ep->dma ? " dma" : "",
+				musb_readw(regs,
+					(ep->is_in || !ep->current_epnum)
+						? MUSB_TXCSR
+						: MUSB_RXCSR),
+				musb_readw(regs, ep->is_in
+						? MUSB_TXMAXP
+						: MUSB_RXMAXP)
+				);
+		if (code <= 0)
+			break;
+		code = min(code, (int) max);
+		buf += code;
+		max -= code;
+
+		if (is_cppi_enabled() && ep->current_epnum) {
+			unsigned	cppi = ep->current_epnum - 1;
+			void __iomem	*base = ep->musb->ctrl_base;
+			unsigned	off1 = cppi << 2;
+			void __iomem	*ram = base;
+			char		tmp[16];
+
+			if (ep->is_in) {
+				ram += DAVINCI_TXCPPI_STATERAM_OFFSET(cppi);
+				tmp[0] = 0;
+			} else {
+				ram += DAVINCI_RXCPPI_STATERAM_OFFSET(cppi);
+				snprintf(tmp, sizeof tmp, "%d left, ",
+					musb_readl(base,
+					DAVINCI_RXCPPI_BUFCNT0_REG + off1));
+			}
+
+			code = snprintf(buf, max, "%cX DMA%d: %s"
+					"%08x %08x, %08x %08x; "
+					"%08x %08x %08x .. %08x\n",
+				ep->is_in ? 'T' : 'R',
+				ep->current_epnum - 1, tmp,
+				musb_readl(ram, 0 * 4),
+				musb_readl(ram, 1 * 4),
+				musb_readl(ram, 2 * 4),
+				musb_readl(ram, 3 * 4),
+				musb_readl(ram, 4 * 4),
+				musb_readl(ram, 5 * 4),
+				musb_readl(ram, 6 * 4),
+				musb_readl(ram, 7 * 4));
+			if (code <= 0)
+				break;
+			code = min(code, (int) max);
+			buf += code;
+			max -= code;
+		}
+
+		if (list_empty(&ep->req_list)) {
+			code = snprintf(buf, max, "\t(queue empty)\n");
+			if (code <= 0)
+				break;
+			code = min(code, (int) max);
+			buf += code;
+			max -= code;
+			break;
+		}
+		list_for_each_entry(req, &ep->req_list, list) {
+			code = snprintf(buf, max, "\treq %p, %s%s%d/%d\n",
+					req,
+					req->zero ? "zero, " : "",
+					req->short_not_ok ? "!short, " : "",
+					req->actual, req->length);
+			if (code <= 0)
+				break;
+			code = min(code, (int) max);
+			buf += code;
+			max -= code;
+		}
+	} while (0);
+	return buf - buffer;
+}
+#endif
+
+static int
+dump_end_info(struct musb *musb, u8 epnum, char *aBuffer, unsigned max)
+{
+	int			code = 0;
+	char			*buf = aBuffer;
+	struct musb_hw_ep	*hw_ep = &musb->endpoints[epnum];
+
+	do {
+		musb_ep_select(musb->mregs, epnum);
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+		if (is_host_active(musb)) {
+			int		dump_rx, dump_tx;
+			void __iomem	*regs = hw_ep->regs;
+
+			/* TEMPORARY (!) until we have a real periodic
+			 * schedule tree ...
+			 */
+			if (!epnum) {
+				/* control is shared, uses RX queue
+				 * but (mostly) shadowed tx registers
+				 */
+				dump_tx = !list_empty(&musb->control);
+				dump_rx = 0;
+			} else if (hw_ep == musb->bulk_ep) {
+				dump_tx = !list_empty(&musb->out_bulk);
+				dump_rx = !list_empty(&musb->in_bulk);
+			} else if (musb->periodic[epnum]) {
+				struct usb_host_endpoint	*hep;
+
+				hep = musb->periodic[epnum]->hep;
+				dump_rx = hep->desc.bEndpointAddress
+						& USB_ENDPOINT_DIR_MASK;
+				dump_tx = !dump_rx;
+			} else
+				break;
+			/* END TEMPORARY */
+
+
+			if (dump_rx) {
+				code = snprintf(buf, max,
+					"\nRX%d: %s rxcsr %04x interval %02x "
+					"max %04x type %02x; "
+					"dev %d hub %d port %d"
+					"\n",
+					epnum,
+					hw_ep->rx_double_buffered
+						? "2buf" : "1buf",
+					musb_readw(regs, MUSB_RXCSR),
+					musb_readb(regs, MUSB_RXINTERVAL),
+					musb_readw(regs, MUSB_RXMAXP),
+					musb_readb(regs, MUSB_RXTYPE),
+					/* FIXME:  assumes multipoint */
+					musb_readb(musb->mregs,
+						MUSB_BUSCTL_OFFSET(epnum,
+						MUSB_RXFUNCADDR)),
+					musb_readb(musb->mregs,
+						MUSB_BUSCTL_OFFSET(epnum,
+						MUSB_RXHUBADDR)),
+					musb_readb(musb->mregs,
+						MUSB_BUSCTL_OFFSET(epnum,
+						MUSB_RXHUBPORT))
+					);
+				if (code <= 0)
+					break;
+				code = min(code, (int) max);
+				buf += code;
+				max -= code;
+
+				if (is_cppi_enabled()
+						&& epnum
+						&& hw_ep->rx_channel) {
+					unsigned	cppi = epnum - 1;
+					unsigned	off1 = cppi << 2;
+					void __iomem	*base;
+					void __iomem	*ram;
+					char		tmp[16];
+
+					base = musb->ctrl_base;
+					ram = DAVINCI_RXCPPI_STATERAM_OFFSET(
+							cppi) + base;
+					snprintf(tmp, sizeof tmp, "%d left, ",
+						musb_readl(base,
+						DAVINCI_RXCPPI_BUFCNT0_REG
+								+ off1));
+
+					code = snprintf(buf, max,
+						"    rx dma%d: %s"
+						"%08x %08x, %08x %08x; "
+						"%08x %08x %08x .. %08x\n",
+						cppi, tmp,
+						musb_readl(ram, 0 * 4),
+						musb_readl(ram, 1 * 4),
+						musb_readl(ram, 2 * 4),
+						musb_readl(ram, 3 * 4),
+						musb_readl(ram, 4 * 4),
+						musb_readl(ram, 5 * 4),
+						musb_readl(ram, 6 * 4),
+						musb_readl(ram, 7 * 4));
+					if (code <= 0)
+						break;
+					code = min(code, (int) max);
+					buf += code;
+					max -= code;
+				}
+
+				if (hw_ep == musb->bulk_ep
+						&& !list_empty(
+							&musb->in_bulk)) {
+					code = dump_queue(&musb->in_bulk,
+							buf, max);
+					if (code <= 0)
+						break;
+					code = min(code, (int) max);
+					buf += code;
+					max -= code;
+				} else if (musb->periodic[epnum]) {
+					code = dump_qh(musb->periodic[epnum],
+							buf, max);
+					if (code <= 0)
+						break;
+					code = min(code, (int) max);
+					buf += code;
+					max -= code;
+				}
+			}
+
+			if (dump_tx) {
+				code = snprintf(buf, max,
+					"\nTX%d: %s txcsr %04x interval %02x "
+					"max %04x type %02x; "
+					"dev %d hub %d port %d"
+					"\n",
+					epnum,
+					hw_ep->tx_double_buffered
+						? "2buf" : "1buf",
+					musb_readw(regs, MUSB_TXCSR),
+					musb_readb(regs, MUSB_TXINTERVAL),
+					musb_readw(regs, MUSB_TXMAXP),
+					musb_readb(regs, MUSB_TXTYPE),
+					/* FIXME:  assumes multipoint */
+					musb_readb(musb->mregs,
+						MUSB_BUSCTL_OFFSET(epnum,
+						MUSB_TXFUNCADDR)),
+					musb_readb(musb->mregs,
+						MUSB_BUSCTL_OFFSET(epnum,
+						MUSB_TXHUBADDR)),
+					musb_readb(musb->mregs,
+						MUSB_BUSCTL_OFFSET(epnum,
+						MUSB_TXHUBPORT))
+					);
+				if (code <= 0)
+					break;
+				code = min(code, (int) max);
+				buf += code;
+				max -= code;
+
+				if (is_cppi_enabled()
+						&& epnum
+						&& hw_ep->tx_channel) {
+					unsigned	cppi = epnum - 1;
+					void __iomem	*base;
+					void __iomem	*ram;
+
+					base = musb->ctrl_base;
+					ram = DAVINCI_RXCPPI_STATERAM_OFFSET(
+							cppi) + base;
+					code = snprintf(buf, max,
+						"    tx dma%d: "
+						"%08x %08x, %08x %08x; "
+						"%08x %08x %08x .. %08x\n",
+						cppi,
+						musb_readl(ram, 0 * 4),
+						musb_readl(ram, 1 * 4),
+						musb_readl(ram, 2 * 4),
+						musb_readl(ram, 3 * 4),
+						musb_readl(ram, 4 * 4),
+						musb_readl(ram, 5 * 4),
+						musb_readl(ram, 6 * 4),
+						musb_readl(ram, 7 * 4));
+					if (code <= 0)
+						break;
+					code = min(code, (int) max);
+					buf += code;
+					max -= code;
+				}
+
+				if (hw_ep == musb->control_ep
+						&& !list_empty(
+							&musb->control)) {
+					code = dump_queue(&musb->control,
+							buf, max);
+					if (code <= 0)
+						break;
+					code = min(code, (int) max);
+					buf += code;
+					max -= code;
+				} else if (hw_ep == musb->bulk_ep
+						&& !list_empty(
+							&musb->out_bulk)) {
+					code = dump_queue(&musb->out_bulk,
+							buf, max);
+					if (code <= 0)
+						break;
+					code = min(code, (int) max);
+					buf += code;
+					max -= code;
+				} else if (musb->periodic[epnum]) {
+					code = dump_qh(musb->periodic[epnum],
+							buf, max);
+					if (code <= 0)
+						break;
+					code = min(code, (int) max);
+					buf += code;
+					max -= code;
+				}
+			}
+		}
+#endif
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+		if (is_peripheral_active(musb)) {
+			code = 0;
+
+			if (hw_ep->ep_in.desc || !epnum) {
+				code = dump_ep(&hw_ep->ep_in, buf, max);
+				if (code <= 0)
+					break;
+				code = min(code, (int) max);
+				buf += code;
+				max -= code;
+			}
+			if (hw_ep->ep_out.desc) {
+				code = dump_ep(&hw_ep->ep_out, buf, max);
+				if (code <= 0)
+					break;
+				code = min(code, (int) max);
+				buf += code;
+				max -= code;
+			}
+		}
+#endif
+	} while (0);
+
+	return buf - aBuffer;
+}
+
+/* Dump the current status and compile options.
+ * @param musb the device driver instance
+ * @param buffer where to dump the status; it must be big enough to hold
+ * the result, otherwise BAD THINGS HAPPEN (TM).
+ */
+static int dump_header_stats(struct musb *musb, char *buffer)
+{
+	int code, count = 0;
+	const void __iomem *mbase = musb->mregs;
+
+	*buffer = 0;
+	count = sprintf(buffer, "Status: %sHDRC, Mode=%s "
+				"(Power=%02x, DevCtl=%02x)\n",
+			(musb->is_multipoint ? "M" : ""), MUSB_MODE(musb),
+			musb_readb(mbase, MUSB_POWER),
+			musb_readb(mbase, MUSB_DEVCTL));
+	if (count <= 0)
+		return 0;
+	buffer += count;
+
+	code = sprintf(buffer, "OTG state: %s; %sactive\n",
+			otg_state_string(musb),
+			musb->is_active ? "" : "in");
+	if (code <= 0)
+		goto done;
+	buffer += code;
+	count += code;
+
+	code = sprintf(buffer,
+			"Options: "
+#ifdef CONFIG_MUSB_PIO_ONLY
+			"pio"
+#elif defined(CONFIG_USB_TI_CPPI_DMA)
+			"cppi-dma"
+#elif defined(CONFIG_USB_INVENTRA_DMA)
+			"musb-dma"
+#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
+			"tusb-omap-dma"
+#else
+			"?dma?"
+#endif
+			", "
+#ifdef CONFIG_USB_MUSB_OTG
+			"otg (peripheral+host)"
+#elif defined(CONFIG_USB_GADGET_MUSB_HDRC)
+			"peripheral"
+#elif defined(CONFIG_USB_MUSB_HDRC_HCD)
+			"host"
+#endif
+			", debug=%d [eps=%d]\n",
+		debug,
+		musb->nr_endpoints);
+	if (code <= 0)
+		goto done;
+	count += code;
+	buffer += code;
+
+#ifdef	CONFIG_USB_GADGET_MUSB_HDRC
+	code = sprintf(buffer, "Peripheral address: %02x\n",
+			musb_readb(musb->ctrl_base, MUSB_FADDR));
+	if (code <= 0)
+		goto done;
+	buffer += code;
+	count += code;
+#endif
+
+#ifdef	CONFIG_USB_MUSB_HDRC_HCD
+	code = sprintf(buffer, "Root port status: %08x\n",
+			musb->port1_status);
+	if (code <= 0)
+		goto done;
+	buffer += code;
+	count += code;
+#endif
+
+#ifdef	CONFIG_ARCH_DAVINCI
+	code = sprintf(buffer,
+			"DaVinci: ctrl=%02x stat=%1x phy=%03x\n"
+			"\trndis=%05x auto=%04x intsrc=%08x intmsk=%08x"
+			"\n",
+			musb_readl(musb->ctrl_base, DAVINCI_USB_CTRL_REG),
+			musb_readl(musb->ctrl_base, DAVINCI_USB_STAT_REG),
+			__raw_readl((void __force __iomem *)
+					IO_ADDRESS(USBPHY_CTL_PADDR)),
+			musb_readl(musb->ctrl_base, DAVINCI_RNDIS_REG),
+			musb_readl(musb->ctrl_base, DAVINCI_AUTOREQ_REG),
+			musb_readl(musb->ctrl_base,
+					DAVINCI_USB_INT_SOURCE_REG),
+			musb_readl(musb->ctrl_base,
+					DAVINCI_USB_INT_MASK_REG));
+	if (code <= 0)
+		goto done;
+	count += code;
+	buffer += code;
+#endif	/* DAVINCI */
+
+#ifdef CONFIG_USB_TUSB6010
+	code = sprintf(buffer,
+			"TUSB6010: devconf %08x, phy enable %08x drive %08x"
+			"\n\totg %03x timer %08x"
+			"\n\tprcm conf %08x mgmt %08x; int src %08x mask %08x"
+			"\n",
+			musb_readl(musb->ctrl_base, TUSB_DEV_CONF),
+			musb_readl(musb->ctrl_base, TUSB_PHY_OTG_CTRL_ENABLE),
+			musb_readl(musb->ctrl_base, TUSB_PHY_OTG_CTRL),
+			musb_readl(musb->ctrl_base, TUSB_DEV_OTG_STAT),
+			musb_readl(musb->ctrl_base, TUSB_DEV_OTG_TIMER),
+			musb_readl(musb->ctrl_base, TUSB_PRCM_CONF),
+			musb_readl(musb->ctrl_base, TUSB_PRCM_MNGMT),
+			musb_readl(musb->ctrl_base, TUSB_INT_SRC),
+			musb_readl(musb->ctrl_base, TUSB_INT_MASK));
+	if (code <= 0)
+		goto done;
+	count += code;
+	buffer += code;
+#endif	/* CONFIG_USB_TUSB6010 */
+
+	if (is_cppi_enabled() && musb->dma_controller) {
+		code = sprintf(buffer,
+				"CPPI: txcr=%d txsrc=%01x txena=%01x; "
+				"rxcr=%d rxsrc=%01x rxena=%01x "
+				"\n",
+				musb_readl(musb->ctrl_base,
+						DAVINCI_TXCPPI_CTRL_REG),
+				musb_readl(musb->ctrl_base,
+						DAVINCI_TXCPPI_RAW_REG),
+				musb_readl(musb->ctrl_base,
+						DAVINCI_TXCPPI_INTENAB_REG),
+				musb_readl(musb->ctrl_base,
+						DAVINCI_RXCPPI_CTRL_REG),
+				musb_readl(musb->ctrl_base,
+						DAVINCI_RXCPPI_RAW_REG),
+				musb_readl(musb->ctrl_base,
+						DAVINCI_RXCPPI_INTENAB_REG));
+		if (code <= 0)
+			goto done;
+		count += code;
+		buffer += code;
+	}
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+	if (is_peripheral_enabled(musb)) {
+		code = sprintf(buffer, "Gadget driver: %s\n",
+				musb->gadget_driver
+					? musb->gadget_driver->driver.name
+					: "(none)");
+		if (code <= 0)
+			goto done;
+		count += code;
+		buffer += code;
+	}
+#endif
+
+done:
+	return count;
+}
+
+/* Write to ProcFS
+ *
+ * C soft-connect
+ * c soft-disconnect
+ * I enable HS
+ * i disable HS
+ * s stop session (not yet handled below)
+ * F force session (OTG-unfriendly)
+ * E rElinquish bus (OTG) (not yet handled below)
+ * H request host mode
+ * h cancel host request
+ * T start sending TEST_PACKET
+ * D<num> set/query the debug level
+ */
+static int musb_proc_write(struct file *file, const char __user *buffer,
+			unsigned long count, void *data)
+{
+	char cmd;
+	u8 reg;
+	struct musb *musb = (struct musb *)data;
+	void __iomem *mbase = musb->mregs;
+
+	if (unlikely(copy_from_user(&cmd, buffer, 1)))
+		return -EFAULT;
+
+	switch (cmd) {
+	case 'C':
+		if (mbase) {
+			reg = musb_readb(mbase, MUSB_POWER)
+					| MUSB_POWER_SOFTCONN;
+			musb_writeb(mbase, MUSB_POWER, reg);
+		}
+		break;
+
+	case 'c':
+		if (mbase) {
+			reg = musb_readb(mbase, MUSB_POWER)
+					& ~MUSB_POWER_SOFTCONN;
+			musb_writeb(mbase, MUSB_POWER, reg);
+		}
+		break;
+
+	case 'I':
+		if (mbase) {
+			reg = musb_readb(mbase, MUSB_POWER)
+					| MUSB_POWER_HSENAB;
+			musb_writeb(mbase, MUSB_POWER, reg);
+		}
+		break;
+
+	case 'i':
+		if (mbase) {
+			reg = musb_readb(mbase, MUSB_POWER)
+					& ~MUSB_POWER_HSENAB;
+			musb_writeb(mbase, MUSB_POWER, reg);
+		}
+		break;
+
+	case 'F':
+		reg = musb_readb(mbase, MUSB_DEVCTL);
+		reg |= MUSB_DEVCTL_SESSION;
+		musb_writeb(mbase, MUSB_DEVCTL, reg);
+		break;
+
+	case 'H':
+		if (mbase) {
+			reg = musb_readb(mbase, MUSB_DEVCTL);
+			reg |= MUSB_DEVCTL_HR;
+			musb_writeb(mbase, MUSB_DEVCTL, reg);
+			/* MUSB_HST_MODE( ((struct musb*)data) ); */
+			/* WARNING("Host Mode\n"); */
+		}
+		break;
+
+	case 'h':
+		if (mbase) {
+			reg = musb_readb(mbase, MUSB_DEVCTL);
+			reg &= ~MUSB_DEVCTL_HR;
+			musb_writeb(mbase, MUSB_DEVCTL, reg);
+		}
+		break;
+
+	case 'T':
+		if (mbase) {
+			musb_load_testpacket(musb);
+			musb_writeb(mbase, MUSB_TESTMODE,
+					MUSB_TEST_PACKET);
+		}
+		break;
+
+#if (MUSB_DEBUG > 0)
+		/* set/read debug level */
+	case 'D':{
+			if (count > 1) {
+				char digits[8], *p = digits;
+				int i = 0, level = 0, sign = 1;
+				int len = min(count - 1, (unsigned long)8);
+
+				if (copy_from_user(&digits, &buffer[1], len))
+					return -EFAULT;
+
+				/* optional sign */
+				if (*p == '-') {
+					len -= 1;
+					sign = -sign;
+					p++;
+				}
+
+				/* read it */
+				while (i++ < len && *p >= '0' && *p <= '9') {
+					level = level * 10 + (*p - '0');
+					p++;
+				}
+
+				level *= sign;
+				DBG(1, "debug level %d\n", level);
+				debug = level;
+			}
+		}
+		break;
+
+
+	case '?':
+		INFO("?: you are seeing it\n");
+		INFO("C/c: soft connect enable/disable\n");
+		INFO("I/i: high-speed enable/disable\n");
+		INFO("F: force session start\n");
+		INFO("H: host mode\n");
+		INFO("T: start sending TEST_PACKET\n");
+		INFO("D: set/read debug level\n");
+		break;
+#endif
+
+	default:
+		ERR("Command %c not implemented\n", cmd);
+		break;
+	}
+
+	musb_platform_try_idle(musb, 0);
+
+	return count;
+}
+
+static int musb_proc_read(char *page, char **start,
+			off_t off, int count, int *eof, void *data)
+{
+	char *buffer = page;
+	int code = 0;
+	unsigned long	flags;
+	struct musb	*musb = data;
+	unsigned	epnum;
+
+	count -= off;
+	count -= 1;		/* for NUL at end */
+	if (count <= 0)
+		return -EINVAL;
+
+	spin_lock_irqsave(&musb->lock, flags);
+
+	code = dump_header_stats(musb, buffer);
+	if (code > 0) {
+		buffer += code;
+		count -= code;
+	}
+
+	/* generate the report for the end points */
+	/* REVISIT ... not unless something's connected! */
+	for (epnum = 0; count > 0 && epnum < musb->nr_endpoints;
+			epnum++) {
+		code = dump_end_info(musb, epnum, buffer, count);
+		if (code > 0) {
+			buffer += code;
+			count -= code;
+		}
+	}
+
+	musb_platform_try_idle(musb, 0);
+
+	spin_unlock_irqrestore(&musb->lock, flags);
+	*eof = 1;
+
+	return buffer - page;
+}
+
+void __devexit musb_debug_delete(char *name, struct musb *musb)
+{
+	if (musb->proc_entry)
+		remove_proc_entry(name, NULL);
+}
+
+struct proc_dir_entry *__init
+musb_debug_create(char *name, struct musb *data)
+{
+	struct proc_dir_entry	*pde;
+
+	/* FIXME convert everything to seq_file; then later, debugfs */
+
+	if (!name)
+		return NULL;
+
+	pde = create_proc_entry(name, S_IFREG | S_IRUGO | S_IWUSR, NULL);
+	data->proc_entry = pde;
+	if (pde) {
+		pde->data = data;
+		/* pde->owner = THIS_MODULE; */
+
+		pde->read_proc = musb_proc_read;
+		pde->write_proc = musb_proc_write;
+
+		pde->size = 0;
+
+		pr_debug("Registered /proc/%s\n", name);
+	} else {
+		pr_debug("Cannot create a valid proc file entry\n");
+	}
+
+	return pde;
+}
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
new file mode 100644
index 0000000..9c22866
--- /dev/null
+++ b/drivers/usb/musb/musb_regs.h
@@ -0,0 +1,300 @@
+/*
+ * MUSB OTG driver register defines
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MUSB_REGS_H__
+#define __MUSB_REGS_H__
+
+#define MUSB_EP0_FIFOSIZE	64	/* This is non-configurable */
+
+/*
+ * Common USB registers
+ */
+
+#define MUSB_FADDR		0x00	/* 8-bit */
+#define MUSB_POWER		0x01	/* 8-bit */
+
+#define MUSB_INTRTX		0x02	/* 16-bit */
+#define MUSB_INTRRX		0x04
+#define MUSB_INTRTXE		0x06
+#define MUSB_INTRRXE		0x08
+#define MUSB_INTRUSB		0x0A	/* 8 bit */
+#define MUSB_INTRUSBE		0x0B	/* 8 bit */
+#define MUSB_FRAME		0x0C
+#define MUSB_INDEX		0x0E	/* 8 bit */
+#define MUSB_TESTMODE		0x0F	/* 8 bit */
+
+/* Get offset for a given FIFO from musb->mregs */
+#ifdef	CONFIG_USB_TUSB6010
+#define MUSB_FIFO_OFFSET(epnum)	(0x200 + ((epnum) * 0x20))
+#else
+#define MUSB_FIFO_OFFSET(epnum)	(0x20 + ((epnum) * 4))
+#endif
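+
+/* For instance, endpoint 1's FIFO sits at offset 0x220 on TUSB6010 and
+ * at 0x24 with the standard layout.
+ */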
+
+/*
+ * Additional Control Registers
+ */
+
+#define MUSB_DEVCTL		0x60	/* 8 bit */
+
+/* These are always controlled through the INDEX register */
+#define MUSB_TXFIFOSZ		0x62	/* 8-bit (see masks) */
+#define MUSB_RXFIFOSZ		0x63	/* 8-bit (see masks) */
+#define MUSB_TXFIFOADD		0x64	/* 16-bit offset shifted right 3 */
+#define MUSB_RXFIFOADD		0x66	/* 16-bit offset shifted right 3 */
+
+/* REVISIT: vctrl/vstatus: optional vendor utmi+phy register at 0x68 */
+#define MUSB_HWVERS		0x6C	/* 8 bit */
+
+#define MUSB_EPINFO		0x78	/* 8 bit */
+#define MUSB_RAMINFO		0x79	/* 8 bit */
+#define MUSB_LINKINFO		0x7a	/* 8 bit */
+#define MUSB_VPLEN		0x7b	/* 8 bit */
+#define MUSB_HS_EOF1		0x7c	/* 8 bit */
+#define MUSB_FS_EOF1		0x7d	/* 8 bit */
+#define MUSB_LS_EOF1		0x7e	/* 8 bit */
+
+/* Offsets to endpoint registers */
+#define MUSB_TXMAXP		0x00
+#define MUSB_TXCSR		0x02
+#define MUSB_CSR0		MUSB_TXCSR	/* Re-used for EP0 */
+#define MUSB_RXMAXP		0x04
+#define MUSB_RXCSR		0x06
+#define MUSB_RXCOUNT		0x08
+#define MUSB_COUNT0		MUSB_RXCOUNT	/* Re-used for EP0 */
+#define MUSB_TXTYPE		0x0A
+#define MUSB_TYPE0		MUSB_TXTYPE	/* Re-used for EP0 */
+#define MUSB_TXINTERVAL		0x0B
+#define MUSB_NAKLIMIT0		MUSB_TXINTERVAL	/* Re-used for EP0 */
+#define MUSB_RXTYPE		0x0C
+#define MUSB_RXINTERVAL		0x0D
+#define MUSB_FIFOSIZE		0x0F
+#define MUSB_CONFIGDATA		MUSB_FIFOSIZE	/* Re-used for EP0 */
+
+/* Offsets to endpoint registers in indexed model (using INDEX register) */
+#define MUSB_INDEXED_OFFSET(_epnum, _offset)	\
+	(0x10 + (_offset))
+
+/* Offsets to endpoint registers in flat models */
+#define MUSB_FLAT_OFFSET(_epnum, _offset)	\
+	(0x100 + (0x10*(_epnum)) + (_offset))
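+
+/*
+ * Example: in the flat model, TXCSR for endpoint 2 is at
+ * MUSB_FLAT_OFFSET(2, MUSB_TXCSR) == 0x100 + 0x20 + 0x02 == 0x122.
+ * In the indexed model all endpoints alias offsets 0x10..0x1f and the
+ * INDEX register selects which endpoint is addressed.
+ */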
+
+#ifdef CONFIG_USB_TUSB6010
+/* TUSB6010 EP0 configuration register is special */
+#define MUSB_TUSB_OFFSET(_epnum, _offset)	\
+	(0x10 + (_offset))
+#include "tusb6010.h"		/* Needed "only" for TUSB_EP0_CONF */
+#endif
+
+/* "bus control"/target registers, for host side multipoint (external hubs) */
+#define MUSB_TXFUNCADDR		0x00
+#define MUSB_TXHUBADDR		0x02
+#define MUSB_TXHUBPORT		0x03
+
+#define MUSB_RXFUNCADDR		0x04
+#define MUSB_RXHUBADDR		0x06
+#define MUSB_RXHUBPORT		0x07
+
+#define MUSB_BUSCTL_OFFSET(_epnum, _offset) \
+	(0x80 + (8*(_epnum)) + (_offset))
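+
+/*
+ * Example: the RXFUNCADDR register for the device on endpoint 2 is at
+ * MUSB_BUSCTL_OFFSET(2, MUSB_RXFUNCADDR) == 0x80 + 0x10 + 0x04 == 0x94.
+ */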
+
+/*
+ * MUSB Register bits
+ */
+
+/* POWER */
+#define MUSB_POWER_ISOUPDATE	0x80
+#define MUSB_POWER_SOFTCONN	0x40
+#define MUSB_POWER_HSENAB	0x20
+#define MUSB_POWER_HSMODE	0x10
+#define MUSB_POWER_RESET	0x08
+#define MUSB_POWER_RESUME	0x04
+#define MUSB_POWER_SUSPENDM	0x02
+#define MUSB_POWER_ENSUSPEND	0x01
+
+/* INTRUSB */
+#define MUSB_INTR_SUSPEND	0x01
+#define MUSB_INTR_RESUME	0x02
+#define MUSB_INTR_RESET		0x04	/* peripheral mode */
+#define MUSB_INTR_BABBLE	0x04	/* host mode; shares bit 2 with RESET */
+#define MUSB_INTR_SOF		0x08
+#define MUSB_INTR_CONNECT	0x10
+#define MUSB_INTR_DISCONNECT	0x20
+#define MUSB_INTR_SESSREQ	0x40
+#define MUSB_INTR_VBUSERROR	0x80	/* For SESSION end */
+
+/* DEVCTL */
+#define MUSB_DEVCTL_BDEVICE	0x80
+#define MUSB_DEVCTL_FSDEV	0x40
+#define MUSB_DEVCTL_LSDEV	0x20
+#define MUSB_DEVCTL_VBUS	0x18
+#define MUSB_DEVCTL_VBUS_SHIFT	3
+#define MUSB_DEVCTL_HM		0x04
+#define MUSB_DEVCTL_HR		0x02
+#define MUSB_DEVCTL_SESSION	0x01
+
+/* TESTMODE */
+#define MUSB_TEST_FORCE_HOST	0x80
+#define MUSB_TEST_FIFO_ACCESS	0x40
+#define MUSB_TEST_FORCE_FS	0x20
+#define MUSB_TEST_FORCE_HS	0x10
+#define MUSB_TEST_PACKET	0x08
+#define MUSB_TEST_K		0x04
+#define MUSB_TEST_J		0x02
+#define MUSB_TEST_SE0_NAK	0x01
+
+/* Allocate for double-packet buffering (effectively doubles assigned _SIZE) */
+#define MUSB_FIFOSZ_DPB	0x10
+/* Allocation size (8, 16, 32, ... 4096) */
+#define MUSB_FIFOSZ_SIZE	0x0f
+
+/* CSR0 */
+#define MUSB_CSR0_FLUSHFIFO	0x0100
+#define MUSB_CSR0_TXPKTRDY	0x0002
+#define MUSB_CSR0_RXPKTRDY	0x0001
+
+/* CSR0 in Peripheral mode */
+#define MUSB_CSR0_P_SVDSETUPEND	0x0080
+#define MUSB_CSR0_P_SVDRXPKTRDY	0x0040
+#define MUSB_CSR0_P_SENDSTALL	0x0020
+#define MUSB_CSR0_P_SETUPEND	0x0010
+#define MUSB_CSR0_P_DATAEND	0x0008
+#define MUSB_CSR0_P_SENTSTALL	0x0004
+
+/* CSR0 in Host mode */
+#define MUSB_CSR0_H_DIS_PING		0x0800
+#define MUSB_CSR0_H_WR_DATATOGGLE	0x0400	/* Set to allow setting: */
+#define MUSB_CSR0_H_DATATOGGLE		0x0200	/* Data toggle control */
+#define MUSB_CSR0_H_NAKTIMEOUT		0x0080
+#define MUSB_CSR0_H_STATUSPKT		0x0040
+#define MUSB_CSR0_H_REQPKT		0x0020
+#define MUSB_CSR0_H_ERROR		0x0010
+#define MUSB_CSR0_H_SETUPPKT		0x0008
+#define MUSB_CSR0_H_RXSTALL		0x0004
+
+/* CSR0 bits to avoid zeroing (write zero clears, write 1 ignored) */
+#define MUSB_CSR0_P_WZC_BITS	\
+	(MUSB_CSR0_P_SENTSTALL)
+#define MUSB_CSR0_H_WZC_BITS	\
+	(MUSB_CSR0_H_NAKTIMEOUT | MUSB_CSR0_H_RXSTALL \
+	| MUSB_CSR0_RXPKTRDY)
+
+/* TxType/RxType */
+#define MUSB_TYPE_SPEED		0xc0
+#define MUSB_TYPE_SPEED_SHIFT	6
+#define MUSB_TYPE_PROTO		0x30	/* Implicitly zero for ep0 */
+#define MUSB_TYPE_PROTO_SHIFT	4
+#define MUSB_TYPE_REMOTE_END	0xf	/* Implicitly zero for ep0 */
+
+/* CONFIGDATA */
+#define MUSB_CONFIGDATA_MPRXE		0x80	/* Auto bulk pkt combining */
+#define MUSB_CONFIGDATA_MPTXE		0x40	/* Auto bulk pkt splitting */
+#define MUSB_CONFIGDATA_BIGENDIAN	0x20
+#define MUSB_CONFIGDATA_HBRXE		0x10	/* HB-ISO for RX */
+#define MUSB_CONFIGDATA_HBTXE		0x08	/* HB-ISO for TX */
+#define MUSB_CONFIGDATA_DYNFIFO		0x04	/* Dynamic FIFO sizing */
+#define MUSB_CONFIGDATA_SOFTCONE	0x02	/* SoftConnect */
+#define MUSB_CONFIGDATA_UTMIDW		0x01	/* Data width 0/1 => 8/16bits */
+
+/* TXCSR in Peripheral and Host mode */
+#define MUSB_TXCSR_AUTOSET		0x8000
+#define MUSB_TXCSR_MODE			0x2000
+#define MUSB_TXCSR_DMAENAB		0x1000
+#define MUSB_TXCSR_FRCDATATOG		0x0800
+#define MUSB_TXCSR_DMAMODE		0x0400
+#define MUSB_TXCSR_CLRDATATOG		0x0040
+#define MUSB_TXCSR_FLUSHFIFO		0x0008
+#define MUSB_TXCSR_FIFONOTEMPTY		0x0002
+#define MUSB_TXCSR_TXPKTRDY		0x0001
+
+/* TXCSR in Peripheral mode */
+#define MUSB_TXCSR_P_ISO		0x4000
+#define MUSB_TXCSR_P_INCOMPTX		0x0080
+#define MUSB_TXCSR_P_SENTSTALL		0x0020
+#define MUSB_TXCSR_P_SENDSTALL		0x0010
+#define MUSB_TXCSR_P_UNDERRUN		0x0004
+
+/* TXCSR in Host mode */
+#define MUSB_TXCSR_H_WR_DATATOGGLE	0x0200
+#define MUSB_TXCSR_H_DATATOGGLE		0x0100
+#define MUSB_TXCSR_H_NAKTIMEOUT		0x0080
+#define MUSB_TXCSR_H_RXSTALL		0x0020
+#define MUSB_TXCSR_H_ERROR		0x0004
+
+/* TXCSR bits to avoid zeroing (write zero clears, write 1 ignored) */
+#define MUSB_TXCSR_P_WZC_BITS	\
+	(MUSB_TXCSR_P_INCOMPTX | MUSB_TXCSR_P_SENTSTALL \
+	| MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_FIFONOTEMPTY)
+#define MUSB_TXCSR_H_WZC_BITS	\
+	(MUSB_TXCSR_H_NAKTIMEOUT | MUSB_TXCSR_H_RXSTALL \
+	| MUSB_TXCSR_H_ERROR | MUSB_TXCSR_FIFONOTEMPTY)
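+
+/*
+ * Read-modify-write sketch (epio being an endpoint's register base):
+ * OR the WZC mask into the value read back so that status bits cleared
+ * by writing zero survive the update, then clear just the one you mean:
+ *
+ *	csr = musb_readw(epio, MUSB_TXCSR);
+ *	csr |= MUSB_TXCSR_H_WZC_BITS;
+ *	csr &= ~MUSB_TXCSR_H_NAKTIMEOUT;
+ *	musb_writew(epio, MUSB_TXCSR, csr);
+ */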
+
+/* RXCSR in Peripheral and Host mode */
+#define MUSB_RXCSR_AUTOCLEAR		0x8000
+#define MUSB_RXCSR_DMAENAB		0x2000
+#define MUSB_RXCSR_DISNYET		0x1000
+#define MUSB_RXCSR_PID_ERR		0x1000
+#define MUSB_RXCSR_DMAMODE		0x0800
+#define MUSB_RXCSR_INCOMPRX		0x0100
+#define MUSB_RXCSR_CLRDATATOG		0x0080
+#define MUSB_RXCSR_FLUSHFIFO		0x0010
+#define MUSB_RXCSR_DATAERROR		0x0008
+#define MUSB_RXCSR_FIFOFULL		0x0002
+#define MUSB_RXCSR_RXPKTRDY		0x0001
+
+/* RXCSR in Peripheral mode */
+#define MUSB_RXCSR_P_ISO		0x4000
+#define MUSB_RXCSR_P_SENTSTALL		0x0040
+#define MUSB_RXCSR_P_SENDSTALL		0x0020
+#define MUSB_RXCSR_P_OVERRUN		0x0004
+
+/* RXCSR in Host mode */
+#define MUSB_RXCSR_H_AUTOREQ		0x4000
+#define MUSB_RXCSR_H_WR_DATATOGGLE	0x0400
+#define MUSB_RXCSR_H_DATATOGGLE		0x0200
+#define MUSB_RXCSR_H_RXSTALL		0x0040
+#define MUSB_RXCSR_H_REQPKT		0x0020
+#define MUSB_RXCSR_H_ERROR		0x0004
+
+/* RXCSR bits to avoid zeroing (write zero clears, write 1 ignored) */
+#define MUSB_RXCSR_P_WZC_BITS	\
+	(MUSB_RXCSR_P_SENTSTALL | MUSB_RXCSR_P_OVERRUN \
+	| MUSB_RXCSR_RXPKTRDY)
+#define MUSB_RXCSR_H_WZC_BITS	\
+	(MUSB_RXCSR_H_RXSTALL | MUSB_RXCSR_H_ERROR \
+	| MUSB_RXCSR_DATAERROR | MUSB_RXCSR_RXPKTRDY)
+
+/* HUBADDR */
+#define MUSB_HUBADDR_MULTI_TT		0x80
+
+#endif	/* __MUSB_REGS_H__ */
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
new file mode 100644
index 0000000..e0e9ce5
--- /dev/null
+++ b/drivers/usb/musb/musb_virthub.c
@@ -0,0 +1,425 @@
+/*
+ * MUSB OTG driver virtual root hub support
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+
+#include <asm/unaligned.h>
+
+#include "musb_core.h"
+
+
+static void musb_port_suspend(struct musb *musb, bool do_suspend)
+{
+	u8		power;
+	void __iomem	*mbase = musb->mregs;
+
+	if (!is_host_active(musb))
+		return;
+
+	/* NOTE:  this doesn't necessarily put PHY into low power mode,
+	 * turning off its clock; that's a function of PHY integration and
+	 * MUSB_POWER_ENSUSPEND.  PHY may need a clock (sigh) to detect
+	 * SE0 changing to connect (J) or wakeup (K) states.
+	 */
+	power = musb_readb(mbase, MUSB_POWER);
+	if (do_suspend) {
+		int retries = 10000;
+
+		power &= ~MUSB_POWER_RESUME;
+		power |= MUSB_POWER_SUSPENDM;
+		musb_writeb(mbase, MUSB_POWER, power);
+
+		/* Needed for OPT A tests */
+		power = musb_readb(mbase, MUSB_POWER);
+		while (power & MUSB_POWER_SUSPENDM) {
+			power = musb_readb(mbase, MUSB_POWER);
+			if (retries-- < 1)
+				break;
+		}
+
+		DBG(3, "Root port suspended, power %02x\n", power);
+
+		musb->port1_status |= USB_PORT_STAT_SUSPEND;
+		switch (musb->xceiv.state) {
+		case OTG_STATE_A_HOST:
+			musb->xceiv.state = OTG_STATE_A_SUSPEND;
+			musb->is_active = is_otg_enabled(musb)
+					&& musb->xceiv.host->b_hnp_enable;
+			musb_platform_try_idle(musb, 0);
+			break;
+#ifdef	CONFIG_USB_MUSB_OTG
+		case OTG_STATE_B_HOST:
+			musb->xceiv.state = OTG_STATE_B_WAIT_ACON;
+			musb->is_active = is_otg_enabled(musb)
+					&& musb->xceiv.host->b_hnp_enable;
+			musb_platform_try_idle(musb, 0);
+			break;
+#endif
+		default:
+			DBG(1, "bogus rh suspend? %s\n",
+				otg_state_string(musb));
+		}
+	} else if (power & MUSB_POWER_SUSPENDM) {
+		power &= ~MUSB_POWER_SUSPENDM;
+		power |= MUSB_POWER_RESUME;
+		musb_writeb(mbase, MUSB_POWER, power);
+
+		DBG(3, "Root port resuming, power %02x\n", power);
+
+		/* later, GetPortStatus will stop RESUME signaling */
+		musb->port1_status |= MUSB_PORT_STAT_RESUME;
+		musb->rh_timer = jiffies + msecs_to_jiffies(20);
+	}
+}
+
+static void musb_port_reset(struct musb *musb, bool do_reset)
+{
+	u8		power;
+	void __iomem	*mbase = musb->mregs;
+
+#ifdef CONFIG_USB_MUSB_OTG
+	if (musb->xceiv.state == OTG_STATE_B_IDLE) {
+		DBG(2, "HNP: Returning from HNP; no hub reset from b_idle\n");
+		musb->port1_status &= ~USB_PORT_STAT_RESET;
+		return;
+	}
+#endif
+
+	if (!is_host_active(musb))
+		return;
+
+	/* NOTE:  caller guarantees it will turn off the reset when
+	 * the appropriate amount of time has passed
+	 */
+	power = musb_readb(mbase, MUSB_POWER);
+	if (do_reset) {
+
+		/*
+		 * If RESUME is set, we must make sure it stays set for at
+		 * least 20 ms.  Then we must clear RESUME and wait a bit to
+		 * let musb start
+		 * generating SOFs. If we don't do this, OPT HS A 6.8 tests
+		 * fail with "Error! Did not receive an SOF before suspend
+		 * detected".
+		 */
+		if (power & MUSB_POWER_RESUME) {
+			while (time_before(jiffies, musb->rh_timer))
+				msleep(1);
+			musb_writeb(mbase, MUSB_POWER,
+				power & ~MUSB_POWER_RESUME);
+			msleep(1);
+		}
+
+		musb->ignore_disconnect = true;
+		power &= 0xf0;
+		musb_writeb(mbase, MUSB_POWER,
+				power | MUSB_POWER_RESET);
+
+		musb->port1_status |= USB_PORT_STAT_RESET;
+		musb->port1_status &= ~USB_PORT_STAT_ENABLE;
+		musb->rh_timer = jiffies + msecs_to_jiffies(50);
+	} else {
+		DBG(4, "root port reset stopped\n");
+		musb_writeb(mbase, MUSB_POWER,
+				power & ~MUSB_POWER_RESET);
+
+		musb->ignore_disconnect = false;
+
+		power = musb_readb(mbase, MUSB_POWER);
+		if (power & MUSB_POWER_HSMODE) {
+			DBG(4, "high-speed device connected\n");
+			musb->port1_status |= USB_PORT_STAT_HIGH_SPEED;
+		}
+
+		musb->port1_status &= ~USB_PORT_STAT_RESET;
+		musb->port1_status |= USB_PORT_STAT_ENABLE
+					| (USB_PORT_STAT_C_RESET << 16)
+					| (USB_PORT_STAT_C_ENABLE << 16);
+		usb_hcd_poll_rh_status(musb_to_hcd(musb));
+
+		musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
+	}
+}
+
+void musb_root_disconnect(struct musb *musb)
+{
+	musb->port1_status = (1 << USB_PORT_FEAT_POWER)
+			| (1 << USB_PORT_FEAT_C_CONNECTION);
+
+	usb_hcd_poll_rh_status(musb_to_hcd(musb));
+	musb->is_active = 0;
+
+	switch (musb->xceiv.state) {
+	case OTG_STATE_A_HOST:
+	case OTG_STATE_A_SUSPEND:
+		musb->xceiv.state = OTG_STATE_A_WAIT_BCON;
+		musb->is_active = 0;
+		break;
+	case OTG_STATE_A_WAIT_VFALL:
+		musb->xceiv.state = OTG_STATE_B_IDLE;
+		break;
+	default:
+		DBG(1, "host disconnect (%s)\n", otg_state_string(musb));
+	}
+}
+
+
+/*---------------------------------------------------------------------*/
+
+/* Caller may or may not hold musb->lock */
+int musb_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+	struct musb	*musb = hcd_to_musb(hcd);
+	int		retval = 0;
+
+	/* called in_irq() via usb_hcd_poll_rh_status() */
+	if (musb->port1_status & 0xffff0000) {
+		*buf = 0x02;	/* a change on port 1; bit 0 is the hub itself */
+		retval = 1;
+	}
+	return retval;
+}
+
+int musb_hub_control(
+	struct usb_hcd	*hcd,
+	u16		typeReq,
+	u16		wValue,
+	u16		wIndex,
+	char		*buf,
+	u16		wLength)
+{
+	struct musb	*musb = hcd_to_musb(hcd);
+	u32		temp;
+	int		retval = 0;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&musb->lock, flags);
+
+	if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags))) {
+		spin_unlock_irqrestore(&musb->lock, flags);
+		return -ESHUTDOWN;
+	}
+
+	/* hub features:  always zero, setting is a NOP
+	 * port features: reported, sometimes updated when host is active
+	 * no indicators
+	 */
+	switch (typeReq) {
+	case ClearHubFeature:
+	case SetHubFeature:
+		switch (wValue) {
+		case C_HUB_OVER_CURRENT:
+		case C_HUB_LOCAL_POWER:
+			break;
+		default:
+			goto error;
+		}
+		break;
+	case ClearPortFeature:
+		if ((wIndex & 0xff) != 1)
+			goto error;
+
+		switch (wValue) {
+		case USB_PORT_FEAT_ENABLE:
+			break;
+		case USB_PORT_FEAT_SUSPEND:
+			musb_port_suspend(musb, false);
+			break;
+		case USB_PORT_FEAT_POWER:
+			if (!(is_otg_enabled(musb) && hcd->self.is_b_host))
+				musb_set_vbus(musb, 0);
+			break;
+		case USB_PORT_FEAT_C_CONNECTION:
+		case USB_PORT_FEAT_C_ENABLE:
+		case USB_PORT_FEAT_C_OVER_CURRENT:
+		case USB_PORT_FEAT_C_RESET:
+		case USB_PORT_FEAT_C_SUSPEND:
+			break;
+		default:
+			goto error;
+		}
+		DBG(5, "clear feature %d\n", wValue);
+		musb->port1_status &= ~(1 << wValue);
+		break;
+	case GetHubDescriptor:
+		{
+		struct usb_hub_descriptor *desc = (void *)buf;
+
+		desc->bDescLength = 9;
+		desc->bDescriptorType = 0x29;
+		desc->bNbrPorts = 1;
+		desc->wHubCharacteristics = __constant_cpu_to_le16(
+				  0x0001	/* per-port power switching */
+				| 0x0010	/* no overcurrent reporting */
+				);
+		desc->bPwrOn2PwrGood = 5;	/* msec/2 */
+		desc->bHubContrCurrent = 0;
+
+		/* workaround bogus struct definition */
+		desc->DeviceRemovable[0] = 0x02;	/* port 1 */
+		desc->DeviceRemovable[1] = 0xff;
+		}
+		break;
+	case GetHubStatus:
+		temp = 0;
+		*(__le32 *) buf = cpu_to_le32(temp);
+		break;
+	case GetPortStatus:
+		if (wIndex != 1)
+			goto error;
+
+		/* finish RESET signaling? */
+		if ((musb->port1_status & USB_PORT_STAT_RESET)
+				&& time_after_eq(jiffies, musb->rh_timer))
+			musb_port_reset(musb, false);
+
+		/* finish RESUME signaling? */
+		if ((musb->port1_status & MUSB_PORT_STAT_RESUME)
+				&& time_after_eq(jiffies, musb->rh_timer)) {
+			u8		power;
+
+			power = musb_readb(musb->mregs, MUSB_POWER);
+			power &= ~MUSB_POWER_RESUME;
+			DBG(4, "root port resume stopped, power %02x\n",
+					power);
+			musb_writeb(musb->mregs, MUSB_POWER, power);
+
+			/* ISSUE:  DaVinci (RTL 1.300) disconnects after
+			 * resume of high speed peripherals (but not full
+			 * speed ones).
+			 */
+
+			musb->is_active = 1;
+			musb->port1_status &= ~(USB_PORT_STAT_SUSPEND
+					| MUSB_PORT_STAT_RESUME);
+			musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
+			usb_hcd_poll_rh_status(musb_to_hcd(musb));
+			/* NOTE: it might really be A_WAIT_BCON ... */
+			musb->xceiv.state = OTG_STATE_A_HOST;
+		}
+
+		put_unaligned(cpu_to_le32(musb->port1_status
+					& ~MUSB_PORT_STAT_RESUME),
+				(__le32 *) buf);
+
+		/* port change status is more interesting */
+		DBG(get_unaligned((u16 *)(buf+2)) ? 2 : 5, "port status %08x\n",
+				musb->port1_status);
+		break;
+	case SetPortFeature:
+		if ((wIndex & 0xff) != 1)
+			goto error;
+
+		switch (wValue) {
+		case USB_PORT_FEAT_POWER:
+			/* NOTE: this controller has a strange state machine
+			 * that involves "requesting sessions" according to
+			 * magic side effects from incompletely-described
+			 * rules about startup...
+			 *
+			 * This call is what really starts the host mode; be
+			 * very careful about side effects if you reorder any
+			 * initialization logic, e.g. for OTG, or change any
+			 * logic relating to VBUS power-up.
+			 */
+			if (!(is_otg_enabled(musb) && hcd->self.is_b_host))
+				musb_start(musb);
+			break;
+		case USB_PORT_FEAT_RESET:
+			musb_port_reset(musb, true);
+			break;
+		case USB_PORT_FEAT_SUSPEND:
+			musb_port_suspend(musb, true);
+			break;
+		case USB_PORT_FEAT_TEST:
+			if (unlikely(is_host_active(musb)))
+				goto error;
+
+			wIndex >>= 8;
+			switch (wIndex) {
+			case 1:
+				pr_debug("TEST_J\n");
+				temp = MUSB_TEST_J;
+				break;
+			case 2:
+				pr_debug("TEST_K\n");
+				temp = MUSB_TEST_K;
+				break;
+			case 3:
+				pr_debug("TEST_SE0_NAK\n");
+				temp = MUSB_TEST_SE0_NAK;
+				break;
+			case 4:
+				pr_debug("TEST_PACKET\n");
+				temp = MUSB_TEST_PACKET;
+				musb_load_testpacket(musb);
+				break;
+			case 5:
+				pr_debug("TEST_FORCE_ENABLE\n");
+				temp = MUSB_TEST_FORCE_HOST
+					| MUSB_TEST_FORCE_HS;
+
+				musb_writeb(musb->mregs, MUSB_DEVCTL,
+						MUSB_DEVCTL_SESSION);
+				break;
+			case 6:
+				pr_debug("TEST_FIFO_ACCESS\n");
+				temp = MUSB_TEST_FIFO_ACCESS;
+				break;
+			default:
+				goto error;
+			}
+			musb_writeb(musb->mregs, MUSB_TESTMODE, temp);
+			break;
+		default:
+			goto error;
+		}
+		DBG(5, "set feature %d\n", wValue);
+		musb->port1_status |= 1 << wValue;
+		break;
+
+	default:
+error:
+		/* "protocol stall" on error */
+		retval = -EPIPE;
+	}
+	spin_unlock_irqrestore(&musb->lock, flags);
+	return retval;
+}
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
new file mode 100644
index 0000000..9ba8fb7
--- /dev/null
+++ b/drivers/usb/musb/musbhsdma.c
@@ -0,0 +1,433 @@
+/*
+ * MUSB OTG driver - support for Mentor's DMA controller
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2007 by Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include "musb_core.h"
+
+#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430)
+#include "omap2430.h"
+#endif
+
+#define MUSB_HSDMA_BASE		0x200
+#define MUSB_HSDMA_INTR		(MUSB_HSDMA_BASE + 0)
+#define MUSB_HSDMA_CONTROL		0x4
+#define MUSB_HSDMA_ADDRESS		0x8
+#define MUSB_HSDMA_COUNT		0xc
+
+#define MUSB_HSDMA_CHANNEL_OFFSET(_bChannel, _offset)		\
+		(MUSB_HSDMA_BASE + ((_bChannel) << 4) + (_offset))
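+
+/*
+ * So, e.g., the CONTROL register of DMA channel 1 lives at
+ * MUSB_HSDMA_CHANNEL_OFFSET(1, MUSB_HSDMA_CONTROL) == 0x200 + 0x10 + 0x4.
+ */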
+
+/* control register (16-bit): */
+#define MUSB_HSDMA_ENABLE_SHIFT		0
+#define MUSB_HSDMA_TRANSMIT_SHIFT		1
+#define MUSB_HSDMA_MODE1_SHIFT		2
+#define MUSB_HSDMA_IRQENABLE_SHIFT		3
+#define MUSB_HSDMA_ENDPOINT_SHIFT		4
+#define MUSB_HSDMA_BUSERROR_SHIFT		8
+#define MUSB_HSDMA_BURSTMODE_SHIFT		9
+#define MUSB_HSDMA_BURSTMODE		(3 << MUSB_HSDMA_BURSTMODE_SHIFT)
+#define MUSB_HSDMA_BURSTMODE_UNSPEC	0
+#define MUSB_HSDMA_BURSTMODE_INCR4	1
+#define MUSB_HSDMA_BURSTMODE_INCR8	2
+#define MUSB_HSDMA_BURSTMODE_INCR16	3
+
+#define MUSB_HSDMA_CHANNELS		8
+
+struct musb_dma_controller;
+
+struct musb_dma_channel {
+	struct dma_channel		Channel;
+	struct musb_dma_controller	*controller;
+	u32				dwStartAddress;
+	u32				len;
+	u16				wMaxPacketSize;
+	u8				bIndex;
+	u8				epnum;
+	u8				transmit;
+};
+
+struct musb_dma_controller {
+	struct dma_controller		Controller;
+	struct musb_dma_channel		aChannel[MUSB_HSDMA_CHANNELS];
+	void				*pDmaPrivate;
+	void __iomem			*pCoreBase;
+	u8				bChannelCount;
+	u8				bmUsedChannels;
+	u8				irq;
+};
+
+static int dma_controller_start(struct dma_controller *c)
+{
+	/* nothing to do */
+	return 0;
+}
+
+static void dma_channel_release(struct dma_channel *pChannel);
+
+static int dma_controller_stop(struct dma_controller *c)
+{
+	struct musb_dma_controller *controller =
+		container_of(c, struct musb_dma_controller, Controller);
+	struct musb *musb = (struct musb *) controller->pDmaPrivate;
+	struct dma_channel *pChannel;
+	u8 bBit;
+
+	if (controller->bmUsedChannels != 0) {
+		dev_err(musb->controller,
+			"Stopping DMA controller while channel active\n");
+
+		for (bBit = 0; bBit < MUSB_HSDMA_CHANNELS; bBit++) {
+			if (controller->bmUsedChannels & (1 << bBit)) {
+				pChannel = &controller->aChannel[bBit].Channel;
+				dma_channel_release(pChannel);
+
+				if (!controller->bmUsedChannels)
+					break;
+			}
+		}
+	}
+	return 0;
+}
+
+static struct dma_channel *dma_channel_allocate(struct dma_controller *c,
+				struct musb_hw_ep *hw_ep, u8 transmit)
+{
+	u8 bBit;
+	struct dma_channel *pChannel = NULL;
+	struct musb_dma_channel *pImplChannel = NULL;
+	struct musb_dma_controller *controller =
+			container_of(c, struct musb_dma_controller, Controller);
+
+	for (bBit = 0; bBit < MUSB_HSDMA_CHANNELS; bBit++) {
+		if (!(controller->bmUsedChannels & (1 << bBit))) {
+			controller->bmUsedChannels |= (1 << bBit);
+			pImplChannel = &(controller->aChannel[bBit]);
+			pImplChannel->controller = controller;
+			pImplChannel->bIndex = bBit;
+			pImplChannel->epnum = hw_ep->epnum;
+			pImplChannel->transmit = transmit;
+			pChannel = &(pImplChannel->Channel);
+			pChannel->private_data = pImplChannel;
+			pChannel->status = MUSB_DMA_STATUS_FREE;
+			pChannel->max_len = 0x10000;
+			/* Tx => mode 1; Rx => mode 0 */
+			pChannel->desired_mode = transmit;
+			pChannel->actual_len = 0;
+			break;
+		}
+	}
+	return pChannel;
+}
+
+static void dma_channel_release(struct dma_channel *pChannel)
+{
+	struct musb_dma_channel *pImplChannel =
+		(struct musb_dma_channel *) pChannel->private_data;
+
+	pChannel->actual_len = 0;
+	pImplChannel->dwStartAddress = 0;
+	pImplChannel->len = 0;
+
+	pImplChannel->controller->bmUsedChannels &=
+		~(1 << pImplChannel->bIndex);
+
+	pChannel->status = MUSB_DMA_STATUS_UNKNOWN;
+}
+
+static void configure_channel(struct dma_channel *pChannel,
+				u16 packet_sz, u8 mode,
+				dma_addr_t dma_addr, u32 len)
+{
+	struct musb_dma_channel *pImplChannel =
+		(struct musb_dma_channel *) pChannel->private_data;
+	struct musb_dma_controller *controller = pImplChannel->controller;
+	void __iomem *mbase = controller->pCoreBase;
+	u8 bChannel = pImplChannel->bIndex;
+	u16 csr = 0;
+
+	DBG(4, "%p, pkt_sz %d, addr 0x%x, len %d, mode %d\n",
+			pChannel, packet_sz, dma_addr, len, mode);
+
+	if (mode) {
+		csr |= 1 << MUSB_HSDMA_MODE1_SHIFT;
+		BUG_ON(len < packet_sz);
+
+		if (packet_sz >= 64) {
+			csr |= MUSB_HSDMA_BURSTMODE_INCR16
+					<< MUSB_HSDMA_BURSTMODE_SHIFT;
+		} else if (packet_sz >= 32) {
+			csr |= MUSB_HSDMA_BURSTMODE_INCR8
+					<< MUSB_HSDMA_BURSTMODE_SHIFT;
+		} else if (packet_sz >= 16) {
+			csr |= MUSB_HSDMA_BURSTMODE_INCR4
+					<< MUSB_HSDMA_BURSTMODE_SHIFT;
+		}
+	}
+
+	csr |= (pImplChannel->epnum << MUSB_HSDMA_ENDPOINT_SHIFT)
+		| (1 << MUSB_HSDMA_ENABLE_SHIFT)
+		| (1 << MUSB_HSDMA_IRQENABLE_SHIFT)
+		| (pImplChannel->transmit
+				? (1 << MUSB_HSDMA_TRANSMIT_SHIFT)
+				: 0);
+
+	/* address/count */
+	musb_writel(mbase,
+		MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_ADDRESS),
+		dma_addr);
+	musb_writel(mbase,
+		MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_COUNT),
+		len);
+
+	/* control (this should start things) */
+	musb_writew(mbase,
+		MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_CONTROL),
+		csr);
+}
+
+static int dma_channel_program(struct dma_channel *pChannel,
+				u16 packet_sz, u8 mode,
+				dma_addr_t dma_addr, u32 len)
+{
+	struct musb_dma_channel *pImplChannel =
+			(struct musb_dma_channel *) pChannel->private_data;
+
+	DBG(2, "ep%d-%s pkt_sz %d, dma_addr 0x%x length %d, mode %d\n",
+		pImplChannel->epnum,
+		pImplChannel->transmit ? "Tx" : "Rx",
+		packet_sz, dma_addr, len, mode);
+
+	BUG_ON(pChannel->status == MUSB_DMA_STATUS_UNKNOWN ||
+		pChannel->status == MUSB_DMA_STATUS_BUSY);
+
+	pChannel->actual_len = 0;
+	pImplChannel->dwStartAddress = dma_addr;
+	pImplChannel->len = len;
+	pImplChannel->wMaxPacketSize = packet_sz;
+	pChannel->status = MUSB_DMA_STATUS_BUSY;
+
+	if ((mode == 1) && (len >= packet_sz))
+		configure_channel(pChannel, packet_sz, 1, dma_addr, len);
+	else
+		configure_channel(pChannel, packet_sz, 0, dma_addr, len);
+
+	return true;
+}
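+
+/*
+ * NOTE: in Mentor DMA "mode 0" the controller moves one packet per
+ * request and software handles each packet's handshake, while "mode 1"
+ * keeps transferring whole packets until the byte count is exhausted;
+ * that is why mode 1 is only selected above when len >= packet_sz.
+ */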
+
+static int dma_channel_abort(struct dma_channel *pChannel)
+{
+	struct musb_dma_channel *pImplChannel =
+		(struct musb_dma_channel *) pChannel->private_data;
+	u8 bChannel = pImplChannel->bIndex;
+	void __iomem *mbase = pImplChannel->controller->pCoreBase;
+	u16 csr;
+
+	if (pChannel->status == MUSB_DMA_STATUS_BUSY) {
+		if (pImplChannel->transmit) {
+
+			csr = musb_readw(mbase,
+				MUSB_EP_OFFSET(pImplChannel->epnum,
+						MUSB_TXCSR));
+			csr &= ~(MUSB_TXCSR_AUTOSET |
+				 MUSB_TXCSR_DMAENAB |
+				 MUSB_TXCSR_DMAMODE);
+			musb_writew(mbase,
+				MUSB_EP_OFFSET(pImplChannel->epnum,
+						MUSB_TXCSR),
+				csr);
+		} else {
+			csr = musb_readw(mbase,
+				MUSB_EP_OFFSET(pImplChannel->epnum,
+						MUSB_RXCSR));
+			csr &= ~(MUSB_RXCSR_AUTOCLEAR |
+				 MUSB_RXCSR_DMAENAB |
+				 MUSB_RXCSR_DMAMODE);
+			musb_writew(mbase,
+				MUSB_EP_OFFSET(pImplChannel->epnum,
+						MUSB_RXCSR),
+				csr);
+		}
+
+		musb_writew(mbase,
+			MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_CONTROL),
+			0);
+		musb_writel(mbase,
+			MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_ADDRESS),
+			0);
+		musb_writel(mbase,
+			MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_COUNT),
+			0);
+
+		pChannel->status = MUSB_DMA_STATUS_FREE;
+	}
+	return 0;
+}
+
+static irqreturn_t dma_controller_irq(int irq, void *private_data)
+{
+	struct musb_dma_controller *controller =
+		(struct musb_dma_controller *)private_data;
+	struct musb_dma_channel *pImplChannel;
+	struct musb *musb = controller->pDmaPrivate;
+	void __iomem *mbase = controller->pCoreBase;
+	struct dma_channel *pChannel;
+	u8 bChannel;
+	u16 csr;
+	u32 dwAddress;
+	u8 int_hsdma;
+	irqreturn_t retval = IRQ_NONE;
+	unsigned long flags;
+
+	spin_lock_irqsave(&musb->lock, flags);
+
+	int_hsdma = musb_readb(mbase, MUSB_HSDMA_INTR);
+	if (!int_hsdma)
+		goto done;
+
+	for (bChannel = 0; bChannel < MUSB_HSDMA_CHANNELS; bChannel++) {
+		if (int_hsdma & (1 << bChannel)) {
+			pImplChannel = (struct musb_dma_channel *)
+					&(controller->aChannel[bChannel]);
+			pChannel = &pImplChannel->Channel;
+
+			csr = musb_readw(mbase,
+					MUSB_HSDMA_CHANNEL_OFFSET(bChannel,
+							MUSB_HSDMA_CONTROL));
+
+			if (csr & (1 << MUSB_HSDMA_BUSERROR_SHIFT))
+				pImplChannel->Channel.status =
+					MUSB_DMA_STATUS_BUS_ABORT;
+			else {
+				u8 devctl;
+
+				dwAddress = musb_readl(mbase,
+						MUSB_HSDMA_CHANNEL_OFFSET(
+							bChannel,
+							MUSB_HSDMA_ADDRESS));
+				pChannel->actual_len = dwAddress
+					- pImplChannel->dwStartAddress;
+
+				DBG(2, "ch %p, 0x%x -> 0x%x (%d / %d) %s\n",
+					pChannel, pImplChannel->dwStartAddress,
+					dwAddress, pChannel->actual_len,
+					pImplChannel->len,
+					(pChannel->actual_len
+						< pImplChannel->len) ?
+					"=> reconfig 0" : "=> complete");
+
+				devctl = musb_readb(mbase, MUSB_DEVCTL);
+
+				pChannel->status = MUSB_DMA_STATUS_FREE;
+
+				/* completed: in host TX with mode 0, or
+				 * when the final packet is short, software
+				 * must set TXPKTRDY to send the packet out.
+				 */
+				if ((devctl & MUSB_DEVCTL_HM)
+					&& (pImplChannel->transmit)
+					&& ((pChannel->desired_mode == 0)
+					    || (pChannel->actual_len &
+					    (pImplChannel->wMaxPacketSize - 1)))
+					 ) {
+					/* Send out the packet */
+					musb_ep_select(mbase,
+						pImplChannel->epnum);
+					musb_writew(mbase, MUSB_EP_OFFSET(
+							pImplChannel->epnum,
+							MUSB_TXCSR),
+						MUSB_TXCSR_TXPKTRDY);
+				} else
+					musb_dma_completion(
+						musb,
+						pImplChannel->epnum,
+						pImplChannel->transmit);
+			}
+		}
+	}
+	retval = IRQ_HANDLED;
+done:
+	spin_unlock_irqrestore(&musb->lock, flags);
+	return retval;
+}
+
+void dma_controller_destroy(struct dma_controller *c)
+{
+	struct musb_dma_controller *controller;
+
+	controller = container_of(c, struct musb_dma_controller, Controller);
+
+	if (controller->irq)
+		free_irq(controller->irq, c);
+
+	kfree(controller);
+}
+
+struct dma_controller *__init
+dma_controller_create(struct musb *musb, void __iomem *pCoreBase)
+{
+	struct musb_dma_controller *controller;
+	struct device *dev = musb->controller;
+	struct platform_device *pdev = to_platform_device(dev);
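+	/* the second IRQ resource of the device is the DMA interrupt */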
+	int irq = platform_get_irq(pdev, 1);
+
+	if (irq <= 0) {
+		dev_err(dev, "No DMA interrupt line!\n");
+		return NULL;
+	}
+
+	controller = kzalloc(sizeof(struct musb_dma_controller), GFP_KERNEL);
+	if (!controller)
+		return NULL;
+
+	controller->bChannelCount = MUSB_HSDMA_CHANNELS;
+	controller->pDmaPrivate = musb;
+	controller->pCoreBase = pCoreBase;
+
+	controller->Controller.start = dma_controller_start;
+	controller->Controller.stop = dma_controller_stop;
+	controller->Controller.channel_alloc = dma_channel_allocate;
+	controller->Controller.channel_release = dma_channel_release;
+	controller->Controller.channel_program = dma_channel_program;
+	controller->Controller.channel_abort = dma_channel_abort;
+
+	if (request_irq(irq, dma_controller_irq, IRQF_DISABLED,
+			musb->controller->bus_id, &controller->Controller)) {
+		dev_err(dev, "request_irq %d failed!\n", irq);
+		dma_controller_destroy(&controller->Controller);
+		return NULL;
+	}
+
+	controller->irq = irq;
+
+	return &controller->Controller;
+}
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
new file mode 100644
index 0000000..298b22e
--- /dev/null
+++ b/drivers/usb/musb/omap2430.c
@@ -0,0 +1,324 @@
+/*
+ * Copyright (C) 2005-2007 by Texas Instruments
+ * Some code has been taken from tusb6010.c
+ * Copyrights for that are attributable to:
+ * Copyright (C) 2006 Nokia Corporation
+ * Jarkko Nikula <jarkko.nikula@nokia.com>
+ * Tony Lindgren <tony@atomide.com>
+ *
+ * This file is part of the Inventra Controller Driver for Linux.
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ *
+ * The Inventra Controller Driver for Linux is distributed in
+ * the hope that it will be useful, but WITHOUT ANY WARRANTY;
+ * without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
+ * License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with The Inventra Controller Driver for Linux ; if not,
+ * write to the Free Software Foundation, Inc., 59 Temple Place,
+ * Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include <asm/mach-types.h>
+#include <asm/arch/hardware.h>
+#include <asm/arch/mux.h>
+
+#include "musb_core.h"
+#include "omap2430.h"
+
+#ifdef CONFIG_ARCH_OMAP3430
+#define	get_cpu_rev()	2
+#endif
+
+#define MUSB_TIMEOUT_A_WAIT_BCON	1100
+
+static struct timer_list musb_idle_timer;
+
+static void musb_do_idle(unsigned long _musb)
+{
+	struct musb	*musb = (void *)_musb;
+	unsigned long	flags;
+	u8	power;
+	u8	devctl;
+
+	spin_lock_irqsave(&musb->lock, flags);
+
+	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+
+	switch (musb->xceiv.state) {
+	case OTG_STATE_A_WAIT_BCON:
+		devctl &= ~MUSB_DEVCTL_SESSION;
+		musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
+		devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+		if (devctl & MUSB_DEVCTL_BDEVICE) {
+			musb->xceiv.state = OTG_STATE_B_IDLE;
+			MUSB_DEV_MODE(musb);
+		} else {
+			musb->xceiv.state = OTG_STATE_A_IDLE;
+			MUSB_HST_MODE(musb);
+		}
+		break;
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+	case OTG_STATE_A_SUSPEND:
+		/* finish RESUME signaling? */
+		if (musb->port1_status & MUSB_PORT_STAT_RESUME) {
+			power = musb_readb(musb->mregs, MUSB_POWER);
+			power &= ~MUSB_POWER_RESUME;
+			DBG(1, "root port resume stopped, power %02x\n", power);
+			musb_writeb(musb->mregs, MUSB_POWER, power);
+			musb->is_active = 1;
+			musb->port1_status &= ~(USB_PORT_STAT_SUSPEND
+						| MUSB_PORT_STAT_RESUME);
+			musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
+			usb_hcd_poll_rh_status(musb_to_hcd(musb));
+			/* NOTE: it might really be A_WAIT_BCON ... */
+			musb->xceiv.state = OTG_STATE_A_HOST;
+		}
+		break;
+#endif
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+	case OTG_STATE_A_HOST:
+		devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+		if (devctl & MUSB_DEVCTL_BDEVICE)
+			musb->xceiv.state = OTG_STATE_B_IDLE;
+		else
+			musb->xceiv.state = OTG_STATE_A_WAIT_BCON;
+		break;
+#endif
+	default:
+		break;
+	}
+	spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+
+void musb_platform_try_idle(struct musb *musb, unsigned long timeout)
+{
+	unsigned long		default_timeout = jiffies + msecs_to_jiffies(3);
+	static unsigned long	last_timer;
+
+	if (timeout == 0)
+		timeout = default_timeout;
+
+	/* Never idle if active, or when VBUS timeout is not set as host */
+	if (musb->is_active || ((musb->a_wait_bcon == 0)
+			&& (musb->xceiv.state == OTG_STATE_A_WAIT_BCON))) {
+		DBG(4, "%s active, deleting timer\n", otg_state_string(musb));
+		del_timer(&musb_idle_timer);
+		last_timer = jiffies;
+		return;
+	}
+
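+	/*
+	 * If a timer is already pending for a later point in time, keep
+	 * it rather than rearming with the shorter timeout requested here.
+	 */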
+	if (time_after(last_timer, timeout)) {
+		if (!timer_pending(&musb_idle_timer))
+			last_timer = timeout;
+		else {
+			DBG(4, "Longer idle timer already pending, ignoring\n");
+			return;
+		}
+	}
+	last_timer = timeout;
+
+	DBG(4, "%s inactive, starting idle timer for %lu ms\n",
+		otg_state_string(musb),
+		(unsigned long)jiffies_to_msecs(timeout - jiffies));
+	mod_timer(&musb_idle_timer, timeout);
+}
+
+void musb_platform_enable(struct musb *musb)
+{
+}
+
+void musb_platform_disable(struct musb *musb)
+{
+}
+
+static void omap_vbus_power(struct musb *musb, int is_on, int sleeping)
+{
+}
+
+static void omap_set_vbus(struct musb *musb, int is_on)
+{
+	u8		devctl;
+	/* HDRC controls CPEN, but beware current surges during device
+	 * connect.  They can trigger transient overcurrent conditions
+	 * that must be ignored.
+	 */
+
+	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+
+	if (is_on) {
+		musb->is_active = 1;
+		musb->xceiv.default_a = 1;
+		musb->xceiv.state = OTG_STATE_A_WAIT_VRISE;
+		devctl |= MUSB_DEVCTL_SESSION;
+
+		MUSB_HST_MODE(musb);
+	} else {
+		musb->is_active = 0;
+
+		/* NOTE:  we're skipping A_WAIT_VFALL -> A_IDLE and
+		 * jumping right to B_IDLE...
+		 */
+
+		musb->xceiv.default_a = 0;
+		musb->xceiv.state = OTG_STATE_B_IDLE;
+		devctl &= ~MUSB_DEVCTL_SESSION;
+
+		MUSB_DEV_MODE(musb);
+	}
+	musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
+	DBG(1, "VBUS %s, devctl %02x "
+		/* otg %3x conf %08x prcm %08x */ "\n",
+		otg_state_string(musb),
+		musb_readb(musb->mregs, MUSB_DEVCTL));
+}
+
+static int omap_set_power(struct otg_transceiver *x, unsigned mA)
+{
+	return 0;
+}
+
+static int musb_platform_resume(struct musb *musb);
+
+void musb_platform_set_mode(struct musb *musb, u8 musb_mode)
+{
+	u8	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+
+	devctl |= MUSB_DEVCTL_SESSION;
+	musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
+	switch (musb_mode) {
+	case MUSB_HOST:
+		otg_set_host(&musb->xceiv, musb->xceiv.host);
+		break;
+	case MUSB_PERIPHERAL:
+		otg_set_peripheral(&musb->xceiv, musb->xceiv.gadget);
+		break;
+	case MUSB_OTG:
+		break;
+	}
+}
+
+int __init musb_platform_init(struct musb *musb)
+{
+	u32 l;
+
+#if defined(CONFIG_ARCH_OMAP2430)
+	omap_cfg_reg(AE5_2430_USB0HS_STP);
+#endif
+
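+	/*
+	 * Resume first: this turns on the interface clock, which must be
+	 * running before the OTG register accesses below.
+	 */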
+	musb_platform_resume(musb);
+
+	l = omap_readl(OTG_SYSCONFIG);
+	l &= ~ENABLEWAKEUP;	/* disable wakeup */
+	l &= ~NOSTDBY;		/* remove possible nostdby */
+	l |= SMARTSTDBY;	/* enable smart standby */
+	l &= ~AUTOIDLE;		/* disable auto idle */
+	l &= ~NOIDLE;		/* remove possible noidle */
+	l |= SMARTIDLE;		/* enable smart idle */
+	l |= AUTOIDLE;		/* enable auto idle */
+	omap_writel(l, OTG_SYSCONFIG);
+
+	l = omap_readl(OTG_INTERFSEL);
+	l |= ULPI_12PIN;
+	omap_writel(l, OTG_INTERFSEL);
+
+	pr_debug("HS USB OTG: revision 0x%x, sysconfig 0x%02x, "
+			"sysstatus 0x%x, intrfsel 0x%x, simenable  0x%x\n",
+			omap_readl(OTG_REVISION), omap_readl(OTG_SYSCONFIG),
+			omap_readl(OTG_SYSSTATUS), omap_readl(OTG_INTERFSEL),
+			omap_readl(OTG_SIMENABLE));
+
+	omap_vbus_power(musb, musb->board_mode == MUSB_HOST, 1);
+
+	if (is_host_enabled(musb))
+		musb->board_set_vbus = omap_set_vbus;
+	if (is_peripheral_enabled(musb))
+		musb->xceiv.set_power = omap_set_power;
+	musb->a_wait_bcon = MUSB_TIMEOUT_A_WAIT_BCON;
+
+	setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb);
+
+	return 0;
+}
+
+int musb_platform_suspend(struct musb *musb)
+{
+	u32 l;
+
+	if (!musb->clock)
+		return 0;
+
+	/* in any role */
+	l = omap_readl(OTG_FORCESTDBY);
+	l |= ENABLEFORCE;	/* enable MSTANDBY */
+	omap_writel(l, OTG_FORCESTDBY);
+
+	l = omap_readl(OTG_SYSCONFIG);
+	l |= ENABLEWAKEUP;	/* enable wakeup */
+	omap_writel(l, OTG_SYSCONFIG);
+
+	if (musb->xceiv.set_suspend)
+		musb->xceiv.set_suspend(&musb->xceiv, 1);
+
+	if (musb->set_clock)
+		musb->set_clock(musb->clock, 0);
+	else
+		clk_disable(musb->clock);
+
+	return 0;
+}
+
+static int musb_platform_resume(struct musb *musb)
+{
+	u32 l;
+
+	if (!musb->clock)
+		return 0;
+
+	if (musb->xceiv.set_suspend)
+		musb->xceiv.set_suspend(&musb->xceiv, 0);
+
+	if (musb->set_clock)
+		musb->set_clock(musb->clock, 1);
+	else
+		clk_enable(musb->clock);
+
+	l = omap_readl(OTG_SYSCONFIG);
+	l &= ~ENABLEWAKEUP;	/* disable wakeup */
+	omap_writel(l, OTG_SYSCONFIG);
+
+	l = omap_readl(OTG_FORCESTDBY);
+	l &= ~ENABLEFORCE;	/* disable MSTANDBY */
+	omap_writel(l, OTG_FORCESTDBY);
+
+	return 0;
+}
+
+int musb_platform_exit(struct musb *musb)
+{
+	omap_vbus_power(musb, 0 /*off*/, 1);
+
+	musb_platform_suspend(musb);
+
+	clk_put(musb->clock);
+	musb->clock = NULL;
+
+	return 0;
+}
diff --git a/drivers/usb/musb/omap2430.h b/drivers/usb/musb/omap2430.h
new file mode 100644
index 0000000..786a620
--- /dev/null
+++ b/drivers/usb/musb/omap2430.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2005-2006 by Texas Instruments
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ */
+
+#ifndef __MUSB_OMAP243X_H__
+#define __MUSB_OMAP243X_H__
+
+#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430)
+#include <asm/arch/hardware.h>
+#include <asm/arch/usb.h>
+
+/*
+ * OMAP2430-specific definitions
+ */
+
+#define MENTOR_BASE_OFFSET	0
+#if	defined(CONFIG_ARCH_OMAP2430)
+#define	OMAP_HSOTG_BASE		(OMAP243X_HS_BASE)
+#elif	defined(CONFIG_ARCH_OMAP3430)
+#define	OMAP_HSOTG_BASE		(OMAP34XX_HSUSB_OTG_BASE)
+#endif
+#define OMAP_HSOTG(offset)	(OMAP_HSOTG_BASE + 0x400 + (offset))
+#define OTG_REVISION		OMAP_HSOTG(0x0)
+#define OTG_SYSCONFIG		OMAP_HSOTG(0x4)
+#	define	MIDLEMODE	12	/* bit position */
+#	define	FORCESTDBY		(0 << MIDLEMODE)
+#	define	NOSTDBY			(1 << MIDLEMODE)
+#	define	SMARTSTDBY		(2 << MIDLEMODE)
+#	define	SIDLEMODE		3	/* bit position */
+#	define	FORCEIDLE		(0 << SIDLEMODE)
+#	define	NOIDLE			(1 << SIDLEMODE)
+#	define	SMARTIDLE		(2 << SIDLEMODE)
+#	define	ENABLEWAKEUP		(1 << 2)
+#	define	SOFTRST			(1 << 1)
+#	define	AUTOIDLE		(1 << 0)
+#define OTG_SYSSTATUS		OMAP_HSOTG(0x8)
+#	define	RESETDONE		(1 << 0)
+#define OTG_INTERFSEL		OMAP_HSOTG(0xc)
+#	define	EXTCP			(1 << 2)
+#	define	PHYSEL		0	/* bit position */
+#	define	UTMI_8BIT		(0 << PHYSEL)
+#	define	ULPI_12PIN		(1 << PHYSEL)
+#	define	ULPI_8PIN		(2 << PHYSEL)
+#define OTG_SIMENABLE		OMAP_HSOTG(0x10)
+#	define	TM1			(1 << 0)
+#define OTG_FORCESTDBY		OMAP_HSOTG(0x14)
+#	define	ENABLEFORCE		(1 << 0)
+
+#endif	/* CONFIG_ARCH_OMAP2430 || CONFIG_ARCH_OMAP3430 */
+
+#endif	/* __MUSB_OMAP243X_H__ */
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
new file mode 100644
index 0000000..b73b036
--- /dev/null
+++ b/drivers/usb/musb/tusb6010.c
@@ -0,0 +1,1151 @@
+/*
+ * TUSB6010 USB 2.0 OTG Dual Role controller
+ *
+ * Copyright (C) 2006 Nokia Corporation
+ * Jarkko Nikula <jarkko.nikula@nokia.com>
+ * Tony Lindgren <tony@atomide.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Notes:
+ * - Driver assumes that interface to external host (main CPU) is
+ *   configured for NOR FLASH interface instead of VLYNQ serial
+ *   interface.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/usb.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+
+#include "musb_core.h"
+
+static void tusb_source_power(struct musb *musb, int is_on);
+
+#define TUSB_REV_MAJOR(reg_val)		((reg_val >> 4) & 0xf)
+#define TUSB_REV_MINOR(reg_val)		(reg_val & 0xf)
+
+/*
+ * Checks the revision. We need to use the DMA register as 3.0 does not
+ * have correct versions for TUSB_PRCM_REV or TUSB_INT_CTRL_REV.
+ */
+u8 tusb_get_revision(struct musb *musb)
+{
+	void __iomem	*tbase = musb->ctrl_base;
+	u32		die_id;
+	u8		rev;
+
+	rev = musb_readl(tbase, TUSB_DMA_CTRL_REV) & 0xff;
+	if (TUSB_REV_MAJOR(rev) == 3) {
+		die_id = TUSB_DIDR1_HI_CHIP_REV(musb_readl(tbase,
+				TUSB_DIDR1_HI));
+		if (die_id >= TUSB_DIDR1_HI_REV_31)
+			rev |= 1;
+	}
+
+	return rev;
+}
+
+static int __init tusb_print_revision(struct musb *musb)
+{
+	void __iomem	*tbase = musb->ctrl_base;
+	u8		rev;
+
+	rev = tusb_get_revision(musb);
+
+	pr_info("tusb: %s%i.%i %s%i.%i %s%i.%i %s%i.%i %s%i %s%i.%i\n",
+		"prcm",
+		TUSB_REV_MAJOR(musb_readl(tbase, TUSB_PRCM_REV)),
+		TUSB_REV_MINOR(musb_readl(tbase, TUSB_PRCM_REV)),
+		"int",
+		TUSB_REV_MAJOR(musb_readl(tbase, TUSB_INT_CTRL_REV)),
+		TUSB_REV_MINOR(musb_readl(tbase, TUSB_INT_CTRL_REV)),
+		"gpio",
+		TUSB_REV_MAJOR(musb_readl(tbase, TUSB_GPIO_REV)),
+		TUSB_REV_MINOR(musb_readl(tbase, TUSB_GPIO_REV)),
+		"dma",
+		TUSB_REV_MAJOR(musb_readl(tbase, TUSB_DMA_CTRL_REV)),
+		TUSB_REV_MINOR(musb_readl(tbase, TUSB_DMA_CTRL_REV)),
+		"dieid",
+		TUSB_DIDR1_HI_CHIP_REV(musb_readl(tbase, TUSB_DIDR1_HI)),
+		"rev",
+		TUSB_REV_MAJOR(rev), TUSB_REV_MINOR(rev));
+
+	return rev;
+}
+
+#define WBUS_QUIRK_MASK	(TUSB_PHY_OTG_CTRL_TESTM2 | TUSB_PHY_OTG_CTRL_TESTM1 \
+				| TUSB_PHY_OTG_CTRL_TESTM0)
+
+/*
+ * Workaround for spontaneous WBUS wake-up issue #2 for tusb3.0.
+ * Disables power detection in PHY for the duration of idle.
+ */
+static void tusb_wbus_quirk(struct musb *musb, int enabled)
+{
+	void __iomem	*tbase = musb->ctrl_base;
+	static u32	phy_otg_ctrl, phy_otg_ena;
+	u32		tmp;
+
+	if (enabled) {
+		phy_otg_ctrl = musb_readl(tbase, TUSB_PHY_OTG_CTRL);
+		phy_otg_ena = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE);
+		tmp = TUSB_PHY_OTG_CTRL_WRPROTECT
+				| phy_otg_ena | WBUS_QUIRK_MASK;
+		musb_writel(tbase, TUSB_PHY_OTG_CTRL, tmp);
+		tmp = phy_otg_ena & ~WBUS_QUIRK_MASK;
+		tmp |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_TESTM2;
+		musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, tmp);
+		DBG(2, "Enabled tusb wbus quirk ctrl %08x ena %08x\n",
+			musb_readl(tbase, TUSB_PHY_OTG_CTRL),
+			musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE));
+	} else if (musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE)
+					& TUSB_PHY_OTG_CTRL_TESTM2) {
+		tmp = TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ctrl;
+		musb_writel(tbase, TUSB_PHY_OTG_CTRL, tmp);
+		tmp = TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ena;
+		musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, tmp);
+		DBG(2, "Disabled tusb wbus quirk ctrl %08x ena %08x\n",
+			musb_readl(tbase, TUSB_PHY_OTG_CTRL),
+			musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE));
+		phy_otg_ctrl = 0;
+		phy_otg_ena = 0;
+	}
+}
+
+/*
+ * TUSB 6010 may use a parallel bus that doesn't support byte ops;
+ * so both loading and unloading FIFOs need explicit byte counts.
+ */
+
+static inline void
+tusb_fifo_write_unaligned(void __iomem *fifo, const u8 *buf, u16 len)
+{
+	u32		val;
+	int		i;
+
+	if (len >= 4) {
+		for (i = 0; i < (len >> 2); i++) {
+			memcpy(&val, buf, 4);
+			musb_writel(fifo, 0, val);
+			buf += 4;
+		}
+		len %= 4;
+	}
+	if (len > 0) {
+		/* Write the rest 1 - 3 bytes to FIFO */
+		memcpy(&val, buf, len);
+		musb_writel(fifo, 0, val);
+	}
+}
+
+static inline void tusb_fifo_read_unaligned(void __iomem *fifo,
+						void *buf, u16 len)
+{
+	u32		val;
+	int		i;
+
+	if (len >= 4) {
+		for (i = 0; i < (len >> 2); i++) {
+			val = musb_readl(fifo, 0);
+			memcpy(buf, &val, 4);
+			buf += 4;
+		}
+		len %= 4;
+	}
+	if (len > 0) {
+		/* Read the rest 1 - 3 bytes from FIFO */
+		val = musb_readl(fifo, 0);
+		memcpy(buf, &val, len);
+	}
+}
+
+void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *buf)
+{
+	void __iomem	*ep_conf = hw_ep->conf;
+	void __iomem	*fifo = hw_ep->fifo;
+	u8		epnum = hw_ep->epnum;
+
+	prefetch(buf);
+
+	DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
+			'T', epnum, fifo, len, buf);
+
+	if (epnum)
+		musb_writel(ep_conf, TUSB_EP_TX_OFFSET,
+			TUSB_EP_CONFIG_XFR_SIZE(len));
+	else
+		musb_writel(ep_conf, 0, TUSB_EP0_CONFIG_DIR_TX |
+			TUSB_EP0_CONFIG_XFR_SIZE(len));
+
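+	/* Use the widest accesses the buffer alignment allows; any odd
+	 * remainder is written by the unaligned helper at the end.
+	 */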
+	if (likely((0x01 & (unsigned long) buf) == 0)) {
+
+		/* Best case is 32bit-aligned destination address */
+		if ((0x02 & (unsigned long) buf) == 0) {
+			if (len >= 4) {
+				writesl(fifo, buf, len >> 2);
+				buf += (len & ~0x03);
+				len &= 0x03;
+			}
+		} else {
+			if (len >= 2) {
+				u32 val;
+				int i;
+
+				/* Cannot use writesw, fifo is 32-bit */
+				for (i = 0; i < (len >> 2); i++) {
+					val = (u32)(*(u16 *)buf);
+					buf += 2;
+					val |= (*(u16 *)buf) << 16;
+					buf += 2;
+					musb_writel(fifo, 0, val);
+				}
+				len &= 0x03;
+			}
+		}
+	}
+
+	if (len > 0)
+		tusb_fifo_write_unaligned(fifo, buf, len);
+}
+
+void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *buf)
+{
+	void __iomem	*ep_conf = hw_ep->conf;
+	void __iomem	*fifo = hw_ep->fifo;
+	u8		epnum = hw_ep->epnum;
+
+	DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
+			'R', epnum, fifo, len, buf);
+
+	if (epnum)
+		musb_writel(ep_conf, TUSB_EP_RX_OFFSET,
+			TUSB_EP_CONFIG_XFR_SIZE(len));
+	else
+		musb_writel(ep_conf, 0, TUSB_EP0_CONFIG_XFR_SIZE(len));
+
+	if (likely((0x01 & (unsigned long) buf) == 0)) {
+
+		/* Best case is 32bit-aligned destination address */
+		if ((0x02 & (unsigned long) buf) == 0) {
+			if (len >= 4) {
+				readsl(fifo, buf, len >> 2);
+				buf += (len & ~0x03);
+				len &= 0x03;
+			}
+		} else {
+			if (len >= 2) {
+				u32 val;
+				int i;
+
+				/* Cannot use readsw, fifo is 32-bit */
+				for (i = 0; i < (len >> 2); i++) {
+					val = musb_readl(fifo, 0);
+					*(u16 *)buf = (u16)(val & 0xffff);
+					buf += 2;
+					*(u16 *)buf = (u16)(val >> 16);
+					buf += 2;
+				}
+				len &= 0x03;
+			}
+		}
+	}
+
+	if (len > 0)
+		tusb_fifo_read_unaligned(fifo, buf, len);
+}
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+
+/* This is used by gadget drivers, and OTG transceiver logic, allowing
+ * at most mA current to be drawn from VBUS during a Default-B session
+ * (that is, while VBUS exceeds 4.4V).  In Default-A (including pure host
+ * mode), or low power Default-B sessions, something else supplies power.
+ * Caller must take care of locking.
+ */
+static int tusb_draw_power(struct otg_transceiver *x, unsigned mA)
+{
+	struct musb	*musb = container_of(x, struct musb, xceiv);
+	void __iomem	*tbase = musb->ctrl_base;
+	u32		reg;
+
+	/*
+	 * Keep clock active when enabled. Note that this is not tied to
+	 * drawing VBUS, as with OTG, mA can be less than musb->min_power.
+	 */
+	if (musb->set_clock) {
+		if (mA)
+			musb->set_clock(musb->clock, 1);
+		else
+			musb->set_clock(musb->clock, 0);
+	}
+
+	/* tps65030 seems to consume max 100mA, with maybe 60mA available
+	 * (measured on one board) for things other than tps and tusb.
+	 *
+	 * Boards sharing the CPU clock with CLKIN will need to prevent
+	 * certain idle sleep states while the USB link is active.
+	 *
+	 * REVISIT we could use VBUS to supply only _one_ of { 1.5V, 3.3V }.
+	 * The actual current usage would be very board-specific.  For now,
+	 * it's simpler to just use an aggregate (also board-specific).
+	 */
+	if (x->default_a || mA < (musb->min_power << 1))
+		mA = 0;
+
+	reg = musb_readl(tbase, TUSB_PRCM_MNGMT);
+	if (mA) {
+		musb->is_bus_powered = 1;
+		reg |= TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN;
+	} else {
+		musb->is_bus_powered = 0;
+		reg &= ~(TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN);
+	}
+	musb_writel(tbase, TUSB_PRCM_MNGMT, reg);
+
+	DBG(2, "draw max %d mA VBUS\n", mA);
+	return 0;
+}
+
+#else
+#define tusb_draw_power	NULL
+#endif
+
+/* workaround for issue 13:  change clock during chip idle
+ * (to be fixed in rev3 silicon) ... symptoms include disconnect
+ * or looping suspend/resume cycles
+ */
+static void tusb_set_clock_source(struct musb *musb, unsigned mode)
+{
+	void __iomem	*tbase = musb->ctrl_base;
+	u32		reg;
+
+	reg = musb_readl(tbase, TUSB_PRCM_CONF);
+	reg &= ~TUSB_PRCM_CONF_SYS_CLKSEL(0x3);
+
+	/* 0 = refclk (clkin, XI)
+	 * 1 = PHY 60 MHz (internal PLL)
+	 * 2 = not supported
+	 * 3 = what?
+	 */
+	if (mode > 0)
+		reg |= TUSB_PRCM_CONF_SYS_CLKSEL(mode & 0x3);
+
+	musb_writel(tbase, TUSB_PRCM_CONF, reg);
+
+	/* FIXME tusb6010_platform_retime(mode == 0); */
+}
+
+/*
+ * Idle TUSB6010 until next wake-up event; NOR access always wakes.
+ * Other code ensures that we idle unless we're connected _and_ the
+ * USB link is not suspended ... and tells us the relevant wakeup
+ * events.  SW_EN for voltage is handled separately.
+ */
+void tusb_allow_idle(struct musb *musb, u32 wakeup_enables)
+{
+	void __iomem	*tbase = musb->ctrl_base;
+	u32		reg;
+
+	if ((wakeup_enables & TUSB_PRCM_WBUS)
+			&& (tusb_get_revision(musb) == TUSB_REV_30))
+		tusb_wbus_quirk(musb, 1);
+
+	tusb_set_clock_source(musb, 0);
+
+	wakeup_enables |= TUSB_PRCM_WNORCS;
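+	/* the mask register is written as the complement, so a cleared
+	 * bit enables that wakeup source
+	 */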
+	musb_writel(tbase, TUSB_PRCM_WAKEUP_MASK, ~wakeup_enables);
+
+	/* REVISIT writeup of WID implies that if WID set and ID is grounded,
+	 * TUSB_PHY_OTG_CTRL.TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP must be cleared.
+	 * Presumably that's mostly to save power, hence WID is immaterial ...
+	 */
+
+	reg = musb_readl(tbase, TUSB_PRCM_MNGMT);
+	/* issue 4: when driving vbus, use hipower (vbus_det) comparator */
+	if (is_host_active(musb)) {
+		reg |= TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN;
+		reg &= ~TUSB_PRCM_MNGMT_OTG_SESS_END_EN;
+	} else {
+		reg |= TUSB_PRCM_MNGMT_OTG_SESS_END_EN;
+		reg &= ~TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN;
+	}
+	reg |= TUSB_PRCM_MNGMT_PM_IDLE | TUSB_PRCM_MNGMT_DEV_IDLE;
+	musb_writel(tbase, TUSB_PRCM_MNGMT, reg);
+
+	DBG(6, "idle, wake on %02x\n", wakeup_enables);
+}
+
+/*
+ * Updates cable VBUS status. Caller must take care of locking.
+ */
+int musb_platform_get_vbus_status(struct musb *musb)
+{
+	void __iomem	*tbase = musb->ctrl_base;
+	u32		otg_stat, prcm_mngmt;
+	int		ret = 0;
+
+	otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
+	prcm_mngmt = musb_readl(tbase, TUSB_PRCM_MNGMT);
+
+	/* Temporarily enable VBUS detection if it was disabled for
+	 * suspend mode. Unless it's enabled otg_stat and devctl will
+	 * not show correct VBUS state.
+	 */
+	if (!(prcm_mngmt & TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN)) {
+		u32 tmp = prcm_mngmt;
+		tmp |= TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN;
+		musb_writel(tbase, TUSB_PRCM_MNGMT, tmp);
+		otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
+		musb_writel(tbase, TUSB_PRCM_MNGMT, prcm_mngmt);
+	}
+
+	if (otg_stat & TUSB_DEV_OTG_STAT_VBUS_VALID)
+		ret = 1;
+
+	return ret;
+}
+
+static struct timer_list musb_idle_timer;
+
+static void musb_do_idle(unsigned long _musb)
+{
+	struct musb	*musb = (void *)_musb;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&musb->lock, flags);
+
+	switch (musb->xceiv.state) {
+	case OTG_STATE_A_WAIT_BCON:
+		if ((musb->a_wait_bcon != 0)
+			&& (musb->idle_timeout == 0
+				|| time_after(jiffies, musb->idle_timeout))) {
+			DBG(4, "Nothing connected %s, turning off VBUS\n",
+					otg_state_string(musb));
+		}
+		/* FALLTHROUGH */
+	case OTG_STATE_A_IDLE:
+		tusb_source_power(musb, 0);
+	default:
+		break;
+	}
+
+	if (!musb->is_active) {
+		u32	wakeups;
+
+		/* wait until khubd handles port change status */
+		if (is_host_active(musb) && (musb->port1_status >> 16))
+			goto done;
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+		if (is_peripheral_enabled(musb) && !musb->gadget_driver)
+			wakeups = 0;
+		else {
+			wakeups = TUSB_PRCM_WHOSTDISCON
+					| TUSB_PRCM_WBUS
+					| TUSB_PRCM_WVBUS;
+			if (is_otg_enabled(musb))
+				wakeups |= TUSB_PRCM_WID;
+		}
+#else
+		wakeups = TUSB_PRCM_WHOSTDISCON | TUSB_PRCM_WBUS;
+#endif
+		tusb_allow_idle(musb, wakeups);
+	}
+done:
+	spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+/*
+ * Maybe put TUSB6010 into idle mode depending on USB link status,
+ * like "disconnected" or "suspended".  We'll be woken out of it by
+ * connect, resume, or disconnect.
+ *
+ * Needs to be called as the last function everywhere where there is
+ * register access to TUSB6010 because of NOR flash wake-up.
+ * Caller should own controller spinlock.
+ *
+ * Delay because peripheral enables D+ pullup 3msec after SE0, and
+ * we don't want to treat that full speed J as a wakeup event.
+ * ... peripherals must draw only suspend current after 10 msec.
+ */
+void musb_platform_try_idle(struct musb *musb, unsigned long timeout)
+{
+	unsigned long		default_timeout = jiffies + msecs_to_jiffies(3);
+	static unsigned long	last_timer;
+
+	if (timeout == 0)
+		timeout = default_timeout;
+
+	/* Never idle if active, or when VBUS timeout is not set as host */
+	if (musb->is_active || ((musb->a_wait_bcon == 0)
+			&& (musb->xceiv.state == OTG_STATE_A_WAIT_BCON))) {
+		DBG(4, "%s active, deleting timer\n", otg_state_string(musb));
+		del_timer(&musb_idle_timer);
+		last_timer = jiffies;
+		return;
+	}
+
+	if (time_after(last_timer, timeout)) {
+		if (!timer_pending(&musb_idle_timer))
+			last_timer = timeout;
+		else {
+			DBG(4, "Longer idle timer already pending, ignoring\n");
+			return;
+		}
+	}
+	last_timer = timeout;
+
+	DBG(4, "%s inactive, starting idle timer for %lu ms\n",
+		otg_state_string(musb),
+		(unsigned long)jiffies_to_msecs(timeout - jiffies));
+	mod_timer(&musb_idle_timer, timeout);
+}
+
+/* ticks of 60 MHz clock */
+#define DEVCLOCK		60000000
+#define OTG_TIMER_MS(msecs)	((msecs) \
+		? (TUSB_DEV_OTG_TIMER_VAL((DEVCLOCK/1000)*(msecs)) \
+				| TUSB_DEV_OTG_TIMER_ENABLE) \
+		: 0)
+
+static void tusb_source_power(struct musb *musb, int is_on)
+{
+	void __iomem	*tbase = musb->ctrl_base;
+	u32		conf, prcm, timer;
+	u8		devctl;
+
+	/* HDRC controls CPEN, but beware current surges during device
+	 * connect.  They can trigger transient overcurrent conditions
+	 * that must be ignored.
+	 */
+
+	prcm = musb_readl(tbase, TUSB_PRCM_MNGMT);
+	conf = musb_readl(tbase, TUSB_DEV_CONF);
+	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+
+	if (is_on) {
+		if (musb->set_clock)
+			musb->set_clock(musb->clock, 1);
+		timer = OTG_TIMER_MS(OTG_TIME_A_WAIT_VRISE);
+		musb->xceiv.default_a = 1;
+		musb->xceiv.state = OTG_STATE_A_WAIT_VRISE;
+		devctl |= MUSB_DEVCTL_SESSION;
+
+		conf |= TUSB_DEV_CONF_USB_HOST_MODE;
+		MUSB_HST_MODE(musb);
+	} else {
+		u32	otg_stat;
+
+		timer = 0;
+
+		/* If ID pin is grounded, we want to be a_idle */
+		otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
+		if (!(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS)) {
+			switch (musb->xceiv.state) {
+			case OTG_STATE_A_WAIT_VRISE:
+			case OTG_STATE_A_WAIT_BCON:
+				musb->xceiv.state = OTG_STATE_A_WAIT_VFALL;
+				break;
+			case OTG_STATE_A_WAIT_VFALL:
+				musb->xceiv.state = OTG_STATE_A_IDLE;
+				break;
+			default:
+				musb->xceiv.state = OTG_STATE_A_IDLE;
+			}
+			musb->is_active = 0;
+			musb->xceiv.default_a = 1;
+			MUSB_HST_MODE(musb);
+		} else {
+			musb->is_active = 0;
+			musb->xceiv.default_a = 0;
+			musb->xceiv.state = OTG_STATE_B_IDLE;
+			MUSB_DEV_MODE(musb);
+		}
+
+		devctl &= ~MUSB_DEVCTL_SESSION;
+		conf &= ~TUSB_DEV_CONF_USB_HOST_MODE;
+		if (musb->set_clock)
+			musb->set_clock(musb->clock, 0);
+	}
+	prcm &= ~(TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN);
+
+	musb_writel(tbase, TUSB_PRCM_MNGMT, prcm);
+	musb_writel(tbase, TUSB_DEV_OTG_TIMER, timer);
+	musb_writel(tbase, TUSB_DEV_CONF, conf);
+	musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
+	DBG(1, "VBUS %s, devctl %02x otg %3x conf %08x prcm %08x\n",
+		otg_state_string(musb),
+		musb_readb(musb->mregs, MUSB_DEVCTL),
+		musb_readl(tbase, TUSB_DEV_OTG_STAT),
+		conf, prcm);
+}
+
+/*
+ * Sets the mode to OTG, peripheral or host by changing the ID detection.
+ * Caller must take care of locking.
+ *
+ * Note that if a mini-A cable is plugged in the ID line will stay down as
+ * the weak ID pull-up is not able to pull the ID up.
+ *
+ * REVISIT: It would be possible to add support for changing between host
+ * and peripheral modes in non-OTG configurations by reconfiguring hardware
+ * and then setting musb->board_mode. For now, only support OTG mode.
+ */
+void musb_platform_set_mode(struct musb *musb, u8 musb_mode)
+{
+	void __iomem	*tbase = musb->ctrl_base;
+	u32		otg_stat, phy_otg_ctrl, phy_otg_ena, dev_conf;
+
+	if (musb->board_mode != MUSB_OTG) {
+		ERR("Changing mode currently only supported in OTG mode\n");
+		return;
+	}
+
+	otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
+	phy_otg_ctrl = musb_readl(tbase, TUSB_PHY_OTG_CTRL);
+	phy_otg_ena = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE);
+	dev_conf = musb_readl(tbase, TUSB_DEV_CONF);
+
+	switch (musb_mode) {
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+	case MUSB_HOST:		/* Disable PHY ID detect, ground ID */
+		phy_otg_ctrl &= ~TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+		phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+		dev_conf |= TUSB_DEV_CONF_ID_SEL;
+		dev_conf &= ~TUSB_DEV_CONF_SOFT_ID;
+		break;
+#endif
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+	case MUSB_PERIPHERAL:	/* Disable PHY ID detect, keep ID pull-up on */
+		phy_otg_ctrl |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+		phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+		dev_conf |= (TUSB_DEV_CONF_ID_SEL | TUSB_DEV_CONF_SOFT_ID);
+		break;
+#endif
+
+#ifdef CONFIG_USB_MUSB_OTG
+	case MUSB_OTG:		/* Use PHY ID detection */
+		phy_otg_ctrl |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+		phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+		dev_conf &= ~(TUSB_DEV_CONF_ID_SEL | TUSB_DEV_CONF_SOFT_ID);
+		break;
+#endif
+
+	default:
+		DBG(2, "Trying to set unknown mode %i\n", musb_mode);
+	}
+
+	musb_writel(tbase, TUSB_PHY_OTG_CTRL,
+			TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ctrl);
+	musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE,
+			TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ena);
+	musb_writel(tbase, TUSB_DEV_CONF, dev_conf);
+
+	otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
+	if ((musb_mode == MUSB_PERIPHERAL) &&
+			!(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS))
+		INFO("Cannot be peripheral with mini-A cable, "
+			"otg_stat: %08x\n", otg_stat);
+}
+
+static inline unsigned long
+tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
+{
+	u32		otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
+	unsigned long	idle_timeout = 0;
+
+	/* ID pin */
+	if ((int_src & TUSB_INT_SRC_ID_STATUS_CHNG)) {
+		int	default_a;
+
+		if (is_otg_enabled(musb))
+			default_a = !(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS);
+		else
+			default_a = is_host_enabled(musb);
+		DBG(2, "Default-%c\n", default_a ? 'A' : 'B');
+		musb->xceiv.default_a = default_a;
+		tusb_source_power(musb, default_a);
+
+		/* Don't allow idling immediately */
+		if (default_a)
+			idle_timeout = jiffies + (HZ * 3);
+	}
+
+	/* VBUS state change */
+	if (int_src & TUSB_INT_SRC_VBUS_SENSE_CHNG) {
+
+		/* B-dev state machine:  no vbus ~= disconnect */
+		if ((is_otg_enabled(musb) && !musb->xceiv.default_a)
+				|| !is_host_enabled(musb)) {
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+			/* ? musb_root_disconnect(musb); */
+			musb->port1_status &=
+				~(USB_PORT_STAT_CONNECTION
+				| USB_PORT_STAT_ENABLE
+				| USB_PORT_STAT_LOW_SPEED
+				| USB_PORT_STAT_HIGH_SPEED
+				| USB_PORT_STAT_TEST
+				);
+#endif
+
+			if (otg_stat & TUSB_DEV_OTG_STAT_SESS_END) {
+				DBG(1, "Forcing disconnect (no interrupt)\n");
+				if (musb->xceiv.state != OTG_STATE_B_IDLE) {
+					/* INTR_DISCONNECT can hide... */
+					musb->xceiv.state = OTG_STATE_B_IDLE;
+					musb->int_usb |= MUSB_INTR_DISCONNECT;
+				}
+				musb->is_active = 0;
+			}
+			DBG(2, "vbus change, %s, otg %03x\n",
+				otg_state_string(musb), otg_stat);
+			idle_timeout = jiffies + (1 * HZ);
+			schedule_work(&musb->irq_work);
+
+		} else /* A-dev state machine */ {
+			DBG(2, "vbus change, %s, otg %03x\n",
+				otg_state_string(musb), otg_stat);
+
+			switch (musb->xceiv.state) {
+			case OTG_STATE_A_IDLE:
+				DBG(2, "Got SRP, turning on VBUS\n");
+				musb_set_vbus(musb, 1);
+
+				/* CONNECT can wake if a_wait_bcon is set */
+				if (musb->a_wait_bcon != 0)
+					musb->is_active = 0;
+				else
+					musb->is_active = 1;
+
+				/*
+				 * OPT FS A TD.4.6 needs few seconds for
+				 * A_WAIT_VRISE
+				 */
+				idle_timeout = jiffies + (2 * HZ);
+
+				break;
+			case OTG_STATE_A_WAIT_VRISE:
+				/* ignore; A-session-valid < VBUS_VALID/2,
+				 * we monitor this with the timer
+				 */
+				break;
+			case OTG_STATE_A_WAIT_VFALL:
+				/* REVISIT this irq triggers during short
+				 * spikes caused by enumeration ...
+				 */
+				if (musb->vbuserr_retry) {
+					musb->vbuserr_retry--;
+					tusb_source_power(musb, 1);
+				} else {
+					musb->vbuserr_retry
+						= VBUSERR_RETRY_COUNT;
+					tusb_source_power(musb, 0);
+				}
+				break;
+			default:
+				break;
+			}
+		}
+	}
+
+	/* OTG timer expiration */
+	if (int_src & TUSB_INT_SRC_OTG_TIMEOUT) {
+		u8	devctl;
+
+		DBG(4, "%s timer, %03x\n", otg_state_string(musb), otg_stat);
+
+		switch (musb->xceiv.state) {
+		case OTG_STATE_A_WAIT_VRISE:
+			/* VBUS has probably been valid for a while now,
+			 * but may well have bounced out of range a bit
+			 */
+			devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+			if (otg_stat & TUSB_DEV_OTG_STAT_VBUS_VALID) {
+				if ((devctl & MUSB_DEVCTL_VBUS)
+						!= MUSB_DEVCTL_VBUS) {
+					DBG(2, "devctl %02x\n", devctl);
+					break;
+				}
+				musb->xceiv.state = OTG_STATE_A_WAIT_BCON;
+				musb->is_active = 0;
+				idle_timeout = jiffies
+					+ msecs_to_jiffies(musb->a_wait_bcon);
+			} else {
+				/* REVISIT report overcurrent to hub? */
+				ERR("vbus too slow, devctl %02x\n", devctl);
+				tusb_source_power(musb, 0);
+			}
+			break;
+		case OTG_STATE_A_WAIT_BCON:
+			if (musb->a_wait_bcon != 0)
+				idle_timeout = jiffies
+					+ msecs_to_jiffies(musb->a_wait_bcon);
+			break;
+		case OTG_STATE_A_SUSPEND:
+			break;
+		case OTG_STATE_B_WAIT_ACON:
+			break;
+		default:
+			break;
+		}
+	}
+	schedule_work(&musb->irq_work);
+
+	return idle_timeout;
+}
+
+static irqreturn_t tusb_interrupt(int irq, void *__hci)
+{
+	struct musb	*musb = __hci;
+	void __iomem	*tbase = musb->ctrl_base;
+	unsigned long	flags, idle_timeout = 0;
+	u32		int_mask, int_src;
+
+	spin_lock_irqsave(&musb->lock, flags);
+
+	/* Mask all interrupts to allow using both edge and level GPIO irq */
+	int_mask = musb_readl(tbase, TUSB_INT_MASK);
+	musb_writel(tbase, TUSB_INT_MASK, ~TUSB_INT_MASK_RESERVED_BITS);
+
+	int_src = musb_readl(tbase, TUSB_INT_SRC) & ~TUSB_INT_SRC_RESERVED_BITS;
+	DBG(3, "TUSB IRQ %08x\n", int_src);
+
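+	/* the low byte of TUSB_INT_SRC mirrors the Mentor core USB
+	 * interrupts (see the TUSB_INT_SRC_USB_IP_* bits)
+	 */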
+	musb->int_usb = (u8) int_src;
+
+	/* Acknowledge wake-up source interrupts */
+	if (int_src & TUSB_INT_SRC_DEV_WAKEUP) {
+		u32	reg;
+		u32	i;
+
+		if (tusb_get_revision(musb) == TUSB_REV_30)
+			tusb_wbus_quirk(musb, 0);
+
+		/* there are issues re-locking the PLL on wakeup ... */
+
+		/* work around issue 8 */
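+		/* poll the scratch pad until a write sticks, showing the
+		 * NOR interface is awake again
+		 */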
+		for (i = 0xf7f7f7; i > 0xf7f7f7 - 1000; i--) {
+			musb_writel(tbase, TUSB_SCRATCH_PAD, 0);
+			musb_writel(tbase, TUSB_SCRATCH_PAD, i);
+			reg = musb_readl(tbase, TUSB_SCRATCH_PAD);
+			if (reg == i)
+				break;
+			DBG(6, "TUSB NOR not ready\n");
+		}
+
+		/* work around issue 13 (2nd half) */
+		tusb_set_clock_source(musb, 1);
+
+		reg = musb_readl(tbase, TUSB_PRCM_WAKEUP_SOURCE);
+		musb_writel(tbase, TUSB_PRCM_WAKEUP_CLEAR, reg);
+		if (reg & ~TUSB_PRCM_WNORCS) {
+			musb->is_active = 1;
+			schedule_work(&musb->irq_work);
+		}
+		DBG(3, "wake %sactive %02x\n",
+				musb->is_active ? "" : "in", reg);
+
+		/* REVISIT host side TUSB_PRCM_WHOSTDISCON, TUSB_PRCM_WBUS */
+	}
+
+	if (int_src & TUSB_INT_SRC_USB_IP_CONN)
+		del_timer(&musb_idle_timer);
+
+	/* OTG state change reports (annoyingly) not issued by Mentor core */
+	if (int_src & (TUSB_INT_SRC_VBUS_SENSE_CHNG
+				| TUSB_INT_SRC_OTG_TIMEOUT
+				| TUSB_INT_SRC_ID_STATUS_CHNG))
+		idle_timeout = tusb_otg_ints(musb, int_src, tbase);
+
+	/* TX dma callback must be handled here, RX dma callback is
+	 * handled in tusb_omap_dma_cb.
+	 */
+	if ((int_src & TUSB_INT_SRC_TXRX_DMA_DONE)) {
+		u32	dma_src = musb_readl(tbase, TUSB_DMA_INT_SRC);
+		u32	real_dma_src = musb_readl(tbase, TUSB_DMA_INT_MASK);
+
+		DBG(3, "DMA IRQ %08x\n", dma_src);
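+		/* keep only sources that are pending and not masked */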
+		real_dma_src = ~real_dma_src & dma_src;
+		if (tusb_dma_omap() && real_dma_src) {
+			int	tx_source = (real_dma_src & 0xffff);
+			int	i;
+
+			for (i = 1; i <= 15; i++) {
+				if (tx_source & (1 << i)) {
+					DBG(3, "completing ep%i %s\n", i, "tx");
+					musb_dma_completion(musb, i, 1);
+				}
+			}
+		}
+		musb_writel(tbase, TUSB_DMA_INT_CLEAR, dma_src);
+	}
+
+	/* EP interrupts. In OCP mode tusb6010 mirrors the MUSB interrupts */
+	if (int_src & (TUSB_INT_SRC_USB_IP_TX | TUSB_INT_SRC_USB_IP_RX)) {
+		u32	musb_src = musb_readl(tbase, TUSB_USBIP_INT_SRC);
+
+		musb_writel(tbase, TUSB_USBIP_INT_CLEAR, musb_src);
+		musb->int_rx = (((musb_src >> 16) & 0xffff) << 1);
+		musb->int_tx = (musb_src & 0xffff);
+	} else {
+		musb->int_rx = 0;
+		musb->int_tx = 0;
+	}
+
+	if (int_src & (TUSB_INT_SRC_USB_IP_TX | TUSB_INT_SRC_USB_IP_RX | 0xff))
+		musb_interrupt(musb);
+
+	/* Acknowledge TUSB interrupts. Clear only non-reserved bits */
+	musb_writel(tbase, TUSB_INT_SRC_CLEAR,
+		int_src & ~TUSB_INT_MASK_RESERVED_BITS);
+
+	musb_platform_try_idle(musb, idle_timeout);
+
+	musb_writel(tbase, TUSB_INT_MASK, int_mask);
+	spin_unlock_irqrestore(&musb->lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+static int dma_off;
+
+/*
+ * Enables TUSB6010. Caller must take care of locking.
+ * REVISIT:
+ * - Check what is unnecessary in MGC_HdrcStart()
+ */
+void musb_platform_enable(struct musb *musb)
+{
+	void __iomem	*tbase = musb->ctrl_base;
+
+	/* Setup TUSB6010 main interrupt mask. Enable all interrupts except SOF.
+	 * REVISIT: Enable and deal with TUSB_INT_SRC_USB_IP_SOF */
+	musb_writel(tbase, TUSB_INT_MASK, TUSB_INT_SRC_USB_IP_SOF);
+
+	/* Setup TUSB interrupt, disable DMA and GPIO interrupts */
+	musb_writel(tbase, TUSB_USBIP_INT_MASK, 0);
+	musb_writel(tbase, TUSB_DMA_INT_MASK, 0x7fffffff);
+	musb_writel(tbase, TUSB_GPIO_INT_MASK, 0x1ff);
+
+	/* Clear all subsystem interrupts */
+	musb_writel(tbase, TUSB_USBIP_INT_CLEAR, 0x7fffffff);
+	musb_writel(tbase, TUSB_DMA_INT_CLEAR, 0x7fffffff);
+	musb_writel(tbase, TUSB_GPIO_INT_CLEAR, 0x1ff);
+
+	/* Acknowledge pending interrupt(s) */
+	musb_writel(tbase, TUSB_INT_SRC_CLEAR, ~TUSB_INT_MASK_RESERVED_BITS);
+
+	/* Only 0 clock cycles for minimum interrupt de-assertion time and
+	 * interrupt polarity active low seems to work reliably here */
+	musb_writel(tbase, TUSB_INT_CTRL_CONF,
+			TUSB_INT_CTRL_CONF_INT_RELCYC(0));
+
+	set_irq_type(musb->nIrq, IRQ_TYPE_LEVEL_LOW);
+
+	/* maybe force into the Default-A OTG state machine */
+	if (!(musb_readl(tbase, TUSB_DEV_OTG_STAT)
+			& TUSB_DEV_OTG_STAT_ID_STATUS))
+		musb_writel(tbase, TUSB_INT_SRC_SET,
+				TUSB_INT_SRC_ID_STATUS_CHNG);
+
+	if (is_dma_capable() && dma_off)
+		printk(KERN_WARNING "%s %s: dma not reactivated\n",
+				__FILE__, __func__);
+	else
+		dma_off = 1;
+}
+
+/*
+ * Disables TUSB6010. Caller must take care of locking.
+ */
+void musb_platform_disable(struct musb *musb)
+{
+	void __iomem	*tbase = musb->ctrl_base;
+
+	/* FIXME stop DMA, IRQs, timers, ... */
+
+	/* disable all IRQs */
+	musb_writel(tbase, TUSB_INT_MASK, ~TUSB_INT_MASK_RESERVED_BITS);
+	musb_writel(tbase, TUSB_USBIP_INT_MASK, 0x7fffffff);
+	musb_writel(tbase, TUSB_DMA_INT_MASK, 0x7fffffff);
+	musb_writel(tbase, TUSB_GPIO_INT_MASK, 0x1ff);
+
+	del_timer(&musb_idle_timer);
+
+	if (is_dma_capable() && !dma_off) {
+		printk(KERN_WARNING "%s %s: dma still active\n",
+				__FILE__, __func__);
+		dma_off = 1;
+	}
+}
+
+/*
+ * Sets up TUSB6010 CPU interface specific signals and registers
+ * Note: Settings optimized for OMAP24xx
+ */
+static void __init tusb_setup_cpu_interface(struct musb *musb)
+{
+	void __iomem	*tbase = musb->ctrl_base;
+
+	/*
+	 * Disable GPIO[5:0] pullups (used as output DMA requests)
+	 * Don't disable GPIO[7:6] as they are needed for wake-up.
+	 */
+	musb_writel(tbase, TUSB_PULLUP_1_CTRL, 0x0000003F);
+
+	/* Disable all pullups on NOR IF, DMAREQ0 and DMAREQ1 */
+	musb_writel(tbase, TUSB_PULLUP_2_CTRL, 0x01FFFFFF);
+
+	/* Turn GPIO[5:0] to DMAREQ[5:0] signals */
+	musb_writel(tbase, TUSB_GPIO_CONF, TUSB_GPIO_CONF_DMAREQ(0x3f));
+
+	/* Burst size 16x16 bits, all six DMA requests enabled, DMA request
+	 * de-assertion time 2 system clocks p 62 */
+	musb_writel(tbase, TUSB_DMA_REQ_CONF,
+		TUSB_DMA_REQ_CONF_BURST_SIZE(2) |
+		TUSB_DMA_REQ_CONF_DMA_REQ_EN(0x3f) |
+		TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(2));
+
+	/* Set 0 wait count for synchronous burst access */
+	musb_writel(tbase, TUSB_WAIT_COUNT, 1);
+}
+
+static int __init tusb_start(struct musb *musb)
+{
+	void __iomem	*tbase = musb->ctrl_base;
+	int		ret = 0;
+	unsigned long	flags;
+	u32		reg;
+
+	if (musb->board_set_power)
+		ret = musb->board_set_power(1);
+	if (ret != 0) {
+		printk(KERN_ERR "tusb: Cannot enable TUSB6010\n");
+		return ret;
+	}
+
+	spin_lock_irqsave(&musb->lock, flags);
+
+	if (musb_readl(tbase, TUSB_PROD_TEST_RESET) !=
+		TUSB_PROD_TEST_RESET_VAL) {
+		printk(KERN_ERR "tusb: Unable to detect TUSB6010\n");
+		goto err;
+	}
+
+	ret = tusb_print_revision(musb);
+	if (ret < 2) {
+		printk(KERN_ERR "tusb: Unsupported TUSB6010 revision %i\n",
+				ret);
+		goto err;
+	}
+
+	/* The uint bit for "USB non-PDR interrupt enable" has to be 1 when
+	 * NOR FLASH interface is used */
+	musb_writel(tbase, TUSB_VLYNQ_CTRL, 8);
+
+	/* Select PHY free running 60MHz as a system clock */
+	tusb_set_clock_source(musb, 1);
+
+	/* VBus valid timer 1us, disable DFT/Debug and VLYNQ clocks for
+	 * power saving, enable VBus detect and session end comparators,
+	 * enable IDpullup, enable VBus charging */
+	musb_writel(tbase, TUSB_PRCM_MNGMT,
+		TUSB_PRCM_MNGMT_VBUS_VALID_TIMER(0xa) |
+		TUSB_PRCM_MNGMT_VBUS_VALID_FLT_EN |
+		TUSB_PRCM_MNGMT_OTG_SESS_END_EN |
+		TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN |
+		TUSB_PRCM_MNGMT_OTG_ID_PULLUP);
+	tusb_setup_cpu_interface(musb);
+
+	/* simplify:  always sense/pullup ID pins, as if in OTG mode */
+	reg = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE);
+	reg |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+	musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, reg);
+
+	reg = musb_readl(tbase, TUSB_PHY_OTG_CTRL);
+	reg |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+	musb_writel(tbase, TUSB_PHY_OTG_CTRL, reg);
+
+	spin_unlock_irqrestore(&musb->lock, flags);
+
+	return 0;
+
+err:
+	spin_unlock_irqrestore(&musb->lock, flags);
+
+	if (musb->board_set_power)
+		musb->board_set_power(0);
+
+	return -ENODEV;
+}
+
+int __init musb_platform_init(struct musb *musb)
+{
+	struct platform_device	*pdev;
+	struct resource		*mem;
+	void __iomem		*sync;
+	int			ret;
+
+	pdev = to_platform_device(musb->controller);
+
+	/* dma address for async dma */
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	musb->async = mem->start;
+
+	/* dma address for sync dma */
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!mem) {
+		pr_debug("no sync dma resource?\n");
+		return -ENODEV;
+	}
+	musb->sync = mem->start;
+
+	sync = ioremap(mem->start, mem->end - mem->start + 1);
+	if (!sync) {
+		pr_debug("ioremap for sync failed\n");
+		return -ENOMEM;
+	}
+	musb->sync_va = sync;
+
+	/* Offsets from base: VLYNQ at 0x000, MUSB regs at 0x400,
+	 * FIFOs at 0x600, TUSB at 0x800
+	 */
+	musb->mregs += TUSB_BASE_OFFSET;
+
+	ret = tusb_start(musb);
+	if (ret) {
+		printk(KERN_ERR "Could not start tusb6010 (%d)\n",
+				ret);
+		return -ENODEV;
+	}
+	musb->isr = tusb_interrupt;
+
+	if (is_host_enabled(musb))
+		musb->board_set_vbus = tusb_source_power;
+	if (is_peripheral_enabled(musb))
+		musb->xceiv.set_power = tusb_draw_power;
+
+	setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb);
+
+	return ret;
+}
+
+int musb_platform_exit(struct musb *musb)
+{
+	del_timer_sync(&musb_idle_timer);
+
+	if (musb->board_set_power)
+		musb->board_set_power(0);
+
+	iounmap(musb->sync_va);
+
+	return 0;
+}
diff --git a/drivers/usb/musb/tusb6010.h b/drivers/usb/musb/tusb6010.h
new file mode 100644
index 0000000..ab8c962
--- /dev/null
+++ b/drivers/usb/musb/tusb6010.h
@@ -0,0 +1,233 @@
+/*
+ * Definitions for TUSB6010 USB 2.0 OTG Dual Role controller
+ *
+ * Copyright (C) 2006 Nokia Corporation
+ * Jarkko Nikula <jarkko.nikula@nokia.com>
+ * Tony Lindgren <tony@atomide.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __TUSB6010_H__
+#define __TUSB6010_H__
+
+extern u8 tusb_get_revision(struct musb *musb);
+
+#ifdef CONFIG_USB_TUSB6010
+#define musb_in_tusb()			1
+#else
+#define musb_in_tusb()			0
+#endif
+
+#ifdef CONFIG_USB_TUSB_OMAP_DMA
+#define tusb_dma_omap()			1
+#else
+#define tusb_dma_omap()			0
+#endif
+
+/* VLYNQ control register. 32-bit at offset 0x000 */
+#define TUSB_VLYNQ_CTRL			0x004
+
+/* Mentor Graphics OTG core registers. 8-, 16- and 32-bit at offset 0x400 */
+#define TUSB_BASE_OFFSET		0x400
+
+/* FIFO registers 32-bit at offset 0x600 */
+#define TUSB_FIFO_BASE			0x600
+
+/* Device System & Control registers. 32-bit at offset 0x800 */
+#define TUSB_SYS_REG_BASE		0x800
+
+#define TUSB_DEV_CONF			(TUSB_SYS_REG_BASE + 0x000)
+#define		TUSB_DEV_CONF_USB_HOST_MODE		(1 << 16)
+#define		TUSB_DEV_CONF_PROD_TEST_MODE		(1 << 15)
+#define		TUSB_DEV_CONF_SOFT_ID			(1 << 1)
+#define		TUSB_DEV_CONF_ID_SEL			(1 << 0)
+
+#define TUSB_PHY_OTG_CTRL_ENABLE	(TUSB_SYS_REG_BASE + 0x004)
+#define TUSB_PHY_OTG_CTRL		(TUSB_SYS_REG_BASE + 0x008)
+#define		TUSB_PHY_OTG_CTRL_WRPROTECT		(0xa5 << 24)
+#define		TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP		(1 << 23)
+#define		TUSB_PHY_OTG_CTRL_OTG_VBUS_DET_EN	(1 << 19)
+#define		TUSB_PHY_OTG_CTRL_OTG_SESS_END_EN	(1 << 18)
+#define		TUSB_PHY_OTG_CTRL_TESTM2		(1 << 17)
+#define		TUSB_PHY_OTG_CTRL_TESTM1		(1 << 16)
+#define		TUSB_PHY_OTG_CTRL_TESTM0		(1 << 15)
+#define		TUSB_PHY_OTG_CTRL_TX_DATA2		(1 << 14)
+#define		TUSB_PHY_OTG_CTRL_TX_GZ2		(1 << 13)
+#define		TUSB_PHY_OTG_CTRL_TX_ENABLE2		(1 << 12)
+#define		TUSB_PHY_OTG_CTRL_DM_PULLDOWN		(1 << 11)
+#define		TUSB_PHY_OTG_CTRL_DP_PULLDOWN		(1 << 10)
+#define		TUSB_PHY_OTG_CTRL_OSC_EN		(1 << 9)
+#define		TUSB_PHY_OTG_CTRL_PHYREF_CLKSEL(v)	(((v) & 3) << 7)
+#define		TUSB_PHY_OTG_CTRL_PD			(1 << 6)
+#define		TUSB_PHY_OTG_CTRL_PLL_ON		(1 << 5)
+#define		TUSB_PHY_OTG_CTRL_EXT_RPU		(1 << 4)
+#define		TUSB_PHY_OTG_CTRL_PWR_GOOD		(1 << 3)
+#define		TUSB_PHY_OTG_CTRL_RESET			(1 << 2)
+#define		TUSB_PHY_OTG_CTRL_SUSPENDM		(1 << 1)
+#define		TUSB_PHY_OTG_CTRL_CLK_MODE		(1 << 0)
+
+/* OTG status register */
+#define TUSB_DEV_OTG_STAT		(TUSB_SYS_REG_BASE + 0x00c)
+#define		TUSB_DEV_OTG_STAT_PWR_CLK_GOOD		(1 << 8)
+#define		TUSB_DEV_OTG_STAT_SESS_END		(1 << 7)
+#define		TUSB_DEV_OTG_STAT_SESS_VALID		(1 << 6)
+#define		TUSB_DEV_OTG_STAT_VBUS_VALID		(1 << 5)
+#define		TUSB_DEV_OTG_STAT_VBUS_SENSE		(1 << 4)
+#define		TUSB_DEV_OTG_STAT_ID_STATUS		(1 << 3)
+#define		TUSB_DEV_OTG_STAT_HOST_DISCON		(1 << 2)
+#define		TUSB_DEV_OTG_STAT_LINE_STATE		(3 << 0)
+#define		TUSB_DEV_OTG_STAT_DP_ENABLE		(1 << 1)
+#define		TUSB_DEV_OTG_STAT_DM_ENABLE		(1 << 0)
+
+#define TUSB_DEV_OTG_TIMER		(TUSB_SYS_REG_BASE + 0x010)
+#	define TUSB_DEV_OTG_TIMER_ENABLE		(1 << 31)
+#	define TUSB_DEV_OTG_TIMER_VAL(v)		((v) & 0x07ffffff)
+#define TUSB_PRCM_REV			(TUSB_SYS_REG_BASE + 0x014)
+
+/* PRCM configuration register */
+#define TUSB_PRCM_CONF			(TUSB_SYS_REG_BASE + 0x018)
+#define		TUSB_PRCM_CONF_SFW_CPEN		(1 << 24)
+#define		TUSB_PRCM_CONF_SYS_CLKSEL(v)	(((v) & 3) << 16)
+
+/* PRCM management register */
+#define TUSB_PRCM_MNGMT			(TUSB_SYS_REG_BASE + 0x01c)
+#define		TUSB_PRCM_MNGMT_SRP_FIX_TIMER(v)	(((v) & 0xf) << 25)
+#define		TUSB_PRCM_MNGMT_SRP_FIX_EN		(1 << 24)
+#define		TUSB_PRCM_MNGMT_VBUS_VALID_TIMER(v)	(((v) & 0xf) << 20)
+#define		TUSB_PRCM_MNGMT_VBUS_VALID_FLT_EN	(1 << 19)
+#define		TUSB_PRCM_MNGMT_DFT_CLK_DIS		(1 << 18)
+#define		TUSB_PRCM_MNGMT_VLYNQ_CLK_DIS		(1 << 17)
+#define		TUSB_PRCM_MNGMT_OTG_SESS_END_EN		(1 << 10)
+#define		TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN		(1 << 9)
+#define		TUSB_PRCM_MNGMT_OTG_ID_PULLUP		(1 << 8)
+#define		TUSB_PRCM_MNGMT_15_SW_EN		(1 << 4)
+#define		TUSB_PRCM_MNGMT_33_SW_EN		(1 << 3)
+#define		TUSB_PRCM_MNGMT_5V_CPEN			(1 << 2)
+#define		TUSB_PRCM_MNGMT_PM_IDLE			(1 << 1)
+#define		TUSB_PRCM_MNGMT_DEV_IDLE		(1 << 0)
+
+/* Wake-up source clear and mask registers */
+#define TUSB_PRCM_WAKEUP_SOURCE		(TUSB_SYS_REG_BASE + 0x020)
+#define TUSB_PRCM_WAKEUP_CLEAR		(TUSB_SYS_REG_BASE + 0x028)
+#define TUSB_PRCM_WAKEUP_MASK		(TUSB_SYS_REG_BASE + 0x02c)
+#define		TUSB_PRCM_WAKEUP_RESERVED_BITS	(0xffffe << 13)
+#define		TUSB_PRCM_WGPIO_7	(1 << 12)
+#define		TUSB_PRCM_WGPIO_6	(1 << 11)
+#define		TUSB_PRCM_WGPIO_5	(1 << 10)
+#define		TUSB_PRCM_WGPIO_4	(1 << 9)
+#define		TUSB_PRCM_WGPIO_3	(1 << 8)
+#define		TUSB_PRCM_WGPIO_2	(1 << 7)
+#define		TUSB_PRCM_WGPIO_1	(1 << 6)
+#define		TUSB_PRCM_WGPIO_0	(1 << 5)
+#define		TUSB_PRCM_WHOSTDISCON	(1 << 4)	/* Host disconnect */
+#define		TUSB_PRCM_WBUS		(1 << 3)	/* USB bus resume */
+#define		TUSB_PRCM_WNORCS	(1 << 2)	/* NOR chip select */
+#define		TUSB_PRCM_WVBUS		(1 << 1)	/* OTG PHY VBUS */
+#define		TUSB_PRCM_WID		(1 << 0)	/* OTG PHY ID detect */
+
+#define TUSB_PULLUP_1_CTRL		(TUSB_SYS_REG_BASE + 0x030)
+#define TUSB_PULLUP_2_CTRL		(TUSB_SYS_REG_BASE + 0x034)
+#define TUSB_INT_CTRL_REV		(TUSB_SYS_REG_BASE + 0x038)
+#define TUSB_INT_CTRL_CONF		(TUSB_SYS_REG_BASE + 0x03c)
+#define TUSB_USBIP_INT_SRC		(TUSB_SYS_REG_BASE + 0x040)
+#define TUSB_USBIP_INT_SET		(TUSB_SYS_REG_BASE + 0x044)
+#define TUSB_USBIP_INT_CLEAR		(TUSB_SYS_REG_BASE + 0x048)
+#define TUSB_USBIP_INT_MASK		(TUSB_SYS_REG_BASE + 0x04c)
+#define TUSB_DMA_INT_SRC		(TUSB_SYS_REG_BASE + 0x050)
+#define TUSB_DMA_INT_SET		(TUSB_SYS_REG_BASE + 0x054)
+#define TUSB_DMA_INT_CLEAR		(TUSB_SYS_REG_BASE + 0x058)
+#define TUSB_DMA_INT_MASK		(TUSB_SYS_REG_BASE + 0x05c)
+#define TUSB_GPIO_INT_SRC		(TUSB_SYS_REG_BASE + 0x060)
+#define TUSB_GPIO_INT_SET		(TUSB_SYS_REG_BASE + 0x064)
+#define TUSB_GPIO_INT_CLEAR		(TUSB_SYS_REG_BASE + 0x068)
+#define TUSB_GPIO_INT_MASK		(TUSB_SYS_REG_BASE + 0x06c)
+
+/* NOR flash interrupt source registers */
+#define TUSB_INT_SRC			(TUSB_SYS_REG_BASE + 0x070)
+#define TUSB_INT_SRC_SET		(TUSB_SYS_REG_BASE + 0x074)
+#define TUSB_INT_SRC_CLEAR		(TUSB_SYS_REG_BASE + 0x078)
+#define TUSB_INT_MASK			(TUSB_SYS_REG_BASE + 0x07c)
+#define		TUSB_INT_SRC_TXRX_DMA_DONE		(1 << 24)
+#define		TUSB_INT_SRC_USB_IP_CORE		(1 << 17)
+#define		TUSB_INT_SRC_OTG_TIMEOUT		(1 << 16)
+#define		TUSB_INT_SRC_VBUS_SENSE_CHNG		(1 << 15)
+#define		TUSB_INT_SRC_ID_STATUS_CHNG		(1 << 14)
+#define		TUSB_INT_SRC_DEV_WAKEUP			(1 << 13)
+#define		TUSB_INT_SRC_DEV_READY			(1 << 12)
+#define		TUSB_INT_SRC_USB_IP_TX			(1 << 9)
+#define		TUSB_INT_SRC_USB_IP_RX			(1 << 8)
+#define		TUSB_INT_SRC_USB_IP_VBUS_ERR		(1 << 7)
+#define		TUSB_INT_SRC_USB_IP_VBUS_REQ		(1 << 6)
+#define		TUSB_INT_SRC_USB_IP_DISCON		(1 << 5)
+#define		TUSB_INT_SRC_USB_IP_CONN		(1 << 4)
+#define		TUSB_INT_SRC_USB_IP_SOF			(1 << 3)
+#define		TUSB_INT_SRC_USB_IP_RST_BABBLE		(1 << 2)
+#define		TUSB_INT_SRC_USB_IP_RESUME		(1 << 1)
+#define		TUSB_INT_SRC_USB_IP_SUSPEND		(1 << 0)
+
+/* NOR flash interrupt registers reserved bits. Must be written as 0 */
+#define		TUSB_INT_MASK_RESERVED_17		(0x3fff << 17)
+#define		TUSB_INT_MASK_RESERVED_13		(1 << 13)
+#define		TUSB_INT_MASK_RESERVED_8		(0xf << 8)
+#define		TUSB_INT_SRC_RESERVED_26		(0x1f << 26)
+#define		TUSB_INT_SRC_RESERVED_18		(0x3f << 18)
+#define		TUSB_INT_SRC_RESERVED_10		(0x03 << 10)
+
+/* Reserved bits for NOR flash interrupt mask and clear register */
+#define		TUSB_INT_MASK_RESERVED_BITS	(TUSB_INT_MASK_RESERVED_17 | \
+						TUSB_INT_MASK_RESERVED_13 | \
+						TUSB_INT_MASK_RESERVED_8)
+
+/* Reserved bits for NOR flash interrupt status register */
+#define		TUSB_INT_SRC_RESERVED_BITS	(TUSB_INT_SRC_RESERVED_26 | \
+						TUSB_INT_SRC_RESERVED_18 | \
+						TUSB_INT_SRC_RESERVED_10)
+
+#define TUSB_GPIO_REV			(TUSB_SYS_REG_BASE + 0x080)
+#define TUSB_GPIO_CONF			(TUSB_SYS_REG_BASE + 0x084)
+#define TUSB_DMA_CTRL_REV		(TUSB_SYS_REG_BASE + 0x100)
+#define TUSB_DMA_REQ_CONF		(TUSB_SYS_REG_BASE + 0x104)
+#define TUSB_EP0_CONF			(TUSB_SYS_REG_BASE + 0x108)
+#define TUSB_DMA_EP_MAP			(TUSB_SYS_REG_BASE + 0x148)
+
+/* Offsets from each ep base register */
+#define TUSB_EP_TX_OFFSET		0x10c	/* EP_IN in docs */
+#define TUSB_EP_RX_OFFSET		0x14c	/* EP_OUT in docs */
+#define TUSB_EP_MAX_PACKET_SIZE_OFFSET	0x188
+
+#define TUSB_WAIT_COUNT			(TUSB_SYS_REG_BASE + 0x1c8)
+#define TUSB_SCRATCH_PAD		(TUSB_SYS_REG_BASE + 0x1c4)
+#define TUSB_PROD_TEST_RESET		(TUSB_SYS_REG_BASE + 0x1d8)
+
+/* Device System & Control register bitfields */
+#define TUSB_INT_CTRL_CONF_INT_RELCYC(v)	(((v) & 0x7) << 18)
+#define TUSB_INT_CTRL_CONF_INT_POLARITY		(1 << 17)
+#define TUSB_INT_CTRL_CONF_INT_MODE		(1 << 16)
+#define TUSB_GPIO_CONF_DMAREQ(v)		(((v) & 0x3f) << 24)
+#define TUSB_DMA_REQ_CONF_BURST_SIZE(v)		(((v) & 3) << 26)
+#define TUSB_DMA_REQ_CONF_DMA_REQ_EN(v)		(((v) & 0x3f) << 20)
+#define TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(v)	(((v) & 0xf) << 16)
+#define TUSB_EP0_CONFIG_SW_EN			(1 << 8)
+#define TUSB_EP0_CONFIG_DIR_TX			(1 << 7)
+#define TUSB_EP0_CONFIG_XFR_SIZE(v)		((v) & 0x7f)
+#define TUSB_EP_CONFIG_SW_EN			(1 << 31)
+#define TUSB_EP_CONFIG_XFR_SIZE(v)		((v) & 0x7fffffff)
+#define TUSB_PROD_TEST_RESET_VAL		0xa596
+#define TUSB_EP_FIFO(ep)			(TUSB_FIFO_BASE + (ep) * 0x20)
+
+#define TUSB_DIDR1_LO				(TUSB_SYS_REG_BASE + 0x1f8)
+#define TUSB_DIDR1_HI				(TUSB_SYS_REG_BASE + 0x1fc)
+#define		TUSB_DIDR1_HI_CHIP_REV(v)		(((v) >> 17) & 0xf)
+#define			TUSB_DIDR1_HI_REV_20		0
+#define			TUSB_DIDR1_HI_REV_30		1
+#define			TUSB_DIDR1_HI_REV_31		2
+
+#define TUSB_REV_10	0x10
+#define TUSB_REV_20	0x20
+#define TUSB_REV_30	0x30
+#define TUSB_REV_31	0x31
+
+#endif /* __TUSB6010_H__ */
diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c
new file mode 100644
index 0000000..52f7f29
--- /dev/null
+++ b/drivers/usb/musb/tusb6010_omap.c
@@ -0,0 +1,719 @@
+/*
+ * TUSB6010 USB 2.0 OTG Dual Role controller OMAP DMA interface
+ *
+ * Copyright (C) 2006 Nokia Corporation
+ * Tony Lindgren <tony@atomide.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/usb.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <asm/arch/dma.h>
+#include <asm/arch/mux.h>
+
+#include "musb_core.h"
+
+#define to_chdat(c)		((struct tusb_omap_dma_ch *)(c)->private_data)
+
+#define MAX_DMAREQ		5	/* REVISIT: Really 6, but req5 not OK */
+
+struct tusb_omap_dma_ch {
+	struct musb		*musb;
+	void __iomem		*tbase;
+	unsigned long		phys_offset;
+	int			epnum;
+	u8			tx;
+	struct musb_hw_ep	*hw_ep;
+
+	int			ch;
+	s8			dmareq;
+	s8			sync_dev;
+
+	struct tusb_omap_dma	*tusb_dma;
+
+	void __iomem		*dma_addr;
+
+	u32			len;
+	u16			packet_sz;
+	u16			transfer_packet_sz;
+	u32			transfer_len;
+	u32			completed_len;
+};
+
+struct tusb_omap_dma {
+	struct dma_controller		controller;
+	struct musb			*musb;
+	void __iomem			*tbase;
+
+	int				ch;
+	s8				dmareq;
+	s8				sync_dev;
+	unsigned			multichannel:1;
+};
+
+static int tusb_omap_dma_start(struct dma_controller *c)
+{
+	struct tusb_omap_dma	*tusb_dma;
+
+	tusb_dma = container_of(c, struct tusb_omap_dma, controller);
+
+	/* DBG(3, "ep%i ch: %i\n", chdat->epnum, chdat->ch); */
+
+	return 0;
+}
+
+static int tusb_omap_dma_stop(struct dma_controller *c)
+{
+	struct tusb_omap_dma	*tusb_dma;
+
+	tusb_dma = container_of(c, struct tusb_omap_dma, controller);
+
+	/* DBG(3, "ep%i ch: %i\n", chdat->epnum, chdat->ch); */
+
+	return 0;
+}
+
+/*
+ * Allocate dmareq0 to the current channel unless it's already taken
+ */
+static inline int tusb_omap_use_shared_dmareq(struct tusb_omap_dma_ch *chdat)
+{
+	u32		reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
+
+	if (reg != 0) {
+		DBG(3, "ep%i dmareq0 is busy for ep%i\n",
+			chdat->epnum, reg & 0xf);
+		return -EAGAIN;
+	}
+
+	if (chdat->tx)
+		reg = (1 << 4) | chdat->epnum;
+	else
+		reg = chdat->epnum;
+
+	musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg);
+
+	return 0;
+}
+
+static inline void tusb_omap_free_shared_dmareq(struct tusb_omap_dma_ch *chdat)
+{
+	u32		reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
+
+	if ((reg & 0xf) != chdat->epnum) {
+		printk(KERN_ERR "ep%i trying to release dmareq0 for ep%i\n",
+			chdat->epnum, reg & 0xf);
+		return;
+	}
+	musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, 0);
+}
+
+/*
+ * See also musb_dma_completion in plat_uds.c and musb_g_[tx|rx]() in
+ * musb_gadget.c.
+ */
+static void tusb_omap_dma_cb(int lch, u16 ch_status, void *data)
+{
+	struct dma_channel	*channel = (struct dma_channel *)data;
+	struct tusb_omap_dma_ch	*chdat = to_chdat(channel);
+	struct tusb_omap_dma	*tusb_dma = chdat->tusb_dma;
+	struct musb		*musb = chdat->musb;
+	struct musb_hw_ep	*hw_ep = chdat->hw_ep;
+	void __iomem		*ep_conf = hw_ep->conf;
+	void __iomem		*mbase = musb->mregs;
+	unsigned long		remaining, flags, pio;
+	int			ch;
+
+	spin_lock_irqsave(&musb->lock, flags);
+
+	if (tusb_dma->multichannel)
+		ch = chdat->ch;
+	else
+		ch = tusb_dma->ch;
+
+	if (ch_status != OMAP_DMA_BLOCK_IRQ)
+		printk(KERN_ERR "TUSB DMA error status: %i\n", ch_status);
+
+	DBG(3, "ep%i %s dma callback ch: %i status: %x\n",
+		chdat->epnum, chdat->tx ? "tx" : "rx",
+		ch, ch_status);
+
+	if (chdat->tx)
+		remaining = musb_readl(ep_conf, TUSB_EP_TX_OFFSET);
+	else
+		remaining = musb_readl(ep_conf, TUSB_EP_RX_OFFSET);
+
+	remaining = TUSB_EP_CONFIG_XFR_SIZE(remaining);
+
+	/* HW issue #10: XFR_SIZE may get corrupted on DMA (both async & sync) */
+	if (unlikely(remaining > chdat->transfer_len)) {
+		DBG(2, "Corrupt %s dma ch%i XFR_SIZE: 0x%08lx\n",
+			chdat->tx ? "tx" : "rx", chdat->ch,
+			remaining);
+		remaining = 0;
+	}
+
+	channel->actual_len = chdat->transfer_len - remaining;
+	pio = chdat->len - channel->actual_len;
+
+	DBG(3, "DMA remaining %lu/%u\n", remaining, chdat->transfer_len);
+
+	/* Transfer remaining 1 - 31 bytes */
+	if (pio > 0 && pio < 32) {
+		u8	*buf;
+
+		DBG(3, "Using PIO for remaining %lu bytes\n", pio);
+		buf = phys_to_virt((u32)chdat->dma_addr) + chdat->transfer_len;
+		if (chdat->tx) {
+			dma_cache_maint(phys_to_virt((u32)chdat->dma_addr),
+					chdat->transfer_len, DMA_TO_DEVICE);
+			musb_write_fifo(hw_ep, pio, buf);
+		} else {
+			musb_read_fifo(hw_ep, pio, buf);
+			dma_cache_maint(phys_to_virt((u32)chdat->dma_addr),
+					chdat->transfer_len, DMA_FROM_DEVICE);
+		}
+		channel->actual_len += pio;
+	}
+
+	if (!tusb_dma->multichannel)
+		tusb_omap_free_shared_dmareq(chdat);
+
+	channel->status = MUSB_DMA_STATUS_FREE;
+
+	/* Handle only RX callbacks here. TX callbacks must be handled based
+	 * on the TUSB DMA status interrupt.
+	 * REVISIT: Use both TUSB DMA status interrupt and OMAP DMA callback
+	 * interrupt for RX and TX.
+	 */
+	if (!chdat->tx)
+		musb_dma_completion(musb, chdat->epnum, chdat->tx);
+
+	/* We must terminate short tx transfers manually by setting TXPKTRDY.
+	 * REVISIT: This same problem may occur with other MUSB dma as well.
+	 * Easy to test with g_ether by pinging the MUSB board with ping -s54.
+	 */
+	if ((chdat->transfer_len < chdat->packet_sz)
+			|| (chdat->transfer_len % chdat->packet_sz != 0)) {
+		u16	csr;
+
+		if (chdat->tx) {
+			DBG(3, "terminating short tx packet\n");
+			musb_ep_select(mbase, chdat->epnum);
+			csr = musb_readw(hw_ep->regs, MUSB_TXCSR);
+			csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY
+				| MUSB_TXCSR_P_WZC_BITS;
+			musb_writew(hw_ep->regs, MUSB_TXCSR, csr);
+		}
+	}
+
+	spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
+				u8 rndis_mode, dma_addr_t dma_addr, u32 len)
+{
+	struct tusb_omap_dma_ch		*chdat = to_chdat(channel);
+	struct tusb_omap_dma		*tusb_dma = chdat->tusb_dma;
+	struct musb			*musb = chdat->musb;
+	struct musb_hw_ep		*hw_ep = chdat->hw_ep;
+	void __iomem			*mbase = musb->mregs;
+	void __iomem			*ep_conf = hw_ep->conf;
+	dma_addr_t			fifo = hw_ep->fifo_sync;
+	struct omap_dma_channel_params	dma_params;
+	u32				dma_remaining;
+	int				src_burst, dst_burst;
+	u16				csr;
+	int				ch;
+	s8				dmareq;
+	s8				sync_dev;
+
+	if (unlikely(dma_addr & 0x1) || (len < 32) || (len > packet_sz))
+		return false;
+
+	/*
+	 * HW issue #10: Async dma will eventually corrupt the XFR_SIZE
+	 * register, which will cause missed DMA interrupts. We could try to
+	 * use a timer for the callback, but it is unsafe as the XFR_SIZE
+	 * register is corrupt, and we won't know if the DMA worked.
+	 */
+	if (dma_addr & 0x2)
+		return false;
+
+	/*
+	 * Because of HW issue #10, it seems like mixing sync DMA and async
+	 * PIO access can confuse the DMA. Make sure XFR_SIZE is reset before
+	 * using the channel for DMA.
+	 */
+	if (chdat->tx)
+		dma_remaining = musb_readl(ep_conf, TUSB_EP_TX_OFFSET);
+	else
+		dma_remaining = musb_readl(ep_conf, TUSB_EP_RX_OFFSET);
+
+	dma_remaining = TUSB_EP_CONFIG_XFR_SIZE(dma_remaining);
+	if (dma_remaining) {
+		DBG(2, "Busy %s dma ch%i, not using: %08x\n",
+			chdat->tx ? "tx" : "rx", chdat->ch,
+			dma_remaining);
+		return false;
+	}
+
+	chdat->transfer_len = len & ~0x1f;
+
+	if (len < packet_sz)
+		chdat->transfer_packet_sz = chdat->transfer_len;
+	else
+		chdat->transfer_packet_sz = packet_sz;
+
+	if (tusb_dma->multichannel) {
+		ch = chdat->ch;
+		dmareq = chdat->dmareq;
+		sync_dev = chdat->sync_dev;
+	} else {
+		if (tusb_omap_use_shared_dmareq(chdat) != 0) {
+			DBG(3, "could not get dma for ep%i\n", chdat->epnum);
+			return false;
+		}
+		if (tusb_dma->ch < 0) {
+			/* REVISIT: This should get blocked earlier, happens
+			 * with MSC ErrorRecoveryTest
+			 */
+			WARN_ON(1);
+			return false;
+		}
+
+		ch = tusb_dma->ch;
+		dmareq = tusb_dma->dmareq;
+		sync_dev = tusb_dma->sync_dev;
+		omap_set_dma_callback(ch, tusb_omap_dma_cb, channel);
+	}
+
+	chdat->packet_sz = packet_sz;
+	chdat->len = len;
+	channel->actual_len = 0;
+	chdat->dma_addr = (void __iomem *)dma_addr;
+	channel->status = MUSB_DMA_STATUS_BUSY;
+
+	/* Since we're recycling dma areas, we need to clean or invalidate */
+	if (chdat->tx)
+		dma_cache_maint(phys_to_virt(dma_addr), len, DMA_TO_DEVICE);
+	else
+		dma_cache_maint(phys_to_virt(dma_addr), len, DMA_FROM_DEVICE);
+
+	/* Use 16-bit transfer if dma_addr is not 32-bit aligned */
+	if ((dma_addr & 0x3) == 0) {
+		dma_params.data_type = OMAP_DMA_DATA_TYPE_S32;
+		dma_params.elem_count = 8;		/* Elements in frame */
+	} else {
+		dma_params.data_type = OMAP_DMA_DATA_TYPE_S16;
+		dma_params.elem_count = 16;		/* Elements in frame */
+		fifo = hw_ep->fifo_async;
+	}
+
+	dma_params.frame_count	= chdat->transfer_len / 32; /* Burst sz frame */
+
+	DBG(3, "ep%i %s dma ch%i dma: %08x len: %u(%u) packet_sz: %i(%i)\n",
+		chdat->epnum, chdat->tx ? "tx" : "rx",
+		ch, dma_addr, chdat->transfer_len, len,
+		chdat->transfer_packet_sz, packet_sz);
+
+	/*
+	 * Prepare omap DMA for transfer
+	 */
+	if (chdat->tx) {
+		dma_params.src_amode	= OMAP_DMA_AMODE_POST_INC;
+		dma_params.src_start	= (unsigned long)dma_addr;
+		dma_params.src_ei	= 0;
+		dma_params.src_fi	= 0;
+
+		dma_params.dst_amode	= OMAP_DMA_AMODE_DOUBLE_IDX;
+		dma_params.dst_start	= (unsigned long)fifo;
+		dma_params.dst_ei	= 1;
+		dma_params.dst_fi	= -31;	/* Loop 32 byte window */
+
+		dma_params.trigger	= sync_dev;
+		dma_params.sync_mode	= OMAP_DMA_SYNC_FRAME;
+		dma_params.src_or_dst_synch	= 0;	/* Dest sync */
+
+		src_burst = OMAP_DMA_DATA_BURST_16;	/* 16x32 read */
+		dst_burst = OMAP_DMA_DATA_BURST_8;	/* 8x32 write */
+	} else {
+		dma_params.src_amode	= OMAP_DMA_AMODE_DOUBLE_IDX;
+		dma_params.src_start	= (unsigned long)fifo;
+		dma_params.src_ei	= 1;
+		dma_params.src_fi	= -31;	/* Loop 32 byte window */
+
+		dma_params.dst_amode	= OMAP_DMA_AMODE_POST_INC;
+		dma_params.dst_start	= (unsigned long)dma_addr;
+		dma_params.dst_ei	= 0;
+		dma_params.dst_fi	= 0;
+
+		dma_params.trigger	= sync_dev;
+		dma_params.sync_mode	= OMAP_DMA_SYNC_FRAME;
+		dma_params.src_or_dst_synch	= 1;	/* Source sync */
+
+		src_burst = OMAP_DMA_DATA_BURST_8;	/* 8x32 read */
+		dst_burst = OMAP_DMA_DATA_BURST_16;	/* 16x32 write */
+	}
+
+	DBG(3, "ep%i %s using %i-bit %s dma from 0x%08lx to 0x%08lx\n",
+		chdat->epnum, chdat->tx ? "tx" : "rx",
+		(dma_params.data_type == OMAP_DMA_DATA_TYPE_S32) ? 32 : 16,
+		((dma_addr & 0x3) == 0) ? "sync" : "async",
+		dma_params.src_start, dma_params.dst_start);
+
+	omap_set_dma_params(ch, &dma_params);
+	omap_set_dma_src_burst_mode(ch, src_burst);
+	omap_set_dma_dest_burst_mode(ch, dst_burst);
+	omap_set_dma_write_mode(ch, OMAP_DMA_WRITE_LAST_NON_POSTED);
+
+	/*
+	 * Prepare MUSB for DMA transfer
+	 */
+	if (chdat->tx) {
+		musb_ep_select(mbase, chdat->epnum);
+		csr = musb_readw(hw_ep->regs, MUSB_TXCSR);
+		csr |= (MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB
+			| MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE);
+		csr &= ~MUSB_TXCSR_P_UNDERRUN;
+		musb_writew(hw_ep->regs, MUSB_TXCSR, csr);
+	} else {
+		musb_ep_select(mbase, chdat->epnum);
+		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
+		csr |= MUSB_RXCSR_DMAENAB;
+		csr &= ~(MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAMODE);
+		musb_writew(hw_ep->regs, MUSB_RXCSR,
+			csr | MUSB_RXCSR_P_WZC_BITS);
+	}
+
+	/*
+	 * Start DMA transfer
+	 */
+	omap_start_dma(ch);
+
+	if (chdat->tx) {
+		/* Send transfer_packet_sz packets at a time */
+		musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET,
+			chdat->transfer_packet_sz);
+
+		musb_writel(ep_conf, TUSB_EP_TX_OFFSET,
+			TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
+	} else {
+		/* Receive transfer_packet_sz packets at a time */
+		musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET,
+			chdat->transfer_packet_sz << 16);
+
+		musb_writel(ep_conf, TUSB_EP_RX_OFFSET,
+			TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
+	}
+
+	return true;
+}
+
+static int tusb_omap_dma_abort(struct dma_channel *channel)
+{
+	struct tusb_omap_dma_ch	*chdat = to_chdat(channel);
+	struct tusb_omap_dma	*tusb_dma = chdat->tusb_dma;
+
+	if (!tusb_dma->multichannel) {
+		if (tusb_dma->ch >= 0) {
+			omap_stop_dma(tusb_dma->ch);
+			omap_free_dma(tusb_dma->ch);
+			tusb_dma->ch = -1;
+		}
+
+		tusb_dma->dmareq = -1;
+		tusb_dma->sync_dev = -1;
+	}
+
+	channel->status = MUSB_DMA_STATUS_FREE;
+
+	return 0;
+}
+
+static inline int tusb_omap_dma_allocate_dmareq(struct tusb_omap_dma_ch *chdat)
+{
+	u32		reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
+	int		i, dmareq_nr = -1;
+
+	const int sync_dev[6] = {
+		OMAP24XX_DMA_EXT_DMAREQ0,
+		OMAP24XX_DMA_EXT_DMAREQ1,
+		OMAP242X_DMA_EXT_DMAREQ2,
+		OMAP242X_DMA_EXT_DMAREQ3,
+		OMAP242X_DMA_EXT_DMAREQ4,
+		OMAP242X_DMA_EXT_DMAREQ5,
+	};
+
+	for (i = 0; i < MAX_DMAREQ; i++) {
+		int cur = (reg & (0xf << (i * 5))) >> (i * 5);
+		if (cur == 0) {
+			dmareq_nr = i;
+			break;
+		}
+	}
+
+	if (dmareq_nr == -1)
+		return -EAGAIN;
+
+	reg |= (chdat->epnum << (dmareq_nr * 5));
+	if (chdat->tx)
+		reg |= ((1 << 4) << (dmareq_nr * 5));
+	musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg);
+
+	chdat->dmareq = dmareq_nr;
+	chdat->sync_dev = sync_dev[chdat->dmareq];
+
+	return 0;
+}
+
+static inline void tusb_omap_dma_free_dmareq(struct tusb_omap_dma_ch *chdat)
+{
+	u32 reg;
+
+	if (!chdat || chdat->dmareq < 0)
+		return;
+
+	reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
+	reg &= ~(0x1f << (chdat->dmareq * 5));
+	musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg);
+
+	chdat->dmareq = -1;
+	chdat->sync_dev = -1;
+}
+
+static struct dma_channel *dma_channel_pool[MAX_DMAREQ];
+
+static struct dma_channel *
+tusb_omap_dma_allocate(struct dma_controller *c,
+		struct musb_hw_ep *hw_ep,
+		u8 tx)
+{
+	int ret, i;
+	const char		*dev_name;
+	struct tusb_omap_dma	*tusb_dma;
+	struct musb		*musb;
+	void __iomem		*tbase;
+	struct dma_channel	*channel = NULL;
+	struct tusb_omap_dma_ch	*chdat = NULL;
+	u32			reg;
+
+	tusb_dma = container_of(c, struct tusb_omap_dma, controller);
+	musb = tusb_dma->musb;
+	tbase = musb->ctrl_base;
+
+	reg = musb_readl(tbase, TUSB_DMA_INT_MASK);
+	if (tx)
+		reg &= ~(1 << hw_ep->epnum);
+	else
+		reg &= ~(1 << (hw_ep->epnum + 15));
+	musb_writel(tbase, TUSB_DMA_INT_MASK, reg);
+
+	/* REVISIT: Why does dmareq5 not work? */
+	if (hw_ep->epnum == 0) {
+		DBG(3, "Not allowing DMA for ep0 %s\n", tx ? "tx" : "rx");
+		return NULL;
+	}
+
+	for (i = 0; i < MAX_DMAREQ; i++) {
+		struct dma_channel *ch = dma_channel_pool[i];
+		if (ch->status == MUSB_DMA_STATUS_UNKNOWN) {
+			ch->status = MUSB_DMA_STATUS_FREE;
+			channel = ch;
+			chdat = ch->private_data;
+			break;
+		}
+	}
+
+	if (!channel)
+		return NULL;
+
+	if (tx) {
+		chdat->tx = 1;
+		dev_name = "TUSB transmit";
+	} else {
+		chdat->tx = 0;
+		dev_name = "TUSB receive";
+	}
+
+	chdat->musb = tusb_dma->musb;
+	chdat->tbase = tusb_dma->tbase;
+	chdat->hw_ep = hw_ep;
+	chdat->epnum = hw_ep->epnum;
+	chdat->dmareq = -1;
+	chdat->completed_len = 0;
+	chdat->tusb_dma = tusb_dma;
+
+	channel->max_len = 0x7fffffff;
+	channel->desired_mode = 0;
+	channel->actual_len = 0;
+
+	if (tusb_dma->multichannel) {
+		ret = tusb_omap_dma_allocate_dmareq(chdat);
+		if (ret != 0)
+			goto free_dmareq;
+
+		ret = omap_request_dma(chdat->sync_dev, dev_name,
+				tusb_omap_dma_cb, channel, &chdat->ch);
+		if (ret != 0)
+			goto free_dmareq;
+	} else if (tusb_dma->ch == -1) {
+		tusb_dma->dmareq = 0;
+		tusb_dma->sync_dev = OMAP24XX_DMA_EXT_DMAREQ0;
+
+		/* Callback data gets set later in the shared dmareq case */
+		ret = omap_request_dma(tusb_dma->sync_dev, "TUSB shared",
+				tusb_omap_dma_cb, NULL, &tusb_dma->ch);
+		if (ret != 0)
+			goto free_dmareq;
+
+		chdat->dmareq = -1;
+		chdat->ch = -1;
+	}
+
+	DBG(3, "ep%i %s dma: %s dma%i dmareq%i sync%i\n",
+		chdat->epnum,
+		chdat->tx ? "tx" : "rx",
+		chdat->ch >= 0 ? "dedicated" : "shared",
+		chdat->ch >= 0 ? chdat->ch : tusb_dma->ch,
+		chdat->dmareq >= 0 ? chdat->dmareq : tusb_dma->dmareq,
+		chdat->sync_dev >= 0 ? chdat->sync_dev : tusb_dma->sync_dev);
+
+	return channel;
+
+free_dmareq:
+	tusb_omap_dma_free_dmareq(chdat);
+
+	DBG(3, "ep%i: Could not get a DMA channel\n", chdat->epnum);
+	channel->status = MUSB_DMA_STATUS_UNKNOWN;
+
+	return NULL;
+}
+
+static void tusb_omap_dma_release(struct dma_channel *channel)
+{
+	struct tusb_omap_dma_ch	*chdat = to_chdat(channel);
+	struct musb		*musb = chdat->musb;
+	void __iomem		*tbase = musb->ctrl_base;
+	u32			reg;
+
+	DBG(3, "ep%i ch%i\n", chdat->epnum, chdat->ch);
+
+	reg = musb_readl(tbase, TUSB_DMA_INT_MASK);
+	if (chdat->tx)
+		reg |= (1 << chdat->epnum);
+	else
+		reg |= (1 << (chdat->epnum + 15));
+	musb_writel(tbase, TUSB_DMA_INT_MASK, reg);
+
+	reg = musb_readl(tbase, TUSB_DMA_INT_CLEAR);
+	if (chdat->tx)
+		reg |= (1 << chdat->epnum);
+	else
+		reg |= (1 << (chdat->epnum + 15));
+	musb_writel(tbase, TUSB_DMA_INT_CLEAR, reg);
+
+	channel->status = MUSB_DMA_STATUS_UNKNOWN;
+
+	if (chdat->ch >= 0) {
+		omap_stop_dma(chdat->ch);
+		omap_free_dma(chdat->ch);
+		chdat->ch = -1;
+	}
+
+	if (chdat->dmareq >= 0)
+		tusb_omap_dma_free_dmareq(chdat);
+
+	channel = NULL;
+}
+
+void dma_controller_destroy(struct dma_controller *c)
+{
+	struct tusb_omap_dma	*tusb_dma;
+	int			i;
+
+	tusb_dma = container_of(c, struct tusb_omap_dma, controller);
+	for (i = 0; i < MAX_DMAREQ; i++) {
+		struct dma_channel *ch = dma_channel_pool[i];
+		if (ch) {
+			kfree(ch->private_data);
+			kfree(ch);
+		}
+	}
+
+	if (tusb_dma && !tusb_dma->multichannel && tusb_dma->ch >= 0)
+		omap_free_dma(tusb_dma->ch);
+
+	kfree(tusb_dma);
+}
+
+struct dma_controller *__init
+dma_controller_create(struct musb *musb, void __iomem *base)
+{
+	void __iomem		*tbase = musb->ctrl_base;
+	struct tusb_omap_dma	*tusb_dma;
+	int			i;
+
+	/* REVISIT: Get dmareq lines used from board-*.c */
+
+	musb_writel(musb->ctrl_base, TUSB_DMA_INT_MASK, 0x7fffffff);
+	musb_writel(musb->ctrl_base, TUSB_DMA_EP_MAP, 0);
+
+	musb_writel(tbase, TUSB_DMA_REQ_CONF,
+		TUSB_DMA_REQ_CONF_BURST_SIZE(2)
+		| TUSB_DMA_REQ_CONF_DMA_REQ_EN(0x3f)
+		| TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(2));
+
+	tusb_dma = kzalloc(sizeof(struct tusb_omap_dma), GFP_KERNEL);
+	if (!tusb_dma)
+		goto cleanup;
+
+	tusb_dma->musb = musb;
+	tusb_dma->tbase = musb->ctrl_base;
+
+	tusb_dma->ch = -1;
+	tusb_dma->dmareq = -1;
+	tusb_dma->sync_dev = -1;
+
+	tusb_dma->controller.start = tusb_omap_dma_start;
+	tusb_dma->controller.stop = tusb_omap_dma_stop;
+	tusb_dma->controller.channel_alloc = tusb_omap_dma_allocate;
+	tusb_dma->controller.channel_release = tusb_omap_dma_release;
+	tusb_dma->controller.channel_program = tusb_omap_dma_program;
+	tusb_dma->controller.channel_abort = tusb_omap_dma_abort;
+
+	if (tusb_get_revision(musb) >= TUSB_REV_30)
+		tusb_dma->multichannel = 1;
+
+	for (i = 0; i < MAX_DMAREQ; i++) {
+		struct dma_channel	*ch;
+		struct tusb_omap_dma_ch	*chdat;
+
+		ch = kzalloc(sizeof(struct dma_channel), GFP_KERNEL);
+		if (!ch)
+			goto cleanup;
+
+		dma_channel_pool[i] = ch;
+
+		chdat = kzalloc(sizeof(struct tusb_omap_dma_ch), GFP_KERNEL);
+		if (!chdat)
+			goto cleanup;
+
+		ch->status = MUSB_DMA_STATUS_UNKNOWN;
+		ch->private_data = chdat;
+	}
+
+	return &tusb_dma->controller;
+
+cleanup:
+	dma_controller_destroy(&tusb_dma->controller);
+
+	return NULL;
+}
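The DMA path above always moves whole 32-byte frames: tusb_omap_dma_program()
rounds the request down with len & ~0x1f, and tusb_omap_dma_cb() finishes any
1..31 leftover bytes with PIO. A stand-alone sketch of that arithmetic for a
hypothetical 54-byte request (the ping -s54 case mentioned in the comments
produces similarly short packets):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t len          = 54;            /* hypothetical request */
            uint32_t transfer_len = len & ~0x1fU;  /* whole frames: 32 */
            uint32_t frame_count  = transfer_len / 32;   /* 1 DMA frame */
            uint32_t pio          = len - transfer_len;  /* 22 bytes via PIO */

            /* Matches dma_params.frame_count in tusb_omap_dma_program()
             * and the "remaining 1 - 31 bytes" path in tusb_omap_dma_cb(),
             * assuming the DMA itself completes in full. */
            printf("dma %u bytes in %u frame(s), pio %u bytes\n",
                   transfer_len, frame_count, pio);
            return 0;
    }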
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index 8878c17..70338f4 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -499,9 +499,10 @@
 config USB_SERIAL_SIERRAWIRELESS
 	tristate "USB Sierra Wireless Driver"
 	help
-	  Say M here if you want to use a Sierra Wireless device (if
-	  using an PC 5220 or AC580 please use the Airprime driver
-	  instead).
+	  Say M here if you want to use Sierra Wireless devices.
+
+	  Many devices have a feature known as TRU-Install. For those devices
+	  to work properly, the USB Storage Sierra feature must be enabled.
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called sierra.
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 8387172..984f6ef 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -563,6 +563,7 @@
 	{ USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_ELV_EM1010PC_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) },
+	{ USB_DEVICE(FTDI_VID, FTDI_ELV_HS485_PID) },
 	{ USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) },
 	{ USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) },
 	{ USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) },
@@ -637,6 +638,7 @@
 	{ USB_DEVICE(ELEKTOR_VID, ELEKTOR_FT323R_PID) },
 	{ USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) },
+	{ USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) },
 	{ USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) },
@@ -646,6 +648,10 @@
 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
 	{ USB_DEVICE(FTDI_VID, FTDI_OOCDLINK_PID),
 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+	{ USB_DEVICE(FTDI_VID, LMI_LM3S_DEVEL_BOARD_PID),
+		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+	{ USB_DEVICE(FTDI_VID, LMI_LM3S_EVAL_BOARD_PID),
+		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
 	{ USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) },
 	{ USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) },
 	{ },					/* Optional parameter entry */
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index a577ea4..382265b 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -524,7 +524,9 @@
 #define FTDI_ELV_WS300PC_PID	0xE0F6	/* PC-Wetterstation (WS 300 PC) */
 #define FTDI_ELV_FHZ1300PC_PID	0xE0E8	/* FHZ 1300 PC */
 #define FTDI_ELV_WS500_PID	0xE0E9	/* PC-Wetterstation (WS 500) */
+#define FTDI_ELV_HS485_PID	0xE0EA	/* USB to RS-485 adapter */
 #define FTDI_ELV_EM1010PC_PID	0xE0EF	/* Energy monitor EM 1010 PC */
+#define FTDI_PHI_FISCO_PID	0xE40B	/* PHI Fisco USB to Serial cable */
 
 /*
  * Definitions for ID TECH (www.idt-net.com) devices
@@ -815,6 +817,11 @@
 #define OLIMEX_VID			0x15BA
 #define OLIMEX_ARM_USB_OCD_PID		0x0003
 
+/* Luminary Micro Stellaris Boards, VID = FTDI_VID */
+/* FTDI 2332C Dual channel device, side A=245 FIFO (JTAG), Side B=RS232 UART */
+#define LMI_LM3S_DEVEL_BOARD_PID	0xbcd8
+#define LMI_LM3S_EVAL_BOARD_PID		0xbcd9
+
 /* www.elsterelectricity.com Elster Unicom III Optical Probe */
 #define FTDI_ELSTER_UNICOM_PID		0xE700 /* Product Id */
 
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index e4eca95..e143198 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -186,6 +186,23 @@
 #define BANDRICH_VENDOR_ID			0x1A8D
 #define BANDRICH_PRODUCT_C100_1			0x1002
 #define BANDRICH_PRODUCT_C100_2			0x1003
+#define BANDRICH_PRODUCT_1004			0x1004
+#define BANDRICH_PRODUCT_1005			0x1005
+#define BANDRICH_PRODUCT_1006			0x1006
+#define BANDRICH_PRODUCT_1007			0x1007
+#define BANDRICH_PRODUCT_1008			0x1008
+#define BANDRICH_PRODUCT_1009			0x1009
+#define BANDRICH_PRODUCT_100A			0x100a
+
+#define BANDRICH_PRODUCT_100B			0x100b
+#define BANDRICH_PRODUCT_100C			0x100c
+#define BANDRICH_PRODUCT_100D			0x100d
+#define BANDRICH_PRODUCT_100E			0x100e
+
+#define BANDRICH_PRODUCT_100F			0x100f
+#define BANDRICH_PRODUCT_1010			0x1010
+#define BANDRICH_PRODUCT_1011			0x1011
+#define BANDRICH_PRODUCT_1012			0x1012
 
 #define AMOI_VENDOR_ID			0x1614
 #define AMOI_PRODUCT_9508			0x0800
@@ -197,6 +214,10 @@
 #define TELIT_VENDOR_ID				0x1bc7
 #define TELIT_PRODUCT_UC864E			0x1003
 
+/* ZTE PRODUCTS */
+#define ZTE_VENDOR_ID				0x19d2
+#define ZTE_PRODUCT_MF628			0x0015
+
 static struct usb_device_id option_ids[] = {
 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -302,12 +323,28 @@
 	{ USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_ET502HS) },
 	{ USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) },
 	{ USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) },
+	{ USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1004) },
+	{ USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1005) },
+	{ USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1006) },
+	{ USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1007) },
+	{ USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1008) },
+	{ USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1009) },
+	{ USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100A) },
+	{ USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100B) },
+	{ USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100C) },
+	{ USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100D) },
+	{ USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100E) },
+	{ USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100F) },
+	{ USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1010) },
+	{ USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1011) },
+	{ USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1012) },
 	{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
 	{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
 	{ USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
+	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628) },
 	{ } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
@@ -346,11 +383,7 @@
 	.read_int_callback = option_instat_callback,
 };
 
-#ifdef CONFIG_USB_DEBUG
 static int debug;
-#else
-#define debug 0
-#endif
 
 /* per port private data */
 
@@ -954,8 +987,5 @@
 MODULE_VERSION(DRIVER_VERSION);
 MODULE_LICENSE("GPL");
 
-#ifdef CONFIG_USB_DEBUG
 module_param(debug, bool, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(debug, "Debug messages");
-#endif
-
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 2c9c446..1ede144 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -90,7 +90,6 @@
 	{ USB_DEVICE(ALCOR_VENDOR_ID, ALCOR_PRODUCT_ID) },
 	{ USB_DEVICE(WS002IN_VENDOR_ID, WS002IN_PRODUCT_ID) },
 	{ USB_DEVICE(COREGA_VENDOR_ID, COREGA_PRODUCT_ID) },
-	{ USB_DEVICE(HL340_VENDOR_ID, HL340_PRODUCT_ID) },
 	{ USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) },
 	{ }					/* Terminating entry */
 };
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 6ac3bbc..a3bd039 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -107,10 +107,6 @@
 #define COREGA_VENDOR_ID	0x07aa
 #define COREGA_PRODUCT_ID	0x002a
 
-/* HL HL-340 (ID: 4348:5523) */
-#define HL340_VENDOR_ID		0x4348
-#define HL340_PRODUCT_ID	0x5523
-
 /* Y.C. Cable U.S.A., Inc - USB to RS-232 */
 #define YCCABLE_VENDOR_ID	0x05ad
 #define YCCABLE_PRODUCT_ID	0x0fba
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 2f6f152..7060337 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -14,7 +14,7 @@
   Whom based his on the Keyspan driver by Hugh Blemings <hugh@blemings.org>
 */
 
-#define DRIVER_VERSION "v.1.2.9c"
+#define DRIVER_VERSION "v.1.2.13a"
 #define DRIVER_AUTHOR "Kevin Lloyd <klloyd@sierrawireless.com>"
 #define DRIVER_DESC "USB Driver for Sierra Wireless USB modems"
 
@@ -31,6 +31,7 @@
 #define SWIMS_USB_REQUEST_SetPower	0x00
 #define SWIMS_USB_REQUEST_SetNmea	0x07
 #define SWIMS_USB_REQUEST_SetMode	0x0B
+#define SWIMS_USB_REQUEST_GetSwocInfo	0x0A
 #define SWIMS_SET_MODE_Modem		0x0001
 
 /* per port private data */
@@ -40,18 +41,11 @@
 
 static int debug;
 static int nmea;
-static int truinstall = 1;
-
-enum devicetype {
-	DEVICE_3_PORT =		0,
-	DEVICE_1_PORT =		1,
-	DEVICE_INSTALLER =	2,
-};
 
 static int sierra_set_power_state(struct usb_device *udev, __u16 swiState)
 {
 	int result;
-	dev_dbg(&udev->dev, "%s", "SET POWER STATE\n");
+	dev_dbg(&udev->dev, "%s", __func__);
 	result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
 			SWIMS_USB_REQUEST_SetPower,	/* __u8 request      */
 			USB_TYPE_VENDOR,		/* __u8 request type */
@@ -63,25 +57,10 @@
 	return result;
 }
 
-static int sierra_set_ms_mode(struct usb_device *udev, __u16 eSWocMode)
-{
-	int result;
-	dev_dbg(&udev->dev, "%s", "DEVICE MODE SWITCH\n");
-	result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
-			SWIMS_USB_REQUEST_SetMode,	/* __u8 request      */
-			USB_TYPE_VENDOR,		/* __u8 request type */
-			eSWocMode,			/* __u16 value       */
-			0x0000,				/* __u16 index       */
-			NULL,				/* void *data        */
-			0,				/* __u16 size 	     */
-			USB_CTRL_SET_TIMEOUT);		/* int timeout       */
-	return result;
-}
-
 static int sierra_vsc_set_nmea(struct usb_device *udev, __u16 enable)
 {
 	int result;
-	dev_dbg(&udev->dev, "%s", "NMEA Enable sent\n");
+	dev_dbg(&udev->dev, "%s", __func__);
 	result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
 			SWIMS_USB_REQUEST_SetNmea,	/* __u8 request      */
 			USB_TYPE_VENDOR,		/* __u8 request type */
@@ -97,6 +76,7 @@
 {
 	int result;
 	int *num_ports = usb_get_serial_data(serial);
+	dev_dbg(&serial->dev->dev, "%s", __func__);
 
 	result = *num_ports;
 
@@ -110,22 +90,23 @@
 
 static int sierra_calc_interface(struct usb_serial *serial)
 {
-		int interface;
-		struct usb_interface *p_interface;
-		struct usb_host_interface *p_host_interface;
+	int interface;
+	struct usb_interface *p_interface;
+	struct usb_host_interface *p_host_interface;
+	dev_dbg(&serial->dev->dev, "%s", __func__);
 
-		/* Get the interface structure pointer from the serial struct */
-		p_interface = serial->interface;
+	/* Get the interface structure pointer from the serial struct */
+	p_interface = serial->interface;
 
-		/* Get a pointer to the host interface structure */
-		p_host_interface = p_interface->cur_altsetting;
+	/* Get a pointer to the host interface structure */
+	p_host_interface = p_interface->cur_altsetting;
 
-		/* read the interface descriptor for this active altsetting
-		 * to find out the interface number we are on
-		*/
-		interface = p_host_interface->desc.bInterfaceNumber;
+	/* read the interface descriptor for this active altsetting
+	 * to find out the interface number we are on
+	 */
+	interface = p_host_interface->desc.bInterfaceNumber;
 
-		return interface;
+	return interface;
 }
 
 static int sierra_probe(struct usb_serial *serial,
@@ -135,43 +116,40 @@
 	struct usb_device *udev;
 	int *num_ports;
 	u8 ifnum;
+	u8 numendpoints;
+
+	dev_dbg(&serial->dev->dev, "%s", __func__);
 
 	num_ports = kmalloc(sizeof(*num_ports), GFP_KERNEL);
 	if (!num_ports)
 		return -ENOMEM;
 
 	ifnum = serial->interface->cur_altsetting->desc.bInterfaceNumber;
+	numendpoints = serial->interface->cur_altsetting->desc.bNumEndpoints;
 	udev = serial->dev;
 
-		/* Figure out the interface number from the serial structure */
-		ifnum = sierra_calc_interface(serial);
+	/* Figure out the interface number from the serial structure */
+	ifnum = sierra_calc_interface(serial);
 
-		/*
-		 * If this interface supports more than 1 alternate
-		 * select the 2nd one
-		 */
-		if (serial->interface->num_altsetting == 2) {
-			dev_dbg(&udev->dev,
-				"Selecting alt setting for interface %d\n",
-				ifnum);
+	/*
+	 * If this interface supports more than 1 alternate
+	 * select the 2nd one
+	 */
+	if (serial->interface->num_altsetting == 2) {
+		dev_dbg(&udev->dev, "Selecting alt setting for interface %d\n",
+			ifnum);
+		/* We know the alternate setting is 1 for the MC8785 */
+		usb_set_interface(udev, ifnum, 1);
+	}
 
-			/* We know the alternate setting is 1 for the MC8785 */
-			usb_set_interface(udev, ifnum, 1);
-		}
-
-	/* Check if in installer mode */
-	if (truinstall && id->driver_info == DEVICE_INSTALLER) {
-		dev_dbg(&udev->dev, "%s", "FOUND TRU-INSTALL DEVICE(SW)\n");
-		result = sierra_set_ms_mode(udev, SWIMS_SET_MODE_Modem);
-		/* Don't bind to the device when in installer mode */
-		kfree(num_ports);
-		return -EIO;
-	} else if (id->driver_info == DEVICE_1_PORT)
-		*num_ports = 1;
-	else if (ifnum == 0x99)
+	/* Dummy interface present on some SKUs should be ignored */
+	if (ifnum == 0x99)
 		*num_ports = 0;
+	else if (numendpoints <= 3)
+		*num_ports = 1;
 	else
-		*num_ports = 3;
+		*num_ports = (numendpoints - 1) / 2;
+
 	/*
 	 * save off our num_ports info so that we can use it in the
 	 * calc_num_ports callback
@@ -187,40 +165,50 @@
 	{ USB_DEVICE(0x1199, 0x0218) },	/* Sierra Wireless MC5720 */
 	{ USB_DEVICE(0x0f30, 0x1b1d) },	/* Sierra Wireless MC5720 */
 	{ USB_DEVICE(0x1199, 0x0020) },	/* Sierra Wireless MC5725 */
+	{ USB_DEVICE(0x1199, 0x0024) },	/* Sierra Wireless MC5727 */
 	{ USB_DEVICE(0x1199, 0x0220) },	/* Sierra Wireless MC5725 */
 	{ USB_DEVICE(0x1199, 0x0019) },	/* Sierra Wireless AirCard 595 */
 	{ USB_DEVICE(0x1199, 0x0021) },	/* Sierra Wireless AirCard 597E */
 	{ USB_DEVICE(0x1199, 0x0120) },	/* Sierra Wireless USB Dongle 595U */
-	{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0023, 0xFF, 0xFF, 0xFF) }, /* Sierra Wireless C597 */
+	 /* Sierra Wireless C597 */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0023, 0xFF, 0xFF, 0xFF) },
+	 /* Sierra Wireless Device */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0025, 0xFF, 0xFF, 0xFF) },
+	{ USB_DEVICE(0x1199, 0x0026) }, /* Sierra Wireless Device */
 
 	{ USB_DEVICE(0x1199, 0x6802) },	/* Sierra Wireless MC8755 */
 	{ USB_DEVICE(0x1199, 0x6804) },	/* Sierra Wireless MC8755 */
 	{ USB_DEVICE(0x1199, 0x6803) },	/* Sierra Wireless MC8765 */
 	{ USB_DEVICE(0x1199, 0x6812) },	/* Sierra Wireless MC8775 & AC 875U */
-	{ USB_DEVICE(0x1199, 0x6813) },	/* Sierra Wireless MC8775 (Thinkpad internal) */
+	{ USB_DEVICE(0x1199, 0x6813) },	/* Sierra Wireless MC8775 (Lenovo) */
 	{ USB_DEVICE(0x1199, 0x6815) },	/* Sierra Wireless MC8775 */
 	{ USB_DEVICE(0x03f0, 0x1e1d) },	/* HP hs2300 a.k.a MC8775 */
 	{ USB_DEVICE(0x1199, 0x6820) },	/* Sierra Wireless AirCard 875 */
 	{ USB_DEVICE(0x1199, 0x6821) },	/* Sierra Wireless AirCard 875U */
-	{ USB_DEVICE(0x1199, 0x6832) },	/* Sierra Wireless MC8780*/
-	{ USB_DEVICE(0x1199, 0x6833) },	/* Sierra Wireless MC8781*/
-	{ USB_DEVICE(0x1199, 0x683B), .driver_info = DEVICE_1_PORT },	/* Sierra Wireless MC8785 Composite*/
+	{ USB_DEVICE(0x1199, 0x6832) },	/* Sierra Wireless MC8780 */
+	{ USB_DEVICE(0x1199, 0x6833) },	/* Sierra Wireless MC8781 */
+	{ USB_DEVICE(0x1199, 0x683B) },	/* Sierra Wireless MC8785 Composite */
+	{ USB_DEVICE(0x1199, 0x683C) },	/* Sierra Wireless MC8790 */
+	{ USB_DEVICE(0x1199, 0x683D) },	/* Sierra Wireless MC8790 */
+	{ USB_DEVICE(0x1199, 0x683E) },	/* Sierra Wireless MC8790 */
 	{ USB_DEVICE(0x1199, 0x6850) },	/* Sierra Wireless AirCard 880 */
 	{ USB_DEVICE(0x1199, 0x6851) },	/* Sierra Wireless AirCard 881 */
 	{ USB_DEVICE(0x1199, 0x6852) },	/* Sierra Wireless AirCard 880 E */
 	{ USB_DEVICE(0x1199, 0x6853) },	/* Sierra Wireless AirCard 881 E */
 	{ USB_DEVICE(0x1199, 0x6855) },	/* Sierra Wireless AirCard 880 U */
 	{ USB_DEVICE(0x1199, 0x6856) },	/* Sierra Wireless AirCard 881 U */
-	{ USB_DEVICE(0x1199, 0x6859), .driver_info = DEVICE_1_PORT },	/* Sierra Wireless AirCard 885 E */
-	{ USB_DEVICE(0x1199, 0x685A), .driver_info = DEVICE_1_PORT },	/* Sierra Wireless AirCard 885 E */
+	{ USB_DEVICE(0x1199, 0x6859) },	/* Sierra Wireless AirCard 885 E */
+	{ USB_DEVICE(0x1199, 0x685A) },	/* Sierra Wireless AirCard 885 E */
+	/* Sierra Wireless C885 */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6880, 0xFF, 0xFF, 0xFF)},
+	/* Sierra Wireless Device */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6890, 0xFF, 0xFF, 0xFF)},
+	/* Sierra Wireless Device */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6892, 0xFF, 0xFF, 0xFF)},
 
-	{ USB_DEVICE(0x1199, 0x6468) }, /* Sierra Wireless MP3G - EVDO */
-	{ USB_DEVICE(0x1199, 0x6469) }, /* Sierra Wireless MP3G - UMTS/HSPA */
+	{ USB_DEVICE(0x1199, 0x0112) }, /* Sierra Wireless AirCard 580 */
+	{ USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */
 
-	{ USB_DEVICE(0x1199, 0x0112), .driver_info = DEVICE_1_PORT }, /* Sierra Wireless AirCard 580 */
-	{ USB_DEVICE(0x0F3D, 0x0112), .driver_info = DEVICE_1_PORT }, /* Airprime/Sierra PC 5220 */
-
-	{ USB_DEVICE(0x1199, 0x0FFF), .driver_info = DEVICE_INSTALLER},
 	{ }
 };
 MODULE_DEVICE_TABLE(usb, id_table);
@@ -268,13 +256,19 @@
 		if (portdata->rts_state)
 			val |= 0x02;
 
-		/* Determine which port is targeted */
-		if (port->bulk_out_endpointAddress == 2)
-			interface = 0;
-		else if (port->bulk_out_endpointAddress == 4)
-			interface = 1;
-		else if (port->bulk_out_endpointAddress == 5)
-			interface = 2;
+		/* If it is a composite device then report the interface in use */
+		if (serial->num_ports == 1)
+			interface = sierra_calc_interface(serial);
+
+		/* Otherwise we need to do the non-composite mapping */
+		else {
+			if (port->bulk_out_endpointAddress == 2)
+				interface = 0;
+			else if (port->bulk_out_endpointAddress == 4)
+				interface = 1;
+			else if (port->bulk_out_endpointAddress == 5)
+				interface = 2;
+		}
 
 		return usb_control_msg(serial->dev,
 				usb_rcvctrlpipe(serial->dev, 0),
@@ -713,7 +707,7 @@
 static struct usb_serial_driver sierra_device = {
 	.driver = {
 		.owner =	THIS_MODULE,
-		.name =		"sierra1",
+		.name =		"sierra",
 	},
 	.description       = "Sierra USB modem",
 	.id_table          = id_table,
@@ -769,14 +763,8 @@
 MODULE_VERSION(DRIVER_VERSION);
 MODULE_LICENSE("GPL");
 
-module_param(truinstall, bool, 0);
-MODULE_PARM_DESC(truinstall, "TRU-Install support");
-
-module_param(nmea, bool, 0);
+module_param(nmea, bool, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(nmea, "NMEA streaming");
 
-#ifdef CONFIG_USB_DEBUG
 module_param(debug, bool, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(debug, "Debug messages");
-#endif
-
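The new port-count logic in sierra_probe() is derived purely from the endpoint
descriptors: one interrupt endpoint plus a bulk IN/OUT pair per port. A
stand-alone sketch of the heuristic with hypothetical endpoint counts:

    #include <stdio.h>

    /* Mirrors the sierra_probe() heuristic above, for illustration */
    static int sierra_num_ports(unsigned int ifnum, unsigned int numendpoints)
    {
            if (ifnum == 0x99)              /* dummy interface on some SKUs */
                    return 0;
            if (numendpoints <= 3)          /* int + one bulk in/out pair */
                    return 1;
            return (numendpoints - 1) / 2;  /* 1 interrupt ep, 2 eps/port */
    }

    int main(void)
    {
            /* 3 endpoints -> 1 port, 7 endpoints -> 3 ports, dummy -> 0 */
            printf("%d %d %d\n",
                   sierra_num_ports(0x00, 3),
                   sierra_num_ports(0x00, 7),
                   sierra_num_ports(0x99, 7));
            return 0;
    }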
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 8c2d531..b157c48 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -122,9 +122,6 @@
 
 	dbg("%s", __func__);
 
-	if (serial == NULL)
-		return;
-
 	for (i = 0; i < serial->num_ports; ++i)
 		serial_table[serial->minor + i] = NULL;
 }
@@ -142,7 +139,8 @@
 	serial->type->shutdown(serial);
 
 	/* return the minor range that this device had */
-	return_serial(serial);
+	if (serial->minor != SERIAL_TTY_NO_MINOR)
+		return_serial(serial);
 
 	for (i = 0; i < serial->num_ports; ++i)
 		serial->port[i]->port.count = 0;
@@ -575,6 +573,7 @@
 	serial->interface = interface;
 	kref_init(&serial->kref);
 	mutex_init(&serial->disc_mutex);
+	serial->minor = SERIAL_TTY_NO_MINOR;
 
 	return serial;
 }
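The SERIAL_TTY_NO_MINOR change above closes a subtle hole: a usb_serial that
is destroyed before any minor was assigned still has minor == 0 from
kzalloc(), so an unconditional return_serial() would clear serial_table slots
belonging to a different device. A stand-alone model of the failure and the
fix (sentinel value assumed from usb-serial.h):

    #include <stdio.h>

    #define SERIAL_TTY_NO_MINOR 255  /* value assumed from usb-serial.h */

    struct serial { unsigned int minor, num_ports; };

    /* Model of return_serial(): drops the device's minor range */
    static void return_serial_range(struct serial *s, void *table[])
    {
            unsigned int i;

            for (i = 0; i < s->num_ports; i++)
                    table[s->minor + i] = NULL;
    }

    int main(void)
    {
            void *serial_table[16] = { NULL };
            struct serial owner   = { .minor = 0, .num_ports = 2 };
            struct serial unbound = { .minor = SERIAL_TTY_NO_MINOR,
                                      .num_ports = 2 };

            serial_table[0] = serial_table[1] = &owner;

            /* Without the sentinel, "unbound" would still carry minor == 0
             * from kzalloc() and a call here would clobber owner's slots. */
            if (unbound.minor != SERIAL_TTY_NO_MINOR)
                    return_serial_range(&unbound, serial_table);

            printf("slot 0 %s\n", serial_table[0] ? "intact" : "clobbered");
            return 0;
    }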
diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig
index 3d92496..c760346 100644
--- a/drivers/usb/storage/Kconfig
+++ b/drivers/usb/storage/Kconfig
@@ -146,6 +146,18 @@
 	  on the resulting scsi device node returns the Karma to normal
 	  operation.
 
+config USB_STORAGE_SIERRA
+	bool "Sierra Wireless TRU-Install Feature Support"
+	depends on USB_STORAGE
+	help
+	  Say Y here to include additional code to support Sierra Wireless
+	  products with the TRU-Install feature (e.g., AC597E, AC881U).
+
+	  This code switches the Sierra Wireless device from being in
+	  Mass Storage mode to Modem mode. It also has the ability to
+	  support host software upgrades should full Linux support be added
+	  to TRU-Install.
+
 config USB_STORAGE_CYPRESS_ATACB
 	bool "SAT emulation on Cypress USB/ATA Bridge with ATACB"
 	depends on USB_STORAGE
diff --git a/drivers/usb/storage/Makefile b/drivers/usb/storage/Makefile
index 4c596c7..bc3415b 100644
--- a/drivers/usb/storage/Makefile
+++ b/drivers/usb/storage/Makefile
@@ -21,6 +21,7 @@
 usb-storage-obj-$(CONFIG_USB_STORAGE_ALAUDA)	+= alauda.o
 usb-storage-obj-$(CONFIG_USB_STORAGE_ONETOUCH)	+= onetouch.o
 usb-storage-obj-$(CONFIG_USB_STORAGE_KARMA)	+= karma.o
+usb-storage-obj-$(CONFIG_USB_STORAGE_SIERRA)	+= sierra_ms.o
 usb-storage-obj-$(CONFIG_USB_STORAGE_CYPRESS_ATACB) += cypress_atacb.o
 
 usb-storage-objs :=	scsiglue.o protocol.o transport.o usb.o \
diff --git a/drivers/usb/storage/sierra_ms.c b/drivers/usb/storage/sierra_ms.c
new file mode 100644
index 0000000..4359a2c
--- /dev/null
+++ b/drivers/usb/storage/sierra_ms.c
@@ -0,0 +1,207 @@
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <linux/usb.h>
+
+#include "usb.h"
+#include "transport.h"
+#include "protocol.h"
+#include "scsiglue.h"
+#include "sierra_ms.h"
+#include "debug.h"
+
+#define SWIMS_USB_REQUEST_SetSwocMode	0x0B
+#define SWIMS_USB_REQUEST_GetSwocInfo	0x0A
+#define SWIMS_USB_INDEX_SetMode		0x0000
+#define SWIMS_SET_MODE_Modem		0x0001
+
+#define TRU_NORMAL 			0x01
+#define TRU_FORCE_MS 			0x02
+#define TRU_FORCE_MODEM 		0x03
+
+static unsigned int swi_tru_install = 1;
+module_param(swi_tru_install, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(swi_tru_install, "TRU-Install mode (1=Full Logic (def),"
+		 " 2=Force CD-Rom, 3=Force Modem)");
+
+struct swoc_info {
+	__u8 rev;
+	__u8 reserved[8];
+	__u16 LinuxSKU;
+	__u16 LinuxVer;
+	__u8 reserved2[47];
+} __attribute__((__packed__));
+
+static bool containsFullLinuxPackage(struct swoc_info *swocInfo)
+{
+	if ((swocInfo->LinuxSKU >= 0x2100 && swocInfo->LinuxSKU <= 0x2FFF) ||
+	   (swocInfo->LinuxSKU >= 0x7100 && swocInfo->LinuxSKU <= 0x7FFF))
+		return true;
+	else
+		return false;
+}
+
+static int sierra_set_ms_mode(struct usb_device *udev, __u16 eSWocMode)
+{
+	int result;
+	US_DEBUGP("SWIMS: %s", "DEVICE MODE SWITCH\n");
+	result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+			SWIMS_USB_REQUEST_SetSwocMode,	/* __u8 request      */
+			USB_TYPE_VENDOR | USB_DIR_OUT,	/* __u8 request type */
+			eSWocMode,			/* __u16 value       */
+			0x0000,				/* __u16 index       */
+			NULL,				/* void *data        */
+			0,				/* __u16 size 	     */
+			USB_CTRL_SET_TIMEOUT);		/* int timeout       */
+	return result;
+}
+
+
+static int sierra_get_swoc_info(struct usb_device *udev,
+				struct swoc_info *swocInfo)
+{
+	int result;
+
+	US_DEBUGP("SWIMS: Attempting to get TRU-Install info.\n");
+
+	result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+			SWIMS_USB_REQUEST_GetSwocInfo,	/* __u8 request      */
+			USB_TYPE_VENDOR | USB_DIR_IN,	/* __u8 request type */
+			0,				/* __u16 value       */
+			0,				/* __u16 index       */
+			(void *) swocInfo,		/* void *data        */
+			sizeof(struct swoc_info),	/* __u16 size 	     */
+			USB_CTRL_SET_TIMEOUT);		/* int timeout 	     */
+
+	swocInfo->LinuxSKU = le16_to_cpu(swocInfo->LinuxSKU);
+	swocInfo->LinuxVer = le16_to_cpu(swocInfo->LinuxVer);
+	return result;
+}
+
+static void debug_swoc(struct swoc_info *swocInfo)
+{
+	US_DEBUGP("SWIMS: SWoC Rev: %02d \n", swocInfo->rev);
+	US_DEBUGP("SWIMS: Linux SKU: %04X \n", swocInfo->LinuxSKU);
+	US_DEBUGP("SWIMS: Linux Version: %04X \n", swocInfo->LinuxVer);
+}
+
+
+static ssize_t show_truinst(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct swoc_info *swocInfo;
+	struct usb_interface *intf = to_usb_interface(dev);
+	struct usb_device *udev = interface_to_usbdev(intf);
+	int result;
+	if (swi_tru_install == TRU_FORCE_MS) {
+		result = snprintf(buf, PAGE_SIZE, "Forced Mass Storage\n");
+	} else {
+		swocInfo = kmalloc(sizeof(struct swoc_info), GFP_KERNEL);
+		if (!swocInfo) {
+			US_DEBUGP("SWIMS: Allocation failure\n");
+			snprintf(buf, PAGE_SIZE, "Error\n");
+			return -ENOMEM;
+		}
+		result = sierra_get_swoc_info(udev, swocInfo);
+		if (result < 0) {
+			US_DEBUGP("SWIMS: failed SWoC query\n");
+			kfree(swocInfo);
+			snprintf(buf, PAGE_SIZE, "Error\n");
+			return -EIO;
+		}
+		debug_swoc(swocInfo);
+		result = snprintf(buf, PAGE_SIZE,
+			"REV=%02d SKU=%04X VER=%04X\n",
+			swocInfo->rev,
+			swocInfo->LinuxSKU,
+			swocInfo->LinuxVer);
+		kfree(swocInfo);
+	}
+	return result;
+}
+static DEVICE_ATTR(truinst, S_IRUGO, show_truinst, NULL);
+
+int sierra_ms_init(struct us_data *us)
+{
+	int result, retries;
+	signed long delay_t;
+	struct swoc_info *swocInfo;
+	struct usb_device *udev;
+	struct Scsi_Host *sh;
+	struct scsi_device *sd;
+
+	delay_t = 2;
+	retries = 3;
+	result = 0;
+	udev = us->pusb_dev;
+
+	sh = us_to_host(us);
+	sd = scsi_get_host_dev(sh);
+
+	US_DEBUGP("SWIMS: sierra_ms_init called\n");
+
+	/* Force Modem mode */
+	if (swi_tru_install == TRU_FORCE_MODEM) {
+		US_DEBUGP("SWIMS: %s", "Forcing Modem Mode\n");
+		result = sierra_set_ms_mode(udev, SWIMS_SET_MODE_Modem);
+		if (result < 0)
+			US_DEBUGP("SWIMS: Failed to switch to modem mode.\n");
+		return -EIO;
+	}
+	/* Force Mass Storage mode (keep CD-Rom) */
+	else if (swi_tru_install == TRU_FORCE_MS) {
+		US_DEBUGP("SWIMS: %s", "Forcing Mass Storage Mode\n");
+		goto complete;
+	}
+	/* Normal TRU-Install Logic */
+	else {
+		US_DEBUGP("SWIMS: %s", "Normal SWoC Logic\n");
+
+		swocInfo = kmalloc(sizeof(struct swoc_info),
+				GFP_KERNEL);
+		if (!swocInfo) {
+			US_DEBUGP("SWIMS: %s", "Allocation failure\n");
+			return -ENOMEM;
+		}
+
+		retries = 3;
+		do {
+			retries--;
+			result = sierra_get_swoc_info(udev, swocInfo);
+			if (result < 0) {
+				US_DEBUGP("SWIMS: %s", "Failed SWoC query\n");
+				schedule_timeout_uninterruptible(2*HZ);
+			}
+		} while (retries && result < 0);
+
+		if (result < 0) {
+			US_DEBUGP("SWIMS: %s",
+				  "Completely failed SWoC query\n");
+			kfree(swocInfo);
+			return -EIO;
+		}
+
+		debug_swoc(swocInfo);
+
+		/* If there is no Linux software on the TRU-Install device
+		 * then switch to modem mode
+		 */
+		if (!containsFullLinuxPackage(swocInfo)) {
+			US_DEBUGP("SWIMS: %s",
+				"Switching to Modem Mode\n");
+			result = sierra_set_ms_mode(udev,
+				SWIMS_SET_MODE_Modem);
+			if (result < 0)
+				US_DEBUGP("SWIMS: Failed to switch modem\n");
+			kfree(swocInfo);
+			return -EIO;
+		}
+		kfree(swocInfo);
+	}
+complete:
+	result = device_create_file(&us->pusb_intf->dev, &dev_attr_truinst);
+
+	return USB_STOR_TRANSPORT_GOOD;
+}
+
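The SKU windows checked by containsFullLinuxPackage() decide whether the
device keeps exposing its TRU-Install CD-ROM (Linux software on board) or is
switched straight to modem mode. A stand-alone sketch with hypothetical SKU
values:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors containsFullLinuxPackage() above, for illustration */
    static bool contains_full_linux_package(uint16_t sku)
    {
            return (sku >= 0x2100 && sku <= 0x2FFF) ||
                   (sku >= 0x7100 && sku <= 0x7FFF);
    }

    int main(void)
    {
            /* Hypothetical SKUs: 0x2300 ships Linux software, 0x6800 not */
            uint16_t skus[] = { 0x2300, 0x6800 };
            int i;

            for (i = 0; i < 2; i++)
                    printf("SKU %04X -> %s\n", skus[i],
                           contains_full_linux_package(skus[i]) ?
                           "keep mass storage" : "switch to modem mode");
            return 0;
    }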
diff --git a/drivers/usb/storage/sierra_ms.h b/drivers/usb/storage/sierra_ms.h
new file mode 100644
index 0000000..bb48634
--- /dev/null
+++ b/drivers/usb/storage/sierra_ms.h
@@ -0,0 +1,4 @@
+#ifndef _SIERRA_MS_H_
+#define _SIERRA_MS_H_
+extern int sierra_ms_init(struct us_data *us);
+#endif
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index fcbbfdb..3523a0b 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -1032,8 +1032,21 @@
 
 	/* try to compute the actual residue, based on how much data
 	 * was really transferred and what the device tells us */
-	if (residue) {
-		if (!(us->fflags & US_FL_IGNORE_RESIDUE)) {
+	if (residue && !(us->fflags & US_FL_IGNORE_RESIDUE)) {
+
+		/* Heuristically detect devices that generate bogus residues
+		 * by seeing what happens with INQUIRY and READ CAPACITY
+		 * commands.
+		 */
+		if (bcs->Status == US_BULK_STAT_OK &&
+				scsi_get_resid(srb) == 0 &&
+					((srb->cmnd[0] == INQUIRY &&
+						transfer_length == 36) ||
+					(srb->cmnd[0] == READ_CAPACITY &&
+						transfer_length == 8))) {
+			us->fflags |= US_FL_IGNORE_RESIDUE;
+
+		} else {
 			residue = min(residue, transfer_length);
 			scsi_set_resid(srb, max(scsi_get_resid(srb),
 			                                       (int) residue));
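The heuristic added above only runs when the CSW reported a non-zero residue;
it then cross-checks commands whose full completion is externally visible. A
stand-alone sketch of the predicate (SCSI opcode values copied from scsi.h;
inputs hypothetical):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define INQUIRY       0x12  /* SCSI opcodes, as in <scsi/scsi.h> */
    #define READ_CAPACITY 0x25

    /* Called only when the device claimed a non-zero residue: if the
     * transfer visibly completed in full anyway, the residue reporting
     * is bogus and the device earns US_FL_IGNORE_RESIDUE. */
    static bool residue_is_bogus(bool status_ok, int resid,
                                 uint8_t opcode, unsigned int transfer_length)
    {
            return status_ok && resid == 0 &&
                   ((opcode == INQUIRY && transfer_length == 36) ||
                    (opcode == READ_CAPACITY && transfer_length == 8));
    }

    int main(void)
    {
            /* A 36-byte INQUIRY that completed in full, yet the CSW
             * claimed a residue: flag the device from now on. */
            printf("%d\n", residue_is_bogus(true, 0, INQUIRY, 36)); /* 1 */
            return 0;
    }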
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 7ae69f5..ba412e6 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -225,6 +225,13 @@
 		US_SC_DEVICE, US_PR_DEVICE, NULL,
 		US_FL_MAX_SECTORS_64 ),
 
+/* Reported by Cedric Godin <cedric@belbone.be> */
+UNUSUAL_DEV(  0x0421, 0x04b9, 0x0551, 0x0551,
+		"Nokia",
+		"5300",
+		US_SC_DEVICE, US_PR_DEVICE, NULL,
+		US_FL_FIX_CAPACITY ),
+
 /* Reported by Olaf Hering <olh@suse.de> from novell bug #105878 */
 UNUSUAL_DEV(  0x0424, 0x0fdc, 0x0210, 0x0210,
 		"SMSC",
@@ -356,14 +363,14 @@
 		US_FL_FIX_CAPACITY),
 
 /* Reported by Emil Larsson <emil@swip.net> */
-UNUSUAL_DEV(  0x04b0, 0x0411, 0x0100, 0x0110,
+UNUSUAL_DEV(  0x04b0, 0x0411, 0x0100, 0x0111,
 		"NIKON",
 		"NIKON DSC D80",
 		US_SC_DEVICE, US_PR_DEVICE, NULL,
 		US_FL_FIX_CAPACITY),
 
 /* Reported by Ortwin Glueck <odi@odi.ch> */
-UNUSUAL_DEV(  0x04b0, 0x0413, 0x0110, 0x0110,
+UNUSUAL_DEV(  0x04b0, 0x0413, 0x0110, 0x0111,
 		"NIKON",
 		"NIKON DSC D40",
 		US_SC_DEVICE, US_PR_DEVICE, NULL,
@@ -1185,6 +1192,13 @@
 		US_SC_DEVICE, US_PR_DEVICE, NULL,
 		US_FL_FIX_INQUIRY ),
 
+/* Reported by Rauch Wolke <rauchwolke@gmx.net> */
+UNUSUAL_DEV(  0x07c4, 0xa4a5, 0x0000, 0xffff,
+		"Simple Tech/Datafab",
+		"CF+SM Reader",
+		US_SC_DEVICE, US_PR_DEVICE, NULL,
+		US_FL_IGNORE_RESIDUE ),
+
 /* Casio QV 2x00/3x00/4000/8000 digital still cameras are not conformant
  * to the USB storage specification in two ways:
  * - They tell us they are using transport protocol CBI. In reality they
@@ -1562,6 +1576,7 @@
 		US_SC_DEVICE, US_PR_DEVICE, NULL,
 		0),
 
+#ifdef CONFIG_USB_STORAGE_SIERRA
 /* Reported by Kevin Lloyd <linux@sierrawireless.com>
  * Entry is needed for the initializer function override,
  * which instructs the device to load as a modem
@@ -1570,8 +1585,9 @@
 UNUSUAL_DEV(  0x1199, 0x0fff, 0x0000, 0x9999,
 		"Sierra Wireless",
 		"USB MMC Storage",
-		US_SC_DEVICE, US_PR_DEVICE, NULL,
-		US_FL_IGNORE_DEVICE),
+		US_SC_DEVICE, US_PR_DEVICE, sierra_ms_init,
+		0),
+#endif
 
 /* Reported by Jaco Kroon <jaco@kroon.co.za>
  * The usb-storage module found on the Digitech GNX4 (and supposedly other
@@ -1743,6 +1759,15 @@
 		US_FL_FIX_CAPACITY),
 
 /*
+ * Patch by Jost Diederichs <jost@qdusa.com>
+ */
+UNUSUAL_DEV(0x22b8, 0x6410, 0x0001, 0x9999,
+		"Motorola Inc.",
+		"Motorola Phone (RAZRV3xx)",
+		US_SC_DEVICE, US_PR_DEVICE, NULL,
+		US_FL_FIX_CAPACITY),
+
+/*
  * Patch by Constantin Baranov <const@tltsu.ru>
  * Report by Andreas Koenecke.
  * Motorola ROKR Z6.
@@ -1767,6 +1792,13 @@
 		US_SC_DEVICE, US_PR_DEVICE, NULL,
 		US_FL_FIX_CAPACITY ),
 
+/* Reported by Andrey Rahmatullin <wrar@altlinux.org> */
+UNUSUAL_DEV(  0x4102, 0x1020, 0x0100,  0x0100,
+		"iRiver",
+		"MP3 T10",
+		US_SC_DEVICE, US_PR_DEVICE, NULL,
+		US_FL_IGNORE_RESIDUE ),
+
 /*
  * David Härdeman <david@2gen.com>
  * The key makes the SCSI stack print confusing (but harmless) messages
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index bfea851..73679aa 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -102,6 +102,9 @@
 #ifdef CONFIG_USB_STORAGE_CYPRESS_ATACB
 #include "cypress_atacb.h"
 #endif
+#ifdef CONFIG_USB_STORAGE_SIERRA
+#include "sierra_ms.h"
+#endif
 
 /* Some informational data */
 MODULE_AUTHOR("Matthew Dharm <mdharm-usb@one-eyed-alien.net>");
diff --git a/fs/dlm/config.c b/fs/dlm/config.c
index c4e7d72..89d2fb7 100644
--- a/fs/dlm/config.c
+++ b/fs/dlm/config.c
@@ -2,7 +2,7 @@
 *******************************************************************************
 **
 **  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
-**  Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
+**  Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 **
 **  This copyrighted material is made available to anyone wishing to use,
 **  modify, copy, or redistribute it subject to the terms and conditions
@@ -30,16 +30,16 @@
 
 static struct config_group *space_list;
 static struct config_group *comm_list;
-static struct comm *local_comm;
+static struct dlm_comm *local_comm;
 
-struct clusters;
-struct cluster;
-struct spaces;
-struct space;
-struct comms;
-struct comm;
-struct nodes;
-struct node;
+struct dlm_clusters;
+struct dlm_cluster;
+struct dlm_spaces;
+struct dlm_space;
+struct dlm_comms;
+struct dlm_comm;
+struct dlm_nodes;
+struct dlm_node;
 
 static struct config_group *make_cluster(struct config_group *, const char *);
 static void drop_cluster(struct config_group *, struct config_item *);
@@ -68,17 +68,22 @@
 static ssize_t store_node(struct config_item *i, struct configfs_attribute *a,
 			  const char *buf, size_t len);
 
-static ssize_t comm_nodeid_read(struct comm *cm, char *buf);
-static ssize_t comm_nodeid_write(struct comm *cm, const char *buf, size_t len);
-static ssize_t comm_local_read(struct comm *cm, char *buf);
-static ssize_t comm_local_write(struct comm *cm, const char *buf, size_t len);
-static ssize_t comm_addr_write(struct comm *cm, const char *buf, size_t len);
-static ssize_t node_nodeid_read(struct node *nd, char *buf);
-static ssize_t node_nodeid_write(struct node *nd, const char *buf, size_t len);
-static ssize_t node_weight_read(struct node *nd, char *buf);
-static ssize_t node_weight_write(struct node *nd, const char *buf, size_t len);
+static ssize_t comm_nodeid_read(struct dlm_comm *cm, char *buf);
+static ssize_t comm_nodeid_write(struct dlm_comm *cm, const char *buf,
+				size_t len);
+static ssize_t comm_local_read(struct dlm_comm *cm, char *buf);
+static ssize_t comm_local_write(struct dlm_comm *cm, const char *buf,
+				size_t len);
+static ssize_t comm_addr_write(struct dlm_comm *cm, const char *buf,
+				size_t len);
+static ssize_t node_nodeid_read(struct dlm_node *nd, char *buf);
+static ssize_t node_nodeid_write(struct dlm_node *nd, const char *buf,
+				size_t len);
+static ssize_t node_weight_read(struct dlm_node *nd, char *buf);
+static ssize_t node_weight_write(struct dlm_node *nd, const char *buf,
+				size_t len);
 
-struct cluster {
+struct dlm_cluster {
 	struct config_group group;
 	unsigned int cl_tcp_port;
 	unsigned int cl_buffer_size;
@@ -109,11 +114,11 @@
 
 struct cluster_attribute {
 	struct configfs_attribute attr;
-	ssize_t (*show)(struct cluster *, char *);
-	ssize_t (*store)(struct cluster *, const char *, size_t);
+	ssize_t (*show)(struct dlm_cluster *, char *);
+	ssize_t (*store)(struct dlm_cluster *, const char *, size_t);
 };
 
-static ssize_t cluster_set(struct cluster *cl, unsigned int *cl_field,
+static ssize_t cluster_set(struct dlm_cluster *cl, unsigned int *cl_field,
 			   int *info_field, int check_zero,
 			   const char *buf, size_t len)
 {
@@ -134,12 +139,12 @@
 }
 
 #define CLUSTER_ATTR(name, check_zero)                                        \
-static ssize_t name##_write(struct cluster *cl, const char *buf, size_t len)  \
+static ssize_t name##_write(struct dlm_cluster *cl, const char *buf, size_t len) \
 {                                                                             \
 	return cluster_set(cl, &cl->cl_##name, &dlm_config.ci_##name,         \
 			   check_zero, buf, len);                             \
 }                                                                             \
-static ssize_t name##_read(struct cluster *cl, char *buf)                     \
+static ssize_t name##_read(struct dlm_cluster *cl, char *buf)                 \
 {                                                                             \
 	return snprintf(buf, PAGE_SIZE, "%u\n", cl->cl_##name);               \
 }                                                                             \
@@ -181,8 +186,8 @@
 
 struct comm_attribute {
 	struct configfs_attribute attr;
-	ssize_t (*show)(struct comm *, char *);
-	ssize_t (*store)(struct comm *, const char *, size_t);
+	ssize_t (*show)(struct dlm_comm *, char *);
+	ssize_t (*store)(struct dlm_comm *, const char *, size_t);
 };
 
 static struct comm_attribute comm_attr_nodeid = {
@@ -222,8 +227,8 @@
 
 struct node_attribute {
 	struct configfs_attribute attr;
-	ssize_t (*show)(struct node *, char *);
-	ssize_t (*store)(struct node *, const char *, size_t);
+	ssize_t (*show)(struct dlm_node *, char *);
+	ssize_t (*store)(struct dlm_node *, const char *, size_t);
 };
 
 static struct node_attribute node_attr_nodeid = {
@@ -248,26 +253,26 @@
 	NULL,
 };
 
-struct clusters {
+struct dlm_clusters {
 	struct configfs_subsystem subsys;
 };
 
-struct spaces {
+struct dlm_spaces {
 	struct config_group ss_group;
 };
 
-struct space {
+struct dlm_space {
 	struct config_group group;
 	struct list_head members;
 	struct mutex members_lock;
 	int members_count;
 };
 
-struct comms {
+struct dlm_comms {
 	struct config_group cs_group;
 };
 
-struct comm {
+struct dlm_comm {
 	struct config_item item;
 	int nodeid;
 	int local;
@@ -275,11 +280,11 @@
 	struct sockaddr_storage *addr[DLM_MAX_ADDR_COUNT];
 };
 
-struct nodes {
+struct dlm_nodes {
 	struct config_group ns_group;
 };
 
-struct node {
+struct dlm_node {
 	struct config_item item;
 	struct list_head list; /* space->members */
 	int nodeid;
@@ -372,38 +377,40 @@
 	.ct_owner = THIS_MODULE,
 };
 
-static struct cluster *to_cluster(struct config_item *i)
+static struct dlm_cluster *to_cluster(struct config_item *i)
 {
-	return i ? container_of(to_config_group(i), struct cluster, group):NULL;
+	return i ? container_of(to_config_group(i), struct dlm_cluster, group) :
+		   NULL;
 }
 
-static struct space *to_space(struct config_item *i)
+static struct dlm_space *to_space(struct config_item *i)
 {
-	return i ? container_of(to_config_group(i), struct space, group) : NULL;
+	return i ? container_of(to_config_group(i), struct dlm_space, group) :
+		   NULL;
 }
 
-static struct comm *to_comm(struct config_item *i)
+static struct dlm_comm *to_comm(struct config_item *i)
 {
-	return i ? container_of(i, struct comm, item) : NULL;
+	return i ? container_of(i, struct dlm_comm, item) : NULL;
 }
 
-static struct node *to_node(struct config_item *i)
+static struct dlm_node *to_node(struct config_item *i)
 {
-	return i ? container_of(i, struct node, item) : NULL;
+	return i ? container_of(i, struct dlm_node, item) : NULL;
 }
 
 static struct config_group *make_cluster(struct config_group *g,
 					 const char *name)
 {
-	struct cluster *cl = NULL;
-	struct spaces *sps = NULL;
-	struct comms *cms = NULL;
+	struct dlm_cluster *cl = NULL;
+	struct dlm_spaces *sps = NULL;
+	struct dlm_comms *cms = NULL;
 	void *gps = NULL;
 
-	cl = kzalloc(sizeof(struct cluster), GFP_KERNEL);
+	cl = kzalloc(sizeof(struct dlm_cluster), GFP_KERNEL);
 	gps = kcalloc(3, sizeof(struct config_group *), GFP_KERNEL);
-	sps = kzalloc(sizeof(struct spaces), GFP_KERNEL);
-	cms = kzalloc(sizeof(struct comms), GFP_KERNEL);
+	sps = kzalloc(sizeof(struct dlm_spaces), GFP_KERNEL);
+	cms = kzalloc(sizeof(struct dlm_comms), GFP_KERNEL);
 
 	if (!cl || !gps || !sps || !cms)
 		goto fail;
@@ -443,7 +450,7 @@
 
 static void drop_cluster(struct config_group *g, struct config_item *i)
 {
-	struct cluster *cl = to_cluster(i);
+	struct dlm_cluster *cl = to_cluster(i);
 	struct config_item *tmp;
 	int j;
 
@@ -461,20 +468,20 @@
 
 static void release_cluster(struct config_item *i)
 {
-	struct cluster *cl = to_cluster(i);
+	struct dlm_cluster *cl = to_cluster(i);
 	kfree(cl->group.default_groups);
 	kfree(cl);
 }
 
 static struct config_group *make_space(struct config_group *g, const char *name)
 {
-	struct space *sp = NULL;
-	struct nodes *nds = NULL;
+	struct dlm_space *sp = NULL;
+	struct dlm_nodes *nds = NULL;
 	void *gps = NULL;
 
-	sp = kzalloc(sizeof(struct space), GFP_KERNEL);
+	sp = kzalloc(sizeof(struct dlm_space), GFP_KERNEL);
 	gps = kcalloc(2, sizeof(struct config_group *), GFP_KERNEL);
-	nds = kzalloc(sizeof(struct nodes), GFP_KERNEL);
+	nds = kzalloc(sizeof(struct dlm_nodes), GFP_KERNEL);
 
 	if (!sp || !gps || !nds)
 		goto fail;
@@ -500,7 +507,7 @@
 
 static void drop_space(struct config_group *g, struct config_item *i)
 {
-	struct space *sp = to_space(i);
+	struct dlm_space *sp = to_space(i);
 	struct config_item *tmp;
 	int j;
 
@@ -517,16 +524,16 @@
 
 static void release_space(struct config_item *i)
 {
-	struct space *sp = to_space(i);
+	struct dlm_space *sp = to_space(i);
 	kfree(sp->group.default_groups);
 	kfree(sp);
 }
 
 static struct config_item *make_comm(struct config_group *g, const char *name)
 {
-	struct comm *cm;
+	struct dlm_comm *cm;
 
-	cm = kzalloc(sizeof(struct comm), GFP_KERNEL);
+	cm = kzalloc(sizeof(struct dlm_comm), GFP_KERNEL);
 	if (!cm)
 		return ERR_PTR(-ENOMEM);
 
@@ -539,7 +546,7 @@
 
 static void drop_comm(struct config_group *g, struct config_item *i)
 {
-	struct comm *cm = to_comm(i);
+	struct dlm_comm *cm = to_comm(i);
 	if (local_comm == cm)
 		local_comm = NULL;
 	dlm_lowcomms_close(cm->nodeid);
@@ -550,16 +557,16 @@
 
 static void release_comm(struct config_item *i)
 {
-	struct comm *cm = to_comm(i);
+	struct dlm_comm *cm = to_comm(i);
 	kfree(cm);
 }
 
 static struct config_item *make_node(struct config_group *g, const char *name)
 {
-	struct space *sp = to_space(g->cg_item.ci_parent);
-	struct node *nd;
+	struct dlm_space *sp = to_space(g->cg_item.ci_parent);
+	struct dlm_node *nd;
 
-	nd = kzalloc(sizeof(struct node), GFP_KERNEL);
+	nd = kzalloc(sizeof(struct dlm_node), GFP_KERNEL);
 	if (!nd)
 		return ERR_PTR(-ENOMEM);
 
@@ -578,8 +585,8 @@
 
 static void drop_node(struct config_group *g, struct config_item *i)
 {
-	struct space *sp = to_space(g->cg_item.ci_parent);
-	struct node *nd = to_node(i);
+	struct dlm_space *sp = to_space(g->cg_item.ci_parent);
+	struct dlm_node *nd = to_node(i);
 
 	mutex_lock(&sp->members_lock);
 	list_del(&nd->list);
@@ -591,11 +598,11 @@
 
 static void release_node(struct config_item *i)
 {
-	struct node *nd = to_node(i);
+	struct dlm_node *nd = to_node(i);
 	kfree(nd);
 }
 
-static struct clusters clusters_root = {
+static struct dlm_clusters clusters_root = {
 	.subsys = {
 		.su_group = {
 			.cg_item = {
@@ -625,7 +632,7 @@
 static ssize_t show_cluster(struct config_item *i, struct configfs_attribute *a,
 			    char *buf)
 {
-	struct cluster *cl = to_cluster(i);
+	struct dlm_cluster *cl = to_cluster(i);
 	struct cluster_attribute *cla =
 			container_of(a, struct cluster_attribute, attr);
 	return cla->show ? cla->show(cl, buf) : 0;
@@ -635,7 +642,7 @@
 			     struct configfs_attribute *a,
 			     const char *buf, size_t len)
 {
-	struct cluster *cl = to_cluster(i);
+	struct dlm_cluster *cl = to_cluster(i);
 	struct cluster_attribute *cla =
 		container_of(a, struct cluster_attribute, attr);
 	return cla->store ? cla->store(cl, buf, len) : -EINVAL;
@@ -644,7 +651,7 @@
 static ssize_t show_comm(struct config_item *i, struct configfs_attribute *a,
 			 char *buf)
 {
-	struct comm *cm = to_comm(i);
+	struct dlm_comm *cm = to_comm(i);
 	struct comm_attribute *cma =
 			container_of(a, struct comm_attribute, attr);
 	return cma->show ? cma->show(cm, buf) : 0;
@@ -653,29 +660,31 @@
 static ssize_t store_comm(struct config_item *i, struct configfs_attribute *a,
 			  const char *buf, size_t len)
 {
-	struct comm *cm = to_comm(i);
+	struct dlm_comm *cm = to_comm(i);
 	struct comm_attribute *cma =
 		container_of(a, struct comm_attribute, attr);
 	return cma->store ? cma->store(cm, buf, len) : -EINVAL;
 }
 
-static ssize_t comm_nodeid_read(struct comm *cm, char *buf)
+static ssize_t comm_nodeid_read(struct dlm_comm *cm, char *buf)
 {
 	return sprintf(buf, "%d\n", cm->nodeid);
 }
 
-static ssize_t comm_nodeid_write(struct comm *cm, const char *buf, size_t len)
+static ssize_t comm_nodeid_write(struct dlm_comm *cm, const char *buf,
+				 size_t len)
 {
 	cm->nodeid = simple_strtol(buf, NULL, 0);
 	return len;
 }
 
-static ssize_t comm_local_read(struct comm *cm, char *buf)
+static ssize_t comm_local_read(struct dlm_comm *cm, char *buf)
 {
 	return sprintf(buf, "%d\n", cm->local);
 }
 
-static ssize_t comm_local_write(struct comm *cm, const char *buf, size_t len)
+static ssize_t comm_local_write(struct dlm_comm *cm, const char *buf,
+				size_t len)
 {
 	cm->local = simple_strtol(buf, NULL, 0);
 	if (cm->local && !local_comm)
@@ -683,7 +692,7 @@
 	return len;
 }
 
-static ssize_t comm_addr_write(struct comm *cm, const char *buf, size_t len)
+static ssize_t comm_addr_write(struct dlm_comm *cm, const char *buf, size_t len)
 {
 	struct sockaddr_storage *addr;
 
@@ -705,7 +714,7 @@
 static ssize_t show_node(struct config_item *i, struct configfs_attribute *a,
 			 char *buf)
 {
-	struct node *nd = to_node(i);
+	struct dlm_node *nd = to_node(i);
 	struct node_attribute *nda =
 			container_of(a, struct node_attribute, attr);
 	return nda->show ? nda->show(nd, buf) : 0;
@@ -714,29 +723,31 @@
 static ssize_t store_node(struct config_item *i, struct configfs_attribute *a,
 			  const char *buf, size_t len)
 {
-	struct node *nd = to_node(i);
+	struct dlm_node *nd = to_node(i);
 	struct node_attribute *nda =
 		container_of(a, struct node_attribute, attr);
 	return nda->store ? nda->store(nd, buf, len) : -EINVAL;
 }
 
-static ssize_t node_nodeid_read(struct node *nd, char *buf)
+static ssize_t node_nodeid_read(struct dlm_node *nd, char *buf)
 {
 	return sprintf(buf, "%d\n", nd->nodeid);
 }
 
-static ssize_t node_nodeid_write(struct node *nd, const char *buf, size_t len)
+static ssize_t node_nodeid_write(struct dlm_node *nd, const char *buf,
+				 size_t len)
 {
 	nd->nodeid = simple_strtol(buf, NULL, 0);
 	return len;
 }
 
-static ssize_t node_weight_read(struct node *nd, char *buf)
+static ssize_t node_weight_read(struct dlm_node *nd, char *buf)
 {
 	return sprintf(buf, "%d\n", nd->weight);
 }
 
-static ssize_t node_weight_write(struct node *nd, const char *buf, size_t len)
+static ssize_t node_weight_write(struct dlm_node *nd, const char *buf,
+				 size_t len)
 {
 	nd->weight = simple_strtol(buf, NULL, 0);
 	return len;
@@ -746,7 +757,7 @@
  * Functions for the dlm to get the info that's been configured
  */
 
-static struct space *get_space(char *name)
+static struct dlm_space *get_space(char *name)
 {
 	struct config_item *i;
 
@@ -760,15 +771,15 @@
 	return to_space(i);
 }
 
-static void put_space(struct space *sp)
+static void put_space(struct dlm_space *sp)
 {
 	config_item_put(&sp->group.cg_item);
 }
 
-static struct comm *get_comm(int nodeid, struct sockaddr_storage *addr)
+static struct dlm_comm *get_comm(int nodeid, struct sockaddr_storage *addr)
 {
 	struct config_item *i;
-	struct comm *cm = NULL;
+	struct dlm_comm *cm = NULL;
 	int found = 0;
 
 	if (!comm_list)
@@ -801,7 +812,7 @@
 	return cm;
 }
 
-static void put_comm(struct comm *cm)
+static void put_comm(struct dlm_comm *cm)
 {
 	config_item_put(&cm->item);
 }
@@ -810,8 +821,8 @@
 int dlm_nodeid_list(char *lsname, int **ids_out, int *ids_count_out,
 		    int **new_out, int *new_count_out)
 {
-	struct space *sp;
-	struct node *nd;
+	struct dlm_space *sp;
+	struct dlm_node *nd;
 	int i = 0, rv = 0, ids_count = 0, new_count = 0;
 	int *ids, *new;
 
@@ -874,8 +885,8 @@
 
 int dlm_node_weight(char *lsname, int nodeid)
 {
-	struct space *sp;
-	struct node *nd;
+	struct dlm_space *sp;
+	struct dlm_node *nd;
 	int w = -EEXIST;
 
 	sp = get_space(lsname);
@@ -897,7 +908,7 @@
 
 int dlm_nodeid_to_addr(int nodeid, struct sockaddr_storage *addr)
 {
-	struct comm *cm = get_comm(nodeid, NULL);
+	struct dlm_comm *cm = get_comm(nodeid, NULL);
 	if (!cm)
 		return -EEXIST;
 	if (!cm->addr_count)
@@ -909,7 +920,7 @@
 
 int dlm_addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid)
 {
-	struct comm *cm = get_comm(0, addr);
+	struct dlm_comm *cm = get_comm(0, addr);
 	if (!cm)
 		return -EEXIST;
 	*nodeid = cm->nodeid;
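
A minimal sketch (illustrative only; the demo_* names are hypothetical)
of the container_of() accessor idiom the renamed dlm_* helpers keep
using, with the NULL check preserved:

	#include <linux/configfs.h>
	#include <linux/kernel.h>

	struct demo_node {
		struct config_item item;	/* embedded configfs item */
		int nodeid;
	};

	/* Recover the wrapper from its embedded member; NULL-safe. */
	static struct demo_node *to_demo_node(struct config_item *i)
	{
		return i ? container_of(i, struct demo_node, item) : NULL;
	}
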
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index 929e48a..34f14a1 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -527,8 +527,10 @@
 		k32buf = (struct dlm_write_request32 *)kbuf;
 		kbuf = kmalloc(count + 1 + (sizeof(struct dlm_write_request) -
 			       sizeof(struct dlm_write_request32)), GFP_KERNEL);
-		if (!kbuf)
+		if (!kbuf) {
+			kfree(k32buf);
 			return -ENOMEM;
+		}
 
 		if (proc)
 			set_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags);
@@ -539,8 +541,10 @@
 
 	/* do we really need this? can a write happen after a close? */
 	if ((kbuf->cmd == DLM_USER_LOCK || kbuf->cmd == DLM_USER_UNLOCK) &&
-	    (proc && test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags)))
-		return -EINVAL;
+	    (proc && test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))) {
+		error = -EINVAL;
+		goto out_free;
+	}
 
 	sigfillset(&allsigs);
 	sigprocmask(SIG_BLOCK, &allsigs, &tmpsig);
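
A minimal sketch of the unwind pattern the two fixes above establish
(hypothetical demo_* names): once the first buffer is allocated, every
exit path must release it, either inline or via a shared label:

	#include <linux/errno.h>
	#include <linux/slab.h>
	#include <linux/types.h>

	static int demo_write(size_t count, int reject)
	{
		char *buf32, *buf;
		int error = 0;

		buf32 = kmalloc(count, GFP_KERNEL);
		if (!buf32)
			return -ENOMEM;

		buf = kmalloc(count + 1, GFP_KERNEL);
		if (!buf) {
			kfree(buf32);	/* don't leak the first buffer */
			return -ENOMEM;
		}

		if (reject) {
			error = -EINVAL;
			goto out_free;	/* shared unwind path */
		}
		/* ... use the buffers ... */
	out_free:
		kfree(buf);
		kfree(buf32);
		return error;
	}
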
diff --git a/fs/xfs/linux-2.6/sema.h b/fs/xfs/linux-2.6/sema.h
deleted file mode 100644
index 3abe7e9..0000000
--- a/fs/xfs/linux-2.6/sema.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-#ifndef __XFS_SUPPORT_SEMA_H__
-#define __XFS_SUPPORT_SEMA_H__
-
-#include <linux/time.h>
-#include <linux/wait.h>
-#include <linux/semaphore.h>
-#include <asm/atomic.h>
-
-/*
- * sema_t structure just maps to struct semaphore in Linux kernel.
- */
-
-typedef struct semaphore sema_t;
-
-#define initnsema(sp, val, name)	sema_init(sp, val)
-#define psema(sp, b)			down(sp)
-#define vsema(sp)			up(sp)
-#define freesema(sema)			do { } while (0)
-
-static inline int issemalocked(sema_t *sp)
-{
-	return down_trylock(sp) || (up(sp), 0);
-}
-
-/*
- * Map cpsema (try to get the sema) to down_trylock. We need to switch
- * the return values since cpsema returns 1 (acquired) 0 (failed) and
- * down_trylock returns the reverse 0 (acquired) 1 (failed).
- */
-static inline int cpsema(sema_t *sp)
-{
-	return down_trylock(sp) ? 0 : 1;
-}
-
-#endif /* __XFS_SUPPORT_SEMA_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index fa47e43..f42f80a 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -73,7 +73,6 @@
 	unsigned long	pgoff)
 {
 	xfs_inode_t	*ip;
-	bhv_vnode_t	*vp = vn_from_inode(inode);
 	loff_t		isize = i_size_read(inode);
 	loff_t		offset = page_offset(page);
 	int		delalloc = -1, unmapped = -1, unwritten = -1;
@@ -81,7 +80,7 @@
 	if (page_has_buffers(page))
 		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
 
-	ip = xfs_vtoi(vp);
+	ip = XFS_I(inode);
 	if (!ip->i_rwtrace)
 		return;
 
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 9cc8f02..986061a 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -58,7 +58,7 @@
 		bp, id,
 		(void *)(unsigned long)bp->b_flags,
 		(void *)(unsigned long)bp->b_hold.counter,
-		(void *)(unsigned long)bp->b_sema.count.counter,
+		(void *)(unsigned long)bp->b_sema.count,
 		(void *)current,
 		data, ra,
 		(void *)(unsigned long)((bp->b_file_offset>>32) & 0xffffffff),
@@ -253,7 +253,7 @@
 
 	memset(bp, 0, sizeof(xfs_buf_t));
 	atomic_set(&bp->b_hold, 1);
-	init_MUTEX_LOCKED(&bp->b_iodonesema);
+	init_completion(&bp->b_iowait);
 	INIT_LIST_HEAD(&bp->b_list);
 	INIT_LIST_HEAD(&bp->b_hash_list);
 	init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */
@@ -838,6 +838,7 @@
 		return;
 	}
 
+	ASSERT(atomic_read(&bp->b_hold) > 0);
 	if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) {
 		if (bp->b_relse) {
 			atomic_inc(&bp->b_hold);
@@ -851,11 +852,6 @@
 			spin_unlock(&hash->bh_lock);
 			xfs_buf_free(bp);
 		}
-	} else {
-		/*
-		 * Catch reference count leaks
-		 */
-		ASSERT(atomic_read(&bp->b_hold) >= 0);
 	}
 }
 
@@ -1037,7 +1033,7 @@
 			xfs_buf_iodone_work(&bp->b_iodone_work);
 		}
 	} else {
-		up(&bp->b_iodonesema);
+		complete(&bp->b_iowait);
 	}
 }
 
@@ -1275,7 +1271,7 @@
 	XB_TRACE(bp, "iowait", 0);
 	if (atomic_read(&bp->b_io_remaining))
 		blk_run_address_space(bp->b_target->bt_mapping);
-	down(&bp->b_iodonesema);
+	wait_for_completion(&bp->b_iowait);
 	XB_TRACE(bp, "iowaited", (long)bp->b_error);
 	return bp->b_error;
 }
@@ -1799,7 +1795,7 @@
 xfs_buf_init(void)
 {
 #ifdef XFS_BUF_TRACE
-	xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_SLEEP);
+	xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_NOFS);
 #endif
 
 	xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
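
A minimal sketch (illustrative) of how the init_MUTEX_LOCKED()/up()/
down() trio on b_iodonesema maps onto the completion introduced above:

	#include <linux/completion.h>

	struct demo_buf {
		struct completion iowait;	/* was a locked semaphore */
	};

	static void demo_buf_init(struct demo_buf *bp)
	{
		init_completion(&bp->iowait);	/* was init_MUTEX_LOCKED() */
	}

	static void demo_buf_iodone(struct demo_buf *bp)
	{
		complete(&bp->iowait);		/* was up() */
	}

	static void demo_buf_iowait(struct demo_buf *bp)
	{
		wait_for_completion(&bp->iowait);	/* was down() */
	}
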
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index 29d1d4a..fe01099 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -157,7 +157,7 @@
 	xfs_buf_iodone_t	b_iodone;	/* I/O completion function */
 	xfs_buf_relse_t		b_relse;	/* releasing function */
 	xfs_buf_bdstrat_t	b_strat;	/* pre-write function */
-	struct semaphore	b_iodonesema;	/* Semaphore for I/O waiters */
+	struct completion	b_iowait;	/* queue for I/O waiters */
 	void			*b_fspriv;
 	void			*b_fspriv2;
 	void			*b_fspriv3;
@@ -352,7 +352,7 @@
 #define XFS_BUF_CPSEMA(bp)	(xfs_buf_cond_lock(bp) == 0)
 #define XFS_BUF_VSEMA(bp)	xfs_buf_unlock(bp)
 #define XFS_BUF_PSEMA(bp,x)	xfs_buf_lock(bp)
-#define XFS_BUF_V_IODONESEMA(bp) up(&bp->b_iodonesema);
+#define XFS_BUF_FINISH_IOWAIT(bp)	complete(&bp->b_iowait);
 
 #define XFS_BUF_SET_TARGET(bp, target)	((bp)->b_target = (target))
 #define XFS_BUF_TARGET(bp)		((bp)->b_target)
diff --git a/fs/xfs/linux-2.6/xfs_export.c b/fs/xfs/linux-2.6/xfs_export.c
index 987fe84..24fd598 100644
--- a/fs/xfs/linux-2.6/xfs_export.c
+++ b/fs/xfs/linux-2.6/xfs_export.c
@@ -139,7 +139,7 @@
 	}
 
 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
-	return ip->i_vnode;
+	return VFS_I(ip);
 }
 
 STATIC struct dentry *
@@ -167,7 +167,7 @@
 	if (!inode)
 		return NULL;
 	if (IS_ERR(inode))
-		return ERR_PTR(PTR_ERR(inode));
+		return ERR_CAST(inode);
 	result = d_alloc_anon(inode);
 	if (!result) {
 		iput(inode);
@@ -198,7 +198,7 @@
 	if (!inode)
 		return NULL;
 	if (IS_ERR(inode))
-		return ERR_PTR(PTR_ERR(inode));
+		return ERR_CAST(inode);
 	result = d_alloc_anon(inode);
 	if (!result) {
 		iput(inode);
@@ -219,9 +219,9 @@
 	if (unlikely(error))
 		return ERR_PTR(-error);
 
-	parent = d_alloc_anon(cip->i_vnode);
+	parent = d_alloc_anon(VFS_I(cip));
 	if (unlikely(!parent)) {
-		iput(cip->i_vnode);
+		iput(VFS_I(cip));
 		return ERR_PTR(-ENOMEM);
 	}
 	return parent;
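
A minimal sketch of why ERR_CAST() replaces ERR_PTR(PTR_ERR(...))
above: it reinterprets an error-carrying pointer as a new type without
decoding and re-encoding the errno (the demo_* name is hypothetical):

	#include <linux/dcache.h>
	#include <linux/err.h>
	#include <linux/fs.h>

	static struct dentry *demo_fh_to_dentry(struct inode *inode)
	{
		if (!inode)
			return NULL;
		if (IS_ERR(inode))
			return ERR_CAST(inode);	/* same error, new type */
		return d_alloc_anon(inode);
	}
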
diff --git a/fs/xfs/linux-2.6/xfs_fs_subr.c b/fs/xfs/linux-2.6/xfs_fs_subr.c
index 1eefe61..36caa6d 100644
--- a/fs/xfs/linux-2.6/xfs_fs_subr.c
+++ b/fs/xfs/linux-2.6/xfs_fs_subr.c
@@ -31,7 +31,7 @@
 	xfs_off_t	last,
 	int		fiopt)
 {
-	struct address_space *mapping = ip->i_vnode->i_mapping;
+	struct address_space *mapping = VFS_I(ip)->i_mapping;
 
 	if (mapping->nrpages)
 		truncate_inode_pages(mapping, first);
@@ -44,7 +44,7 @@
 	xfs_off_t	last,
 	int		fiopt)
 {
-	struct address_space *mapping = ip->i_vnode->i_mapping;
+	struct address_space *mapping = VFS_I(ip)->i_mapping;
 	int		ret = 0;
 
 	if (mapping->nrpages) {
@@ -64,7 +64,7 @@
 	uint64_t	flags,
 	int		fiopt)
 {
-	struct address_space *mapping = ip->i_vnode->i_mapping;
+	struct address_space *mapping = VFS_I(ip)->i_mapping;
 	int		ret = 0;
 	int		ret2;
 
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index acb978d..48799ba 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -245,7 +245,7 @@
 
 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 
-	*inode = XFS_ITOV(ip);
+	*inode = VFS_I(ip);
 	return 0;
 }
 
@@ -927,7 +927,7 @@
 xfs_diflags_to_linux(
 	struct xfs_inode	*ip)
 {
-	struct inode		*inode = XFS_ITOV(ip);
+	struct inode		*inode = VFS_I(ip);
 	unsigned int		xflags = xfs_ip2xflags(ip);
 
 	if (xflags & XFS_XFLAG_IMMUTABLE)
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index e88f510..91bcd97 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -62,7 +62,7 @@
 xfs_synchronize_atime(
 	xfs_inode_t	*ip)
 {
-	struct inode	*inode = ip->i_vnode;
+	struct inode	*inode = VFS_I(ip);
 
 	if (inode) {
 		ip->i_d.di_atime.t_sec = (__int32_t)inode->i_atime.tv_sec;
@@ -79,7 +79,7 @@
 xfs_mark_inode_dirty_sync(
 	xfs_inode_t	*ip)
 {
-	struct inode	*inode = ip->i_vnode;
+	struct inode	*inode = VFS_I(ip);
 
 	if (inode)
 		mark_inode_dirty_sync(inode);
@@ -89,36 +89,31 @@
  * Change the requested timestamp in the given inode.
 * We don't lock across timestamp updates, and we don't log them, but
  * we do record the fact that there is dirty information in core.
- *
- * NOTE -- callers MUST combine XFS_ICHGTIME_MOD or XFS_ICHGTIME_CHG
- *		with XFS_ICHGTIME_ACC to be sure that access time
- *		update will take.  Calling first with XFS_ICHGTIME_ACC
- *		and then XFS_ICHGTIME_MOD may fail to modify the access
- *		timestamp if the filesystem is mounted noacctm.
  */
 void
 xfs_ichgtime(
 	xfs_inode_t	*ip,
 	int		flags)
 {
-	struct inode	*inode = vn_to_inode(XFS_ITOV(ip));
+	struct inode	*inode = VFS_I(ip);
 	timespec_t	tv;
+	int		sync_it = 0;
 
-	nanotime(&tv);
-	if (flags & XFS_ICHGTIME_MOD) {
+	tv = current_fs_time(inode->i_sb);
+
+	if ((flags & XFS_ICHGTIME_MOD) &&
+	    !timespec_equal(&inode->i_mtime, &tv)) {
 		inode->i_mtime = tv;
 		ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec;
 		ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec;
+		sync_it = 1;
 	}
-	if (flags & XFS_ICHGTIME_ACC) {
-		inode->i_atime = tv;
-		ip->i_d.di_atime.t_sec = (__int32_t)tv.tv_sec;
-		ip->i_d.di_atime.t_nsec = (__int32_t)tv.tv_nsec;
-	}
-	if (flags & XFS_ICHGTIME_CHG) {
+	if ((flags & XFS_ICHGTIME_CHG) &&
+	    !timespec_equal(&inode->i_ctime, &tv)) {
 		inode->i_ctime = tv;
 		ip->i_d.di_ctime.t_sec = (__int32_t)tv.tv_sec;
 		ip->i_d.di_ctime.t_nsec = (__int32_t)tv.tv_nsec;
+		sync_it = 1;
 	}
 
 	/*
@@ -130,55 +125,11 @@
 	 * ensure that the compiler does not reorder the update
 	 * of i_update_core above the timestamp updates above.
 	 */
-	SYNCHRONIZE();
-	ip->i_update_core = 1;
-	if (!(inode->i_state & I_NEW))
+	if (sync_it) {
+		SYNCHRONIZE();
+		ip->i_update_core = 1;
 		mark_inode_dirty_sync(inode);
-}
-
-/*
- * Variant on the above which avoids querying the system clock
- * in situations where we know the Linux inode timestamps have
- * just been updated (and so we can update our inode cheaply).
- */
-void
-xfs_ichgtime_fast(
-	xfs_inode_t	*ip,
-	struct inode	*inode,
-	int		flags)
-{
-	timespec_t	*tvp;
-
-	/*
-	 * Atime updates for read() & friends are handled lazily now, and
-	 * explicit updates must go through xfs_ichgtime()
-	 */
-	ASSERT((flags & XFS_ICHGTIME_ACC) == 0);
-
-	if (flags & XFS_ICHGTIME_MOD) {
-		tvp = &inode->i_mtime;
-		ip->i_d.di_mtime.t_sec = (__int32_t)tvp->tv_sec;
-		ip->i_d.di_mtime.t_nsec = (__int32_t)tvp->tv_nsec;
 	}
-	if (flags & XFS_ICHGTIME_CHG) {
-		tvp = &inode->i_ctime;
-		ip->i_d.di_ctime.t_sec = (__int32_t)tvp->tv_sec;
-		ip->i_d.di_ctime.t_nsec = (__int32_t)tvp->tv_nsec;
-	}
-
-	/*
-	 * We update the i_update_core field _after_ changing
-	 * the timestamps in order to coordinate properly with
-	 * xfs_iflush() so that we don't lose timestamp updates.
-	 * This keeps us from having to hold the inode lock
-	 * while doing this.  We use the SYNCHRONIZE macro to
-	 * ensure that the compiler does not reorder the update
-	 * of i_update_core above the timestamp updates above.
-	 */
-	SYNCHRONIZE();
-	ip->i_update_core = 1;
-	if (!(inode->i_state & I_NEW))
-		mark_inode_dirty_sync(inode);
 }
 
 /*
@@ -299,7 +250,7 @@
 	if (unlikely(error))
 		goto out_free_acl;
 
-	inode = ip->i_vnode;
+	inode = VFS_I(ip);
 
 	error = xfs_init_security(inode, dir);
 	if (unlikely(error))
@@ -366,7 +317,7 @@
 		return NULL;
 	}
 
-	return d_splice_alias(cip->i_vnode, dentry);
+	return d_splice_alias(VFS_I(cip), dentry);
 }
 
 STATIC struct dentry *
@@ -399,12 +350,12 @@
 
 	/* if exact match, just splice and exit */
 	if (!ci_name.name)
-		return d_splice_alias(ip->i_vnode, dentry);
+		return d_splice_alias(VFS_I(ip), dentry);
 
 	/* else case-insensitive match... */
 	dname.name = ci_name.name;
 	dname.len = ci_name.len;
-	dentry = d_add_ci(ip->i_vnode, dentry, &dname);
+	dentry = d_add_ci(VFS_I(ip), dentry, &dname);
 	kmem_free(ci_name.name);
 	return dentry;
 }
@@ -478,7 +429,7 @@
 	if (unlikely(error))
 		goto out;
 
-	inode = cip->i_vnode;
+	inode = VFS_I(cip);
 
 	error = xfs_init_security(inode, dir);
 	if (unlikely(error))
@@ -710,7 +661,7 @@
 	return error;
 }
 
-const struct inode_operations xfs_inode_operations = {
+static const struct inode_operations xfs_inode_operations = {
 	.permission		= xfs_vn_permission,
 	.truncate		= xfs_vn_truncate,
 	.getattr		= xfs_vn_getattr,
@@ -722,7 +673,7 @@
 	.fallocate		= xfs_vn_fallocate,
 };
 
-const struct inode_operations xfs_dir_inode_operations = {
+static const struct inode_operations xfs_dir_inode_operations = {
 	.create			= xfs_vn_create,
 	.lookup			= xfs_vn_lookup,
 	.link			= xfs_vn_link,
@@ -747,7 +698,7 @@
 	.listxattr		= xfs_vn_listxattr,
 };
 
-const struct inode_operations xfs_dir_ci_inode_operations = {
+static const struct inode_operations xfs_dir_ci_inode_operations = {
 	.create			= xfs_vn_create,
 	.lookup			= xfs_vn_ci_lookup,
 	.link			= xfs_vn_link,
@@ -772,7 +723,7 @@
 	.listxattr		= xfs_vn_listxattr,
 };
 
-const struct inode_operations xfs_symlink_inode_operations = {
+static const struct inode_operations xfs_symlink_inode_operations = {
 	.readlink		= generic_readlink,
 	.follow_link		= xfs_vn_follow_link,
 	.put_link		= xfs_vn_put_link,
@@ -784,3 +735,98 @@
 	.removexattr		= generic_removexattr,
 	.listxattr		= xfs_vn_listxattr,
 };
+
+STATIC void
+xfs_diflags_to_iflags(
+	struct inode		*inode,
+	struct xfs_inode	*ip)
+{
+	if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE)
+		inode->i_flags |= S_IMMUTABLE;
+	else
+		inode->i_flags &= ~S_IMMUTABLE;
+	if (ip->i_d.di_flags & XFS_DIFLAG_APPEND)
+		inode->i_flags |= S_APPEND;
+	else
+		inode->i_flags &= ~S_APPEND;
+	if (ip->i_d.di_flags & XFS_DIFLAG_SYNC)
+		inode->i_flags |= S_SYNC;
+	else
+		inode->i_flags &= ~S_SYNC;
+	if (ip->i_d.di_flags & XFS_DIFLAG_NOATIME)
+		inode->i_flags |= S_NOATIME;
+	else
+		inode->i_flags &= ~S_NOATIME;
+}
+
+/*
+ * Initialize the Linux inode, set up the operation vectors and
+ * unlock the inode.
+ *
+ * When reading existing inodes from disk this is called directly
+ * from xfs_iget; when creating a new inode it is called from
+ * xfs_ialloc after setting up the inode.
+ */
+void
+xfs_setup_inode(
+	struct xfs_inode	*ip)
+{
+	struct inode		*inode = ip->i_vnode;
+
+	inode->i_mode	= ip->i_d.di_mode;
+	inode->i_nlink	= ip->i_d.di_nlink;
+	inode->i_uid	= ip->i_d.di_uid;
+	inode->i_gid	= ip->i_d.di_gid;
+
+	switch (inode->i_mode & S_IFMT) {
+	case S_IFBLK:
+	case S_IFCHR:
+		inode->i_rdev =
+			MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff,
+			      sysv_minor(ip->i_df.if_u2.if_rdev));
+		break;
+	default:
+		inode->i_rdev = 0;
+		break;
+	}
+
+	inode->i_generation = ip->i_d.di_gen;
+	i_size_write(inode, ip->i_d.di_size);
+	inode->i_atime.tv_sec	= ip->i_d.di_atime.t_sec;
+	inode->i_atime.tv_nsec	= ip->i_d.di_atime.t_nsec;
+	inode->i_mtime.tv_sec	= ip->i_d.di_mtime.t_sec;
+	inode->i_mtime.tv_nsec	= ip->i_d.di_mtime.t_nsec;
+	inode->i_ctime.tv_sec	= ip->i_d.di_ctime.t_sec;
+	inode->i_ctime.tv_nsec	= ip->i_d.di_ctime.t_nsec;
+	xfs_diflags_to_iflags(inode, ip);
+	xfs_iflags_clear(ip, XFS_IMODIFIED);
+
+	switch (inode->i_mode & S_IFMT) {
+	case S_IFREG:
+		inode->i_op = &xfs_inode_operations;
+		inode->i_fop = &xfs_file_operations;
+		inode->i_mapping->a_ops = &xfs_address_space_operations;
+		break;
+	case S_IFDIR:
+		if (xfs_sb_version_hasasciici(&XFS_M(inode->i_sb)->m_sb))
+			inode->i_op = &xfs_dir_ci_inode_operations;
+		else
+			inode->i_op = &xfs_dir_inode_operations;
+		inode->i_fop = &xfs_dir_file_operations;
+		break;
+	case S_IFLNK:
+		inode->i_op = &xfs_symlink_inode_operations;
+		if (!(ip->i_df.if_flags & XFS_IFINLINE))
+			inode->i_mapping->a_ops = &xfs_address_space_operations;
+		break;
+	default:
+		inode->i_op = &xfs_inode_operations;
+		init_special_inode(inode, inode->i_mode, inode->i_rdev);
+		break;
+	}
+
+	xfs_iflags_clear(ip, XFS_INEW);
+	barrier();
+
+	unlock_new_inode(inode);
+}
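
A minimal sketch of the guard xfs_ichgtime() now applies: read
current_fs_time() and only write back and dirty the inode when the
timestamp actually changes (the demo_* name is hypothetical):

	#include <linux/fs.h>
	#include <linux/time.h>

	static void demo_touch_mtime(struct inode *inode)
	{
		struct timespec now = current_fs_time(inode->i_sb);

		if (timespec_equal(&inode->i_mtime, &now))
			return;		/* unchanged: skip dirtying */

		inode->i_mtime = now;
		mark_inode_dirty_sync(inode);
	}
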
diff --git a/fs/xfs/linux-2.6/xfs_iops.h b/fs/xfs/linux-2.6/xfs_iops.h
index d97ba93..8b1a1e3 100644
--- a/fs/xfs/linux-2.6/xfs_iops.h
+++ b/fs/xfs/linux-2.6/xfs_iops.h
@@ -18,10 +18,7 @@
 #ifndef __XFS_IOPS_H__
 #define __XFS_IOPS_H__
 
-extern const struct inode_operations xfs_inode_operations;
-extern const struct inode_operations xfs_dir_inode_operations;
-extern const struct inode_operations xfs_dir_ci_inode_operations;
-extern const struct inode_operations xfs_symlink_inode_operations;
+struct xfs_inode;
 
 extern const struct file_operations xfs_file_operations;
 extern const struct file_operations xfs_dir_file_operations;
@@ -29,14 +26,6 @@
 
 extern ssize_t xfs_vn_listxattr(struct dentry *, char *data, size_t size);
 
-struct xfs_inode;
-extern void xfs_ichgtime(struct xfs_inode *, int);
-extern void xfs_ichgtime_fast(struct xfs_inode *, struct inode *, int);
-
-#define xfs_vtoi(vp) \
-	((struct xfs_inode *)vn_to_inode(vp)->i_private)
-
-#define XFS_I(inode) \
-	((struct xfs_inode *)(inode)->i_private)
+extern void xfs_setup_inode(struct xfs_inode *);
 
 #endif /* __XFS_IOPS_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h
index 4d45d93..cc0f7b3 100644
--- a/fs/xfs/linux-2.6/xfs_linux.h
+++ b/fs/xfs/linux-2.6/xfs_linux.h
@@ -45,13 +45,13 @@
 #include <mrlock.h>
 #include <sv.h>
 #include <mutex.h>
-#include <sema.h>
 #include <time.h>
 
 #include <support/ktrace.h>
 #include <support/debug.h>
 #include <support/uuid.h>
 
+#include <linux/semaphore.h>
 #include <linux/mm.h>
 #include <linux/kernel.h>
 #include <linux/blkdev.h>
@@ -126,8 +126,6 @@
 
 #define current_cpu()		(raw_smp_processor_id())
 #define current_pid()		(current->pid)
-#define current_fsuid(cred)	(current->fsuid)
-#define current_fsgid(cred)	(current->fsgid)
 #define current_test_flags(f)	(current->flags & (f))
 #define current_set_flags_nested(sp, f)		\
 		(*(sp) = current->flags, current->flags |= (f))
@@ -180,7 +178,7 @@
 #define xfs_sort(a,n,s,fn)	sort(a,n,s,fn,NULL)
 #define xfs_stack_trace()	dump_stack()
 #define xfs_itruncate_data(ip, off)	\
-	(-vmtruncate(vn_to_inode(XFS_ITOV(ip)), (off)))
+	(-vmtruncate(VFS_I(ip), (off)))
 
 
 /* Move the kernel do_div definition off to one side */
diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c
index 82333b3..1957e535 100644
--- a/fs/xfs/linux-2.6/xfs_lrw.c
+++ b/fs/xfs/linux-2.6/xfs_lrw.c
@@ -137,7 +137,7 @@
 	struct address_space	*mapping;
 	int			status;
 
-	mapping = ip->i_vnode->i_mapping;
+	mapping = VFS_I(ip)->i_mapping;
 	do {
 		unsigned offset, bytes;
 		void *fsdata;
@@ -674,9 +674,7 @@
 	 */
 	if (likely(!(ioflags & IO_INVIS) &&
 		   !mnt_want_write(file->f_path.mnt))) {
-		file_update_time(file);
-		xfs_ichgtime_fast(xip, inode,
-				  XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+		xfs_ichgtime(xip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
 		mnt_drop_write(file->f_path.mnt);
 	}
 
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 30ae963..73c65f1 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -581,118 +581,6 @@
 	return (((__uint64_t)pagefactor) << bitshift) - 1;
 }
 
-STATIC_INLINE void
-xfs_set_inodeops(
-	struct inode		*inode)
-{
-	switch (inode->i_mode & S_IFMT) {
-	case S_IFREG:
-		inode->i_op = &xfs_inode_operations;
-		inode->i_fop = &xfs_file_operations;
-		inode->i_mapping->a_ops = &xfs_address_space_operations;
-		break;
-	case S_IFDIR:
-		if (xfs_sb_version_hasasciici(&XFS_M(inode->i_sb)->m_sb))
-			inode->i_op = &xfs_dir_ci_inode_operations;
-		else
-			inode->i_op = &xfs_dir_inode_operations;
-		inode->i_fop = &xfs_dir_file_operations;
-		break;
-	case S_IFLNK:
-		inode->i_op = &xfs_symlink_inode_operations;
-		if (!(XFS_I(inode)->i_df.if_flags & XFS_IFINLINE))
-			inode->i_mapping->a_ops = &xfs_address_space_operations;
-		break;
-	default:
-		inode->i_op = &xfs_inode_operations;
-		init_special_inode(inode, inode->i_mode, inode->i_rdev);
-		break;
-	}
-}
-
-STATIC_INLINE void
-xfs_revalidate_inode(
-	xfs_mount_t		*mp,
-	bhv_vnode_t		*vp,
-	xfs_inode_t		*ip)
-{
-	struct inode		*inode = vn_to_inode(vp);
-
-	inode->i_mode	= ip->i_d.di_mode;
-	inode->i_nlink	= ip->i_d.di_nlink;
-	inode->i_uid	= ip->i_d.di_uid;
-	inode->i_gid	= ip->i_d.di_gid;
-
-	switch (inode->i_mode & S_IFMT) {
-	case S_IFBLK:
-	case S_IFCHR:
-		inode->i_rdev =
-			MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff,
-			      sysv_minor(ip->i_df.if_u2.if_rdev));
-		break;
-	default:
-		inode->i_rdev = 0;
-		break;
-	}
-
-	inode->i_generation = ip->i_d.di_gen;
-	i_size_write(inode, ip->i_d.di_size);
-	inode->i_atime.tv_sec	= ip->i_d.di_atime.t_sec;
-	inode->i_atime.tv_nsec	= ip->i_d.di_atime.t_nsec;
-	inode->i_mtime.tv_sec	= ip->i_d.di_mtime.t_sec;
-	inode->i_mtime.tv_nsec	= ip->i_d.di_mtime.t_nsec;
-	inode->i_ctime.tv_sec	= ip->i_d.di_ctime.t_sec;
-	inode->i_ctime.tv_nsec	= ip->i_d.di_ctime.t_nsec;
-	if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE)
-		inode->i_flags |= S_IMMUTABLE;
-	else
-		inode->i_flags &= ~S_IMMUTABLE;
-	if (ip->i_d.di_flags & XFS_DIFLAG_APPEND)
-		inode->i_flags |= S_APPEND;
-	else
-		inode->i_flags &= ~S_APPEND;
-	if (ip->i_d.di_flags & XFS_DIFLAG_SYNC)
-		inode->i_flags |= S_SYNC;
-	else
-		inode->i_flags &= ~S_SYNC;
-	if (ip->i_d.di_flags & XFS_DIFLAG_NOATIME)
-		inode->i_flags |= S_NOATIME;
-	else
-		inode->i_flags &= ~S_NOATIME;
-	xfs_iflags_clear(ip, XFS_IMODIFIED);
-}
-
-void
-xfs_initialize_vnode(
-	struct xfs_mount	*mp,
-	bhv_vnode_t		*vp,
-	struct xfs_inode	*ip)
-{
-	struct inode		*inode = vn_to_inode(vp);
-
-	if (!ip->i_vnode) {
-		ip->i_vnode = vp;
-		inode->i_private = ip;
-	}
-
-	/*
-	 * We need to set the ops vectors, and unlock the inode, but if
-	 * we have been called during the new inode create process, it is
-	 * too early to fill in the Linux inode.  We will get called a
-	 * second time once the inode is properly set up, and then we can
-	 * finish our work.
-	 */
-	if (ip->i_d.di_mode != 0 && (inode->i_state & I_NEW)) {
-		xfs_revalidate_inode(mp, vp, ip);
-		xfs_set_inodeops(inode);
-
-		xfs_iflags_clear(ip, XFS_INEW);
-		barrier();
-
-		unlock_new_inode(inode);
-	}
-}
-
 int
 xfs_blkdev_get(
 	xfs_mount_t		*mp,
@@ -982,26 +870,21 @@
 xfs_fs_alloc_inode(
 	struct super_block	*sb)
 {
-	bhv_vnode_t		*vp;
-
-	vp = kmem_zone_alloc(xfs_vnode_zone, KM_SLEEP);
-	if (unlikely(!vp))
-		return NULL;
-	return vn_to_inode(vp);
+	return kmem_zone_alloc(xfs_vnode_zone, KM_SLEEP);
 }
 
 STATIC void
 xfs_fs_destroy_inode(
 	struct inode		*inode)
 {
-	kmem_zone_free(xfs_vnode_zone, vn_from_inode(inode));
+	kmem_zone_free(xfs_vnode_zone, inode);
 }
 
 STATIC void
 xfs_fs_inode_init_once(
 	void			*vnode)
 {
-	inode_init_once(vn_to_inode((bhv_vnode_t *)vnode));
+	inode_init_once((struct inode *)vnode);
 }
 
 /*
@@ -1106,7 +989,7 @@
 xfs_flush_inode(
 	xfs_inode_t	*ip)
 {
-	struct inode	*inode = ip->i_vnode;
+	struct inode	*inode = VFS_I(ip);
 
 	igrab(inode);
 	xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inode_work);
@@ -1131,7 +1014,7 @@
 xfs_flush_device(
 	xfs_inode_t	*ip)
 {
-	struct inode	*inode = vn_to_inode(XFS_ITOV(ip));
+	struct inode	*inode = VFS_I(ip);
 
 	igrab(inode);
 	xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_device_work);
@@ -1201,6 +1084,15 @@
 }
 
 STATIC void
+xfs_free_fsname(
+	struct xfs_mount	*mp)
+{
+	kfree(mp->m_fsname);
+	kfree(mp->m_rtname);
+	kfree(mp->m_logname);
+}
+
+STATIC void
 xfs_fs_put_super(
 	struct super_block	*sb)
 {
@@ -1239,8 +1131,6 @@
 	error = xfs_unmount_flush(mp, 0);
 	WARN_ON(error);
 
-	IRELE(rip);
-
 	/*
 	 * If we're forcing a shutdown, typically because of a media error,
 	 * we want to make sure we invalidate dirty pages that belong to
@@ -1257,10 +1147,12 @@
 	}
 
 	xfs_unmountfs(mp);
+	xfs_freesb(mp);
 	xfs_icsb_destroy_counters(mp);
 	xfs_close_devices(mp);
 	xfs_qmops_put(mp);
 	xfs_dmops_put(mp);
+	xfs_free_fsname(mp);
 	kfree(mp);
 }
 
@@ -1517,6 +1409,8 @@
 	struct xfs_mount_args	*ap,
 	struct xfs_mount	*mp)
 {
+	int			error;
+
 	/* Values are in BBs */
 	if ((ap->flags & XFSMNT_NOALIGN) != XFSMNT_NOALIGN) {
 		/*
@@ -1549,17 +1443,27 @@
 			ap->logbufsize);
 		return XFS_ERROR(EINVAL);
 	}
+
+	error = ENOMEM;
+
 	mp->m_logbsize = ap->logbufsize;
 	mp->m_fsname_len = strlen(ap->fsname) + 1;
-	mp->m_fsname = kmem_alloc(mp->m_fsname_len, KM_SLEEP);
-	strcpy(mp->m_fsname, ap->fsname);
+
+	mp->m_fsname = kstrdup(ap->fsname, GFP_KERNEL);
+	if (!mp->m_fsname)
+		goto out;
+
 	if (ap->rtname[0]) {
-		mp->m_rtname = kmem_alloc(strlen(ap->rtname) + 1, KM_SLEEP);
-		strcpy(mp->m_rtname, ap->rtname);
+		mp->m_rtname = kstrdup(ap->rtname, GFP_KERNEL);
+		if (!mp->m_rtname)
+			goto out_free_fsname;
+
 	}
+
 	if (ap->logname[0]) {
-		mp->m_logname = kmem_alloc(strlen(ap->logname) + 1, KM_SLEEP);
-		strcpy(mp->m_logname, ap->logname);
+		mp->m_logname = kstrdup(ap->logname, GFP_KERNEL);
+		if (!mp->m_logname)
+			goto out_free_rtname;
 	}
 
 	if (ap->flags & XFSMNT_WSYNC)
@@ -1632,6 +1536,14 @@
 	if (ap->flags & XFSMNT_DMAPI)
 		mp->m_flags |= XFS_MOUNT_DMAPI;
 	return 0;
+
+
+ out_free_rtname:
+	kfree(mp->m_rtname);
+ out_free_fsname:
+	kfree(mp->m_fsname);
+ out:
+	return error;
 }
 
 /*
@@ -1792,10 +1704,10 @@
 	 */
 	error = xfs_start_flags(args, mp);
 	if (error)
-		goto out_destroy_counters;
+		goto out_free_fsname;
 	error = xfs_readsb(mp, flags);
 	if (error)
-		goto out_destroy_counters;
+		goto out_free_fsname;
 	error = xfs_finish_flags(args, mp);
 	if (error)
 		goto out_free_sb;
@@ -1811,7 +1723,7 @@
 	if (error)
 		goto out_free_sb;
 
-	error = xfs_mountfs(mp, flags);
+	error = xfs_mountfs(mp);
 	if (error)
 		goto out_filestream_unmount;
 
@@ -1825,7 +1737,7 @@
 	sb->s_time_gran = 1;
 	set_posix_acl_flag(sb);
 
-	root = igrab(mp->m_rootip->i_vnode);
+	root = igrab(VFS_I(mp->m_rootip));
 	if (!root) {
 		error = ENOENT;
 		goto fail_unmount;
@@ -1857,7 +1769,8 @@
 	xfs_filestream_unmount(mp);
  out_free_sb:
 	xfs_freesb(mp);
- out_destroy_counters:
+ out_free_fsname:
+	xfs_free_fsname(mp);
 	xfs_icsb_destroy_counters(mp);
 	xfs_close_devices(mp);
  out_put_qmops:
@@ -1890,10 +1803,8 @@
 	error = xfs_unmount_flush(mp, 0);
 	WARN_ON(error);
 
-	IRELE(mp->m_rootip);
-
 	xfs_unmountfs(mp);
-	goto out_destroy_counters;
+	goto out_free_sb;
 }
 
 STATIC int
@@ -2014,7 +1925,7 @@
 STATIC int __init
 xfs_init_zones(void)
 {
-	xfs_vnode_zone = kmem_zone_init_flags(sizeof(bhv_vnode_t), "xfs_vnode",
+	xfs_vnode_zone = kmem_zone_init_flags(sizeof(struct inode), "xfs_vnode",
 					KM_ZONE_HWALIGN | KM_ZONE_RECLAIM |
 					KM_ZONE_SPREAD,
 					xfs_fs_inode_init_once);
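
A minimal sketch (hypothetical demo_* names) of the kstrdup() plus
staged-unwind pattern that replaces the unchecked kmem_alloc()/strcpy()
pairs above:

	#include <linux/errno.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	struct demo_names {
		char *fsname;
		char *rtname;
	};

	static int demo_copy_names(struct demo_names *n, const char *fs,
				   const char *rt)
	{
		n->fsname = kstrdup(fs, GFP_KERNEL);
		if (!n->fsname)
			return -ENOMEM;

		if (rt[0]) {
			n->rtname = kstrdup(rt, GFP_KERNEL);
			if (!n->rtname)
				goto out_free_fsname;
		}
		return 0;

	out_free_fsname:
		kfree(n->fsname);
		return -ENOMEM;
	}
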
diff --git a/fs/xfs/linux-2.6/xfs_super.h b/fs/xfs/linux-2.6/xfs_super.h
index b7d13da..fe2ef4e 100644
--- a/fs/xfs/linux-2.6/xfs_super.h
+++ b/fs/xfs/linux-2.6/xfs_super.h
@@ -101,9 +101,6 @@
 
 extern __uint64_t xfs_max_file_offset(unsigned int);
 
-extern void xfs_initialize_vnode(struct xfs_mount *mp, bhv_vnode_t *vp,
-		struct xfs_inode *ip);
-
 extern void xfs_flush_inode(struct xfs_inode *);
 extern void xfs_flush_device(struct xfs_inode *);
 
diff --git a/fs/xfs/linux-2.6/xfs_vnode.c b/fs/xfs/linux-2.6/xfs_vnode.c
index 25488b6..b52528b 100644
--- a/fs/xfs/linux-2.6/xfs_vnode.c
+++ b/fs/xfs/linux-2.6/xfs_vnode.c
@@ -33,7 +33,7 @@
 
 
 /*
- * Dedicated vnode inactive/reclaim sync semaphores.
+ * Dedicated vnode inactive/reclaim sync wait queues.
  * Prime number of hash buckets since address is used as the key.
  */
 #define NVSYNC                  37
@@ -82,24 +82,6 @@
 		xfs_do_force_shutdown(ip->i_mount, SHUTDOWN_DEVICE_REQ, f, l);
 }
 
-
-/*
- * Add a reference to a referenced vnode.
- */
-bhv_vnode_t *
-vn_hold(
-	bhv_vnode_t	*vp)
-{
-	struct inode	*inode;
-
-	XFS_STATS_INC(vn_hold);
-
-	inode = igrab(vn_to_inode(vp));
-	ASSERT(inode);
-
-	return vp;
-}
-
 #ifdef	XFS_INODE_TRACE
 
 /*
@@ -108,7 +90,7 @@
  */
 static inline int xfs_icount(struct xfs_inode *ip)
 {
-	bhv_vnode_t *vp = XFS_ITOV_NULL(ip);
+	struct inode *vp = VFS_I(ip);
 
 	if (vp)
 		return vn_count(vp);
diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h
index 41ca2ce..683ce16 100644
--- a/fs/xfs/linux-2.6/xfs_vnode.h
+++ b/fs/xfs/linux-2.6/xfs_vnode.h
@@ -22,20 +22,6 @@
 struct xfs_iomap;
 struct attrlist_cursor_kern;
 
-typedef struct inode	bhv_vnode_t;
-
-/*
- * Vnode to Linux inode mapping.
- */
-static inline bhv_vnode_t *vn_from_inode(struct inode *inode)
-{
-	return inode;
-}
-static inline struct inode *vn_to_inode(bhv_vnode_t *vnode)
-{
-	return vnode;
-}
-
 /*
  * Return values for xfs_inactive.  A return value of
  * VN_INACTIVE_NOCACHE implies that the file system behavior
@@ -76,57 +62,52 @@
 extern void	vn_iowake(struct xfs_inode *ip);
 extern void	vn_ioerror(struct xfs_inode *ip, int error, char *f, int l);
 
-static inline int vn_count(bhv_vnode_t *vp)
+static inline int vn_count(struct inode *vp)
 {
-	return atomic_read(&vn_to_inode(vp)->i_count);
+	return atomic_read(&vp->i_count);
 }
 
-/*
- * Vnode reference counting functions (and macros for compatibility).
- */
-extern bhv_vnode_t	*vn_hold(bhv_vnode_t *);
+#define IHOLD(ip) \
+do { \
+	ASSERT(atomic_read(&VFS_I(ip)->i_count) > 0) ; \
+	atomic_inc(&(VFS_I(ip)->i_count)); \
+	xfs_itrace_hold((ip), __FILE__, __LINE__, (inst_t *)__return_address); \
+} while (0)
 
-#if defined(XFS_INODE_TRACE)
-#define VN_HOLD(vp)		\
-	((void)vn_hold(vp),	\
-	  xfs_itrace_hold(xfs_vtoi(vp), __FILE__, __LINE__, (inst_t *)__return_address))
-#define VN_RELE(vp)		\
-	  (xfs_itrace_rele(xfs_vtoi(vp), __FILE__, __LINE__, (inst_t *)__return_address), \
-	   iput(vn_to_inode(vp)))
-#else
-#define VN_HOLD(vp)		((void)vn_hold(vp))
-#define VN_RELE(vp)		(iput(vn_to_inode(vp)))
-#endif
+#define IRELE(ip) \
+do { \
+	xfs_itrace_rele((ip), __FILE__, __LINE__, (inst_t *)__return_address); \
+	iput(VFS_I(ip)); \
+} while (0)
 
-static inline bhv_vnode_t *vn_grab(bhv_vnode_t *vp)
+static inline struct inode *vn_grab(struct inode *vp)
 {
-	struct inode *inode = igrab(vn_to_inode(vp));
-	return inode ? vn_from_inode(inode) : NULL;
+	return igrab(vp);
 }
 
 /*
  * Dealing with bad inodes
  */
-static inline int VN_BAD(bhv_vnode_t *vp)
+static inline int VN_BAD(struct inode *vp)
 {
-	return is_bad_inode(vn_to_inode(vp));
+	return is_bad_inode(vp);
 }
 
 /*
  * Extracting atime values in various formats
  */
-static inline void vn_atime_to_bstime(bhv_vnode_t *vp, xfs_bstime_t *bs_atime)
+static inline void vn_atime_to_bstime(struct inode *vp, xfs_bstime_t *bs_atime)
 {
 	bs_atime->tv_sec = vp->i_atime.tv_sec;
 	bs_atime->tv_nsec = vp->i_atime.tv_nsec;
 }
 
-static inline void vn_atime_to_timespec(bhv_vnode_t *vp, struct timespec *ts)
+static inline void vn_atime_to_timespec(struct inode *vp, struct timespec *ts)
 {
 	*ts = vp->i_atime;
 }
 
-static inline void vn_atime_to_time_t(bhv_vnode_t *vp, time_t *tt)
+static inline void vn_atime_to_time_t(struct inode *vp, time_t *tt)
 {
 	*tt = vp->i_atime.tv_sec;
 }
@@ -134,9 +115,9 @@
 /*
  * Some useful predicates.
  */
-#define VN_MAPPED(vp)	mapping_mapped(vn_to_inode(vp)->i_mapping)
-#define VN_CACHED(vp)	(vn_to_inode(vp)->i_mapping->nrpages)
-#define VN_DIRTY(vp)	mapping_tagged(vn_to_inode(vp)->i_mapping, \
+#define VN_MAPPED(vp)	mapping_mapped(vp->i_mapping)
+#define VN_CACHED(vp)	(vp->i_mapping->nrpages)
+#define VN_DIRTY(vp)	mapping_tagged(vp->i_mapping, \
 					PAGECACHE_TAG_DIRTY)
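
The IHOLD()/IRELE() macros above rely on the do { } while (0) idiom so
that a multi-statement macro expands as a single statement; a minimal
sketch (hypothetical DEMO_ name, assuming the ASSERT and VFS_I helpers
from the surrounding XFS headers):

	#define DEMO_IHOLD(ip)						\
	do {								\
		ASSERT(atomic_read(&VFS_I(ip)->i_count) > 0);		\
		atomic_inc(&VFS_I(ip)->i_count);			\
	} while (0)

	/*
	 * Expands safely even as the unbraced body of an if/else:
	 *
	 *	if (cond)
	 *		DEMO_IHOLD(ip);
	 *	else
	 *		IRELE(ip);
	 */
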
 
 
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c
index fc9f3fb..f2705f2 100644
--- a/fs/xfs/quota/xfs_dquot.c
+++ b/fs/xfs/quota/xfs_dquot.c
@@ -101,11 +101,18 @@
 	if (brandnewdquot) {
 		dqp->dq_flnext = dqp->dq_flprev = dqp;
 		mutex_init(&dqp->q_qlock);
-		initnsema(&dqp->q_flock, 1, "fdq");
 		sv_init(&dqp->q_pinwait, SV_DEFAULT, "pdq");
 
+		/*
+		 * Because we want to use a counting completion, complete
+		 * it once up front so that a single caller can pass
+		 * through without blocking (the "lock" starts unlocked).
+		 */
+		init_completion(&dqp->q_flush);
+		complete(&dqp->q_flush);
+
 #ifdef XFS_DQUOT_TRACE
-		dqp->q_trace = ktrace_alloc(DQUOT_TRACE_SIZE, KM_SLEEP);
+		dqp->q_trace = ktrace_alloc(DQUOT_TRACE_SIZE, KM_NOFS);
 		xfs_dqtrace_entry(dqp, "DQINIT");
 #endif
 	} else {
@@ -150,7 +157,6 @@
 	ASSERT(! XFS_DQ_IS_ON_FREELIST(dqp));
 
 	mutex_destroy(&dqp->q_qlock);
-	freesema(&dqp->q_flock);
 	sv_destroy(&dqp->q_pinwait);
 
 #ifdef XFS_DQUOT_TRACE
@@ -431,7 +437,7 @@
 	 * when it unlocks the inode. Since we want to keep the quota
 	 * inode around, we bump the vnode ref count now.
 	 */
-	VN_HOLD(XFS_ITOV(quotip));
+	IHOLD(quotip);
 
 	xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
 	nmaps = 1;
@@ -1211,7 +1217,7 @@
 	int			error;
 
 	ASSERT(XFS_DQ_IS_LOCKED(dqp));
-	ASSERT(XFS_DQ_IS_FLUSH_LOCKED(dqp));
+	ASSERT(!completion_done(&dqp->q_flush));
 	xfs_dqtrace_entry(dqp, "DQFLUSH");
 
 	/*
@@ -1348,34 +1354,18 @@
 	xfs_dqfunlock(dqp);
 }
 
-
-int
-xfs_qm_dqflock_nowait(
-	xfs_dquot_t *dqp)
-{
-	int locked;
-
-	locked = cpsema(&((dqp)->q_flock));
-
-	/* XXX ifdef these out */
-	if (locked)
-		(dqp)->dq_flags |= XFS_DQ_FLOCKED;
-	return (locked);
-}
-
-
 int
 xfs_qm_dqlock_nowait(
 	xfs_dquot_t *dqp)
 {
-	return (mutex_trylock(&((dqp)->q_qlock)));
+	return mutex_trylock(&dqp->q_qlock);
 }
 
 void
 xfs_dqlock(
 	xfs_dquot_t *dqp)
 {
-	mutex_lock(&(dqp->q_qlock));
+	mutex_lock(&dqp->q_qlock);
 }
 
 void
@@ -1468,7 +1458,7 @@
 	 * if we're turning off quotas. Basically, we need this flush
 	 * lock, and are willing to block on it.
 	 */
-	if (! xfs_qm_dqflock_nowait(dqp)) {
+	if (!xfs_dqflock_nowait(dqp)) {
 		/*
 		 * Block on the flush lock after nudging dquot buffer,
 		 * if it is incore.
diff --git a/fs/xfs/quota/xfs_dquot.h b/fs/xfs/quota/xfs_dquot.h
index f7393bb..8958d0f 100644
--- a/fs/xfs/quota/xfs_dquot.h
+++ b/fs/xfs/quota/xfs_dquot.h
@@ -82,7 +82,7 @@
 	xfs_qcnt_t	 q_res_icount;	/* total inos allocd+reserved */
 	xfs_qcnt_t	 q_res_rtbcount;/* total realtime blks used+reserved */
 	mutex_t		 q_qlock;	/* quota lock */
-	sema_t		 q_flock;	/* flush lock */
+	struct completion q_flush;	/* flush completion queue */
 	uint		 q_pincount;	/* pin count for this dquot */
 	sv_t		 q_pinwait;	/* sync var for pinning */
 #ifdef XFS_DQUOT_TRACE
@@ -113,17 +113,25 @@
 
 
 /*
- * The following three routines simply manage the q_flock
- * semaphore embedded in the dquot.  This semaphore synchronizes
- * processes attempting to flush the in-core dquot back to disk.
+ * Manage the q_flush completion queue embedded in the dquot.  This completion
+ * queue synchronizes processes attempting to flush the in-core dquot back to
+ * disk.
  */
-#define xfs_dqflock(dqp)	 { psema(&((dqp)->q_flock), PINOD | PRECALC);\
-				   (dqp)->dq_flags |= XFS_DQ_FLOCKED; }
-#define xfs_dqfunlock(dqp)	 { ASSERT(issemalocked(&((dqp)->q_flock))); \
-				   vsema(&((dqp)->q_flock)); \
-				   (dqp)->dq_flags &= ~(XFS_DQ_FLOCKED); }
+static inline void xfs_dqflock(xfs_dquot_t *dqp)
+{
+	wait_for_completion(&dqp->q_flush);
+}
 
-#define XFS_DQ_IS_FLUSH_LOCKED(dqp) (issemalocked(&((dqp)->q_flock)))
+static inline int xfs_dqflock_nowait(xfs_dquot_t *dqp)
+{
+	return try_wait_for_completion(&dqp->q_flush);
+}
+
+static inline void xfs_dqfunlock(xfs_dquot_t *dqp)
+{
+	complete(&dqp->q_flush);
+}
+
 #define XFS_DQ_IS_ON_FREELIST(dqp)  ((dqp)->dq_flnext != (dqp))
 #define XFS_DQ_IS_DIRTY(dqp)	((dqp)->dq_flags & XFS_DQ_DIRTY)
 #define XFS_QM_ISUDQ(dqp)	((dqp)->dq_flags & XFS_DQ_USER)
@@ -167,7 +175,6 @@
 extern int		xfs_qm_dqpurge(xfs_dquot_t *);
 extern void		xfs_qm_dqunpin_wait(xfs_dquot_t *);
 extern int		xfs_qm_dqlock_nowait(xfs_dquot_t *);
-extern int		xfs_qm_dqflock_nowait(xfs_dquot_t *);
 extern void		xfs_qm_dqflock_pushbuf_wait(xfs_dquot_t *dqp);
 extern void		xfs_qm_adjust_dqtimers(xfs_mount_t *,
 					xfs_disk_dquot_t *);
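
A minimal sketch (illustrative) of the trick the q_flush helpers above
rely on: completing a counting completion once up front turns it into a
binary lock with trylock support:

	#include <linux/completion.h>

	static struct completion demo_flush;

	static void demo_flush_init(void)
	{
		init_completion(&demo_flush);
		complete(&demo_flush);	/* start "unlocked" */
	}

	static void demo_flush_lock(void)	/* cf. xfs_dqflock() */
	{
		wait_for_completion(&demo_flush);
	}

	static int demo_flush_trylock(void)	/* cf. xfs_dqflock_nowait() */
	{
		return try_wait_for_completion(&demo_flush);
	}

	static void demo_flush_unlock(void)	/* cf. xfs_dqfunlock() */
	{
		complete(&demo_flush);
	}
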
diff --git a/fs/xfs/quota/xfs_dquot_item.c b/fs/xfs/quota/xfs_dquot_item.c
index 08d2fc8..f028644 100644
--- a/fs/xfs/quota/xfs_dquot_item.c
+++ b/fs/xfs/quota/xfs_dquot_item.c
@@ -151,7 +151,7 @@
 	dqp = logitem->qli_dquot;
 
 	ASSERT(XFS_DQ_IS_LOCKED(dqp));
-	ASSERT(XFS_DQ_IS_FLUSH_LOCKED(dqp));
+	ASSERT(!completion_done(&dqp->q_flush));
 
 	/*
 	 * Since we were able to lock the dquot's flush lock and
@@ -245,7 +245,7 @@
 	 * inode flush completed and the inode was taken off the AIL.
 	 * So, just get out.
 	 */
-	if (!issemalocked(&(dqp->q_flock))  ||
+	if (completion_done(&dqp->q_flush)  ||
 	    ((qip->qli_item.li_flags & XFS_LI_IN_AIL) == 0)) {
 		qip->qli_pushbuf_flag = 0;
 		xfs_dqunlock(dqp);
@@ -258,7 +258,7 @@
 	if (bp != NULL) {
 		if (XFS_BUF_ISDELAYWRITE(bp)) {
 			dopush = ((qip->qli_item.li_flags & XFS_LI_IN_AIL) &&
-				  issemalocked(&(dqp->q_flock)));
+				  !completion_done(&dqp->q_flush));
 			qip->qli_pushbuf_flag = 0;
 			xfs_dqunlock(dqp);
 
@@ -317,7 +317,7 @@
 		return (XFS_ITEM_LOCKED);
 
 	retval = XFS_ITEM_SUCCESS;
-	if (! xfs_qm_dqflock_nowait(dqp)) {
+	if (!xfs_dqflock_nowait(dqp)) {
 		/*
 		 * The dquot is already being flushed.	It may have been
 		 * flushed delayed write, however, and we don't want to
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 021934a..df0ffef 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -310,8 +310,7 @@
  */
 void
 xfs_qm_mount_quotas(
-	xfs_mount_t	*mp,
-	int		mfsi_flags)
+	xfs_mount_t	*mp)
 {
 	int		error = 0;
 	uint		sbf;
@@ -346,8 +345,7 @@
 	/*
 	 * If any of the quotas are not consistent, do a quotacheck.
 	 */
-	if (XFS_QM_NEED_QUOTACHECK(mp) &&
-	    !(mfsi_flags & XFS_MFSI_NO_QUOTACHECK)) {
+	if (XFS_QM_NEED_QUOTACHECK(mp)) {
 		error = xfs_qm_quotacheck(mp);
 		if (error) {
 			/* Quotacheck failed and disabled quotas. */
@@ -484,7 +482,7 @@
 		xfs_dqtrace_entry(dqp, "FLUSHALL: DQDIRTY");
 		/* XXX a sentinel would be better */
 		recl = XFS_QI_MPLRECLAIMS(mp);
-		if (! xfs_qm_dqflock_nowait(dqp)) {
+		if (!xfs_dqflock_nowait(dqp)) {
 			/*
 			 * If we can't grab the flush lock then check
 			 * to see if the dquot has been flushed delayed
@@ -1062,7 +1060,7 @@
 
 		/* XXX a sentinel would be better */
 		recl = XFS_QI_MPLRECLAIMS(mp);
-		if (! xfs_qm_dqflock_nowait(dqp)) {
+		if (!xfs_dqflock_nowait(dqp)) {
 			if (nowait) {
 				xfs_dqunlock(dqp);
 				continue;
@@ -2079,7 +2077,7 @@
 		 * Try to grab the flush lock. If this dquot is in the process of
 		 * getting flushed to disk, we don't want to reclaim it.
 		 */
-		if (! xfs_qm_dqflock_nowait(dqp)) {
+		if (!xfs_dqflock_nowait(dqp)) {
 			xfs_dqunlock(dqp);
 			dqp = dqp->dq_flnext;
 			continue;
@@ -2257,7 +2255,7 @@
 		 * Try to grab the flush lock. If this dquot is in the process of
 		 * getting flushed to disk, we don't want to reclaim it.
 		 */
-		if (! xfs_qm_dqflock_nowait(dqp)) {
+		if (!xfs_dqflock_nowait(dqp)) {
 			xfs_dqunlock(dqp);
 			continue;
 		}
diff --git a/fs/xfs/quota/xfs_qm.h b/fs/xfs/quota/xfs_qm.h
index cd2300e..44f2534 100644
--- a/fs/xfs/quota/xfs_qm.h
+++ b/fs/xfs/quota/xfs_qm.h
@@ -165,7 +165,7 @@
 #define XFS_QM_RELE(xqm)	((xqm)->qm_nrefs--)
 
 extern void		xfs_qm_destroy_quotainfo(xfs_mount_t *);
-extern void		xfs_qm_mount_quotas(xfs_mount_t *, int);
+extern void		xfs_qm_mount_quotas(xfs_mount_t *);
 extern int		xfs_qm_quotacheck(xfs_mount_t *);
 extern void		xfs_qm_unmount_quotadestroy(xfs_mount_t *);
 extern int		xfs_qm_unmount_quotas(xfs_mount_t *);
diff --git a/fs/xfs/quota/xfs_qm_bhv.c b/fs/xfs/quota/xfs_qm_bhv.c
index f4f6c4c..eea2e60 100644
--- a/fs/xfs/quota/xfs_qm_bhv.c
+++ b/fs/xfs/quota/xfs_qm_bhv.c
@@ -162,7 +162,7 @@
 			 * mounting, and get on with the boring life
 			 * without disk quotas.
 			 */
-			xfs_qm_mount_quotas(mp, 0);
+			xfs_qm_mount_quotas(mp);
 		} else {
 			/*
 			 * Clear the quota flags, but remember them. This
@@ -184,13 +184,12 @@
 xfs_qm_endmount(
 	xfs_mount_t	*mp,
 	uint		needquotamount,
-	uint		quotaflags,
-	int		mfsi_flags)
+	uint		quotaflags)
 {
 	if (needquotamount) {
 		ASSERT(mp->m_qflags == 0);
 		mp->m_qflags = quotaflags;
-		xfs_qm_mount_quotas(mp, mfsi_flags);
+		xfs_qm_mount_quotas(mp);
 	}
 
 #if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
index adfb872..1a3b803 100644
--- a/fs/xfs/quota/xfs_qm_syscalls.c
+++ b/fs/xfs/quota/xfs_qm_syscalls.c
@@ -1034,7 +1034,7 @@
 {
 	xfs_inode_t	*ip, *topino;
 	uint		ireclaims;
-	bhv_vnode_t	*vp;
+	struct inode	*vp;
 	boolean_t	vnode_refd;
 
 	ASSERT(mp->m_quotainfo);
@@ -1059,7 +1059,7 @@
 			ip = ip->i_mnext;
 			continue;
 		}
-		vp = XFS_ITOV_NULL(ip);
+		vp = VFS_I(ip);
 		if (!vp) {
 			ASSERT(ip->i_udquot == NULL);
 			ASSERT(ip->i_gdquot == NULL);
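
A minimal sketch of the VFS_I()-style mapping used throughout this
series, assuming (as xfs_setup_inode() above suggests) that the XFS
inode keeps a pointer to its Linux inode:

	#include <linux/fs.h>

	struct demo_xfs_inode {
		struct inode *i_vnode;	/* associated Linux inode */
	};

	static inline struct inode *DEMO_VFS_I(struct demo_xfs_inode *ip)
	{
		return ip->i_vnode;
	}
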
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
index 3e4648a..b2f639a 100644
--- a/fs/xfs/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
@@ -37,15 +37,15 @@
 #include <linux/capability.h>
 #include <linux/posix_acl_xattr.h>
 
-STATIC int	xfs_acl_setmode(bhv_vnode_t *, xfs_acl_t *, int *);
+STATIC int	xfs_acl_setmode(struct inode *, xfs_acl_t *, int *);
 STATIC void     xfs_acl_filter_mode(mode_t, xfs_acl_t *);
 STATIC void	xfs_acl_get_endian(xfs_acl_t *);
 STATIC int	xfs_acl_access(uid_t, gid_t, xfs_acl_t *, mode_t, cred_t *);
 STATIC int	xfs_acl_invalid(xfs_acl_t *);
 STATIC void	xfs_acl_sync_mode(mode_t, xfs_acl_t *);
-STATIC void	xfs_acl_get_attr(bhv_vnode_t *, xfs_acl_t *, int, int, int *);
-STATIC void	xfs_acl_set_attr(bhv_vnode_t *, xfs_acl_t *, int, int *);
-STATIC int	xfs_acl_allow_set(bhv_vnode_t *, int);
+STATIC void	xfs_acl_get_attr(struct inode *, xfs_acl_t *, int, int, int *);
+STATIC void	xfs_acl_set_attr(struct inode *, xfs_acl_t *, int, int *);
+STATIC int	xfs_acl_allow_set(struct inode *, int);
 
 kmem_zone_t *xfs_acl_zone;
 
@@ -55,7 +55,7 @@
  */
 int
 xfs_acl_vhasacl_access(
-	bhv_vnode_t	*vp)
+	struct inode	*vp)
 {
 	int		error;
 
@@ -68,7 +68,7 @@
  */
 int
 xfs_acl_vhasacl_default(
-	bhv_vnode_t	*vp)
+	struct inode	*vp)
 {
 	int		error;
 
@@ -207,7 +207,7 @@
 
 int
 xfs_acl_vget(
-	bhv_vnode_t	*vp,
+	struct inode	*vp,
 	void		*acl,
 	size_t		size,
 	int		kind)
@@ -217,7 +217,6 @@
 	posix_acl_xattr_header	*ext_acl = acl;
 	int			flags = 0;
 
-	VN_HOLD(vp);
 	if(size) {
 		if (!(_ACL_ALLOC(xfs_acl))) {
 			error = ENOMEM;
@@ -239,11 +238,10 @@
 			goto out;
 		}
 		if (kind == _ACL_TYPE_ACCESS)
-			xfs_acl_sync_mode(xfs_vtoi(vp)->i_d.di_mode, xfs_acl);
+			xfs_acl_sync_mode(XFS_I(vp)->i_d.di_mode, xfs_acl);
 		error = -posix_acl_xfs_to_xattr(xfs_acl, ext_acl, size);
 	}
 out:
-	VN_RELE(vp);
 	if(xfs_acl)
 		_ACL_FREE(xfs_acl);
 	return -error;
@@ -251,28 +249,26 @@
 
 int
 xfs_acl_vremove(
-	bhv_vnode_t	*vp,
+	struct inode	*vp,
 	int		kind)
 {
 	int		error;
 
-	VN_HOLD(vp);
 	error = xfs_acl_allow_set(vp, kind);
 	if (!error) {
-		error = xfs_attr_remove(xfs_vtoi(vp),
+		error = xfs_attr_remove(XFS_I(vp),
 						kind == _ACL_TYPE_DEFAULT?
 						SGI_ACL_DEFAULT: SGI_ACL_FILE,
 						ATTR_ROOT);
 		if (error == ENOATTR)
 			error = 0;	/* already gone; not an error */
 	}
-	VN_RELE(vp);
 	return -error;
 }
 
 int
 xfs_acl_vset(
-	bhv_vnode_t		*vp,
+	struct inode		*vp,
 	void			*acl,
 	size_t			size,
 	int			kind)
@@ -298,7 +294,6 @@
 		return 0;
 	}
 
-	VN_HOLD(vp);
 	error = xfs_acl_allow_set(vp, kind);
 
 	/* Incoming ACL exists, set file mode based on its value */
@@ -321,7 +316,6 @@
 	}
 
 out:
-	VN_RELE(vp);
 	_ACL_FREE(xfs_acl);
 	return -error;
 }
@@ -363,7 +357,7 @@
 
 STATIC int
 xfs_acl_allow_set(
-	bhv_vnode_t	*vp,
+	struct inode	*vp,
 	int		kind)
 {
 	if (vp->i_flags & (S_IMMUTABLE|S_APPEND))
@@ -372,7 +366,7 @@
 		return ENOTDIR;
 	if (vp->i_sb->s_flags & MS_RDONLY)
 		return EROFS;
-	if (xfs_vtoi(vp)->i_d.di_uid != current->fsuid && !capable(CAP_FOWNER))
+	if (XFS_I(vp)->i_d.di_uid != current->fsuid && !capable(CAP_FOWNER))
 		return EPERM;
 	return 0;
 }
@@ -566,7 +560,7 @@
  */
 STATIC void
 xfs_acl_get_attr(
-	bhv_vnode_t	*vp,
+	struct inode	*vp,
 	xfs_acl_t	*aclp,
 	int		kind,
 	int		flags,
@@ -576,7 +570,7 @@
 
 	ASSERT((flags & ATTR_KERNOVAL) ? (aclp == NULL) : 1);
 	flags |= ATTR_ROOT;
-	*error = xfs_attr_get(xfs_vtoi(vp),
+	*error = xfs_attr_get(XFS_I(vp),
 					kind == _ACL_TYPE_ACCESS ?
 					SGI_ACL_FILE : SGI_ACL_DEFAULT,
 					(char *)aclp, &len, flags);
@@ -590,7 +584,7 @@
  */
 STATIC void
 xfs_acl_set_attr(
-	bhv_vnode_t	*vp,
+	struct inode	*vp,
 	xfs_acl_t	*aclp,
 	int		kind,
 	int		*error)
@@ -615,7 +609,7 @@
 		INT_SET(newace->ae_perm, ARCH_CONVERT, ace->ae_perm);
 	}
 	INT_SET(newacl->acl_cnt, ARCH_CONVERT, aclp->acl_cnt);
-	*error = xfs_attr_set(xfs_vtoi(vp),
+	*error = xfs_attr_set(XFS_I(vp),
 				kind == _ACL_TYPE_ACCESS ?
 				SGI_ACL_FILE: SGI_ACL_DEFAULT,
 				(char *)newacl, len, ATTR_ROOT);
@@ -624,7 +618,7 @@
 
 int
 xfs_acl_vtoacl(
-	bhv_vnode_t	*vp,
+	struct inode	*vp,
 	xfs_acl_t	*access_acl,
 	xfs_acl_t	*default_acl)
 {
@@ -639,7 +633,7 @@
 		if (error)
 			access_acl->acl_cnt = XFS_ACL_NOT_PRESENT;
 		else /* We have a good ACL and the file mode, synchronize. */
-			xfs_acl_sync_mode(xfs_vtoi(vp)->i_d.di_mode, access_acl);
+			xfs_acl_sync_mode(XFS_I(vp)->i_d.di_mode, access_acl);
 	}
 
 	if (default_acl) {
@@ -656,7 +650,7 @@
  */
 int
 xfs_acl_inherit(
-	bhv_vnode_t	*vp,
+	struct inode	*vp,
 	mode_t		mode,
 	xfs_acl_t	*pdaclp)
 {
@@ -715,7 +709,7 @@
  */
 STATIC int
 xfs_acl_setmode(
-	bhv_vnode_t	*vp,
+	struct inode	*vp,
 	xfs_acl_t	*acl,
 	int		*basicperms)
 {
@@ -734,7 +728,7 @@
 	 * mode.  The m:: bits take precedence over the g:: bits.
 	 */
 	iattr.ia_valid = ATTR_MODE;
-	iattr.ia_mode = xfs_vtoi(vp)->i_d.di_mode;
+	iattr.ia_mode = XFS_I(vp)->i_d.di_mode;
 	iattr.ia_mode &= ~(S_IRWXU|S_IRWXG|S_IRWXO);
 	ap = acl->acl_entry;
 	for (i = 0; i < acl->acl_cnt; ++i) {
@@ -764,7 +758,7 @@
 	if (gap && nomask)
 		iattr.ia_mode |= gap->ae_perm << 3;
 
-	return xfs_setattr(xfs_vtoi(vp), &iattr, 0, sys_cred);
+	return xfs_setattr(XFS_I(vp), &iattr, 0, sys_cred);
 }
 
 /*
diff --git a/fs/xfs/xfs_acl.h b/fs/xfs/xfs_acl.h
index 323ee94..a4e293b 100644
--- a/fs/xfs/xfs_acl.h
+++ b/fs/xfs/xfs_acl.h
@@ -59,14 +59,14 @@
 		(zone) = kmem_zone_init(sizeof(xfs_acl_t), (name))
 #define xfs_acl_zone_destroy(zone)	kmem_zone_destroy(zone)
 
-extern int xfs_acl_inherit(bhv_vnode_t *, mode_t mode, xfs_acl_t *);
+extern int xfs_acl_inherit(struct inode *, mode_t mode, xfs_acl_t *);
 extern int xfs_acl_iaccess(struct xfs_inode *, mode_t, cred_t *);
-extern int xfs_acl_vtoacl(bhv_vnode_t *, xfs_acl_t *, xfs_acl_t *);
-extern int xfs_acl_vhasacl_access(bhv_vnode_t *);
-extern int xfs_acl_vhasacl_default(bhv_vnode_t *);
-extern int xfs_acl_vset(bhv_vnode_t *, void *, size_t, int);
-extern int xfs_acl_vget(bhv_vnode_t *, void *, size_t, int);
-extern int xfs_acl_vremove(bhv_vnode_t *, int);
+extern int xfs_acl_vtoacl(struct inode *, xfs_acl_t *, xfs_acl_t *);
+extern int xfs_acl_vhasacl_access(struct inode *);
+extern int xfs_acl_vhasacl_default(struct inode *);
+extern int xfs_acl_vset(struct inode *, void *, size_t, int);
+extern int xfs_acl_vget(struct inode *, void *, size_t, int);
+extern int xfs_acl_vremove(struct inode *, int);
 
 #define _ACL_PERM_INVALID(perm)	((perm) & ~(ACL_READ|ACL_WRITE|ACL_EXECUTE))
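
With bhv_vnode_t gone from these prototypes, callers pass the generic VFS inode and convert to the XFS inode with XFS_I(), the inode->i_private container lookup added to xfs_inode.h later in this patch. A minimal sketch of the new calling convention; the helper name is hypothetical:

static int example_check_acl(struct inode *vp)
{
	struct xfs_inode *ip = XFS_I(vp);	/* was xfs_vtoi(vp) */

	/* operate on the XFS inode directly, no behaviour-layer vnode */
	return ip->i_d.di_mode != 0;
}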
 
diff --git a/fs/xfs/xfs_arch.h b/fs/xfs/xfs_arch.h
index f9472a2..0b3b5ef 100644
--- a/fs/xfs/xfs_arch.h
+++ b/fs/xfs/xfs_arch.h
@@ -92,16 +92,6 @@
 	((__u8*)(pointer))[1] = (((value)     ) & 0xff); \
     }
 
-/* define generic INT_ macros */
-
-#define INT_GET(reference,arch) \
-    (((arch) == ARCH_NOCONVERT) \
-	? \
-	    (reference) \
-	: \
-	    INT_SWAP((reference),(reference)) \
-    )
-
 /* does not return a value */
 #define INT_SET(reference,arch,valueref) \
     (__builtin_constant_p(valueref) ? \
@@ -112,64 +102,6 @@
 	) \
     )
 
-/* does not return a value */
-#define INT_MOD_EXPR(reference,arch,code) \
-    (((arch) == ARCH_NOCONVERT) \
-	? \
-	    (void)((reference) code) \
-	: \
-	    (void)( \
-		(reference) = INT_GET((reference),arch) , \
-		((reference) code), \
-		INT_SET(reference, arch, reference) \
-	    ) \
-    )
-
-/* does not return a value */
-#define INT_MOD(reference,arch,delta) \
-    (void)( \
-	INT_MOD_EXPR(reference,arch,+=(delta)) \
-    )
-
-/*
- * INT_COPY - copy a value between two locations with the
- *	      _same architecture_ but _potentially different sizes_
- *
- *	    if the types of the two parameters are equal or they are
- *		in native architecture, a simple copy is done
- *
- *	    otherwise, architecture conversions are done
- *
- */
-
-/* does not return a value */
-#define INT_COPY(dst,src,arch) \
-    ( \
-	((sizeof(dst) == sizeof(src)) || ((arch) == ARCH_NOCONVERT)) \
-	    ? \
-		(void)((dst) = (src)) \
-	    : \
-		INT_SET(dst, arch, INT_GET(src, arch)) \
-    )
-
-/*
- * INT_XLATE - copy a value in either direction between two locations
- *	       with different architectures
- *
- *		    dir < 0	- copy from memory to buffer (native to arch)
- *		    dir > 0	- copy from buffer to memory (arch to native)
- */
-
-/* does not return a value */
-#define INT_XLATE(buf,mem,dir,arch) {\
-    ASSERT(dir); \
-    if (dir>0) { \
-	(mem)=INT_GET(buf, arch); \
-    } else { \
-	INT_SET(buf, arch, mem); \
-    } \
-}
-
 /*
  * In directories inode numbers are stored as unaligned arrays of unsigned
  * 8bit integers on disk.
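
The generic INT_GET/INT_MOD/INT_COPY/INT_XLATE macros can go because on-disk fields are now read through the kernel's explicit endianness helpers (see the get_unaligned_be64() conversions in xfs_inode.c below). A sketch of the replacement pattern, assuming an unaligned big-endian 64-bit on-disk field; the function name is hypothetical:

#include <asm/unaligned.h>

static inline __uint64_t example_get_ondisk_u64(const void *field)
{
	/* was INT_GET(field, ARCH_CONVERT) for a converting architecture */
	return get_unaligned_be64(field);
}
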
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c
index 78de80e..f7cdc28 100644
--- a/fs/xfs/xfs_attr.c
+++ b/fs/xfs/xfs_attr.c
@@ -194,6 +194,46 @@
 	return(error);
 }
 
+/*
+ * Calculate how many blocks we need for the new attribute.
+ */
+int
+xfs_attr_calc_size(
+	struct xfs_inode 	*ip,
+	int			namelen,
+	int			valuelen,
+	int			*local)
+{
+	struct xfs_mount 	*mp = ip->i_mount;
+	int			size;
+	int			nblks;
+
+	/*
+	 * Determine space new attribute will use, and if it would be
+	 * "local" or "remote" (note: local != inline).
+	 */
+	size = xfs_attr_leaf_newentsize(namelen, valuelen,
+					mp->m_sb.sb_blocksize, local);
+
+	nblks = XFS_DAENTER_SPACE_RES(mp, XFS_ATTR_FORK);
+	if (*local) {
+		if (size > (mp->m_sb.sb_blocksize >> 1)) {
+			/* Double split possible */
+			nblks *= 2;
+		}
+	} else {
+		/*
+		 * Out of line attribute, cannot double split, but
+		 * make room for the attribute value itself.
+		 */
+		uint	dblocks = XFS_B_TO_FSB(mp, valuelen);
+		nblks += dblocks;
+		nblks += XFS_NEXTENTADD_SPACE_RES(mp, dblocks, XFS_ATTR_FORK);
+	}
+
+	return nblks;
+}
+
 STATIC int
 xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name,
 		char *value, int valuelen, int flags)
@@ -202,10 +242,9 @@
 	xfs_fsblock_t	firstblock;
 	xfs_bmap_free_t flist;
 	int		error, err2, committed;
-	int		local, size;
-	uint		nblks;
 	xfs_mount_t	*mp = dp->i_mount;
 	int             rsvd = (flags & ATTR_ROOT) != 0;
+	int		local;
 
 	/*
 	 * Attach the dquots to the inode.
@@ -241,30 +280,8 @@
 	args.whichfork = XFS_ATTR_FORK;
 	args.op_flags = XFS_DA_OP_ADDNAME | XFS_DA_OP_OKNOENT;
 
-	/*
-	 * Determine space new attribute will use, and if it would be
-	 * "local" or "remote" (note: local != inline).
-	 */
-	size = xfs_attr_leaf_newentsize(name->len, valuelen,
-					mp->m_sb.sb_blocksize, &local);
-
-	nblks = XFS_DAENTER_SPACE_RES(mp, XFS_ATTR_FORK);
-	if (local) {
-		if (size > (mp->m_sb.sb_blocksize >> 1)) {
-			/* Double split possible */
-			nblks <<= 1;
-		}
-	} else {
-		uint	dblocks = XFS_B_TO_FSB(mp, valuelen);
-		/* Out of line attribute, cannot double split, but make
-		 * room for the attribute value itself.
-		 */
-		nblks += dblocks;
-		nblks += XFS_NEXTENTADD_SPACE_RES(mp, dblocks, XFS_ATTR_FORK);
-	}
-
 	/* Size is now blocks for attribute data */
-	args.total = nblks;
+	args.total = xfs_attr_calc_size(dp, name->len, valuelen, &local);
 
 	/*
 	 * Start our first transaction of the day.
@@ -286,18 +303,17 @@
 	if (rsvd)
 		args.trans->t_flags |= XFS_TRANS_RESERVE;
 
-	if ((error = xfs_trans_reserve(args.trans, (uint) nblks,
-				      XFS_ATTRSET_LOG_RES(mp, nblks),
-				      0, XFS_TRANS_PERM_LOG_RES,
-				      XFS_ATTRSET_LOG_COUNT))) {
+	if ((error = xfs_trans_reserve(args.trans, args.total,
+			XFS_ATTRSET_LOG_RES(mp, args.total), 0,
+			XFS_TRANS_PERM_LOG_RES, XFS_ATTRSET_LOG_COUNT))) {
 		xfs_trans_cancel(args.trans, 0);
 		return(error);
 	}
 	xfs_ilock(dp, XFS_ILOCK_EXCL);
 
-	error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, args.trans, dp, nblks, 0,
-			 rsvd ? XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
-				XFS_QMOPT_RES_REGBLKS);
+	error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, args.trans, dp, args.total, 0,
+				rsvd ? XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
+				       XFS_QMOPT_RES_REGBLKS);
 	if (error) {
 		xfs_iunlock(dp, XFS_ILOCK_EXCL);
 		xfs_trans_cancel(args.trans, XFS_TRANS_RELEASE_LOG_RES);
@@ -384,7 +400,9 @@
 		 * Commit the leaf transformation.  We'll need another (linked)
 		 * transaction to add the new attribute to the leaf.
 		 */
-		if ((error = xfs_attr_rolltrans(&args.trans, dp)))
+
+		error = xfs_trans_roll(&args.trans, dp);
+		if (error)
 			goto out;
 
 	}
@@ -964,7 +982,8 @@
 		 * Commit the current trans (including the inode) and start
 		 * a new one.
 		 */
-		if ((error = xfs_attr_rolltrans(&args->trans, dp)))
+		error = xfs_trans_roll(&args->trans, dp);
+		if (error)
 			return (error);
 
 		/*
@@ -978,7 +997,8 @@
 	 * Commit the transaction that added the attr name so that
 	 * later routines can manage their own transactions.
 	 */
-	if ((error = xfs_attr_rolltrans(&args->trans, dp)))
+	error = xfs_trans_roll(&args->trans, dp);
+	if (error)
 		return (error);
 
 	/*
@@ -1067,7 +1087,7 @@
 		/*
 		 * Commit the remove and start the next trans in series.
 		 */
-		error = xfs_attr_rolltrans(&args->trans, dp);
+		error = xfs_trans_roll(&args->trans, dp);
 
 	} else if (args->rmtblkno > 0) {
 		/*
@@ -1298,7 +1318,8 @@
 			 * Commit the node conversion and start the next
 			 * trans in the chain.
 			 */
-			if ((error = xfs_attr_rolltrans(&args->trans, dp)))
+			error = xfs_trans_roll(&args->trans, dp);
+			if (error)
 				goto out;
 
 			goto restart;
@@ -1349,7 +1370,8 @@
 	 * Commit the leaf addition or btree split and start the next
 	 * trans in the chain.
 	 */
-	if ((error = xfs_attr_rolltrans(&args->trans, dp)))
+	error = xfs_trans_roll(&args->trans, dp);
+	if (error)
 		goto out;
 
 	/*
@@ -1449,7 +1471,8 @@
 		/*
 		 * Commit and start the next trans in the chain.
 		 */
-		if ((error = xfs_attr_rolltrans(&args->trans, dp)))
+		error = xfs_trans_roll(&args->trans, dp);
+		if (error)
 			goto out;
 
 	} else if (args->rmtblkno > 0) {
@@ -1581,7 +1604,8 @@
 		/*
 		 * Commit the Btree join operation and start a new trans.
 		 */
-		if ((error = xfs_attr_rolltrans(&args->trans, dp)))
+		error = xfs_trans_roll(&args->trans, dp);
+		if (error)
 			goto out;
 	}
 
@@ -2082,7 +2106,8 @@
 		/*
 		 * Start the next trans in the chain.
 		 */
-		if ((error = xfs_attr_rolltrans(&args->trans, dp)))
+		error = xfs_trans_roll(&args->trans, dp);
+		if (error)
 			return (error);
 	}
 
@@ -2232,7 +2257,8 @@
 		/*
 		 * Close out trans and start the next one in the chain.
 		 */
-		if ((error = xfs_attr_rolltrans(&args->trans, args->dp)))
+		error = xfs_trans_roll(&args->trans, args->dp);
+		if (error)
 			return (error);
 	}
 	return(0);
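
Factoring the reservation math into xfs_attr_calc_size() lets any attribute-setting path size its transaction the same way xfs_attr_set_int() now does. A sketch of the intended call pattern; the wrapper below is hypothetical and assumes tp, dp, name and valuelen are set up as in the real caller:

static int
example_reserve_attr(xfs_trans_t *tp, xfs_inode_t *dp,
		     struct xfs_name *name, int valuelen)
{
	xfs_mount_t	*mp = dp->i_mount;
	int		local;
	int		nblks;

	/* blocks needed for the new attribute, local or remote */
	nblks = xfs_attr_calc_size(dp, name->len, valuelen, &local);
	return xfs_trans_reserve(tp, nblks, XFS_ATTRSET_LOG_RES(mp, nblks),
				 0, XFS_TRANS_PERM_LOG_RES,
				 XFS_ATTRSET_LOG_COUNT);
}
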
diff --git a/fs/xfs/xfs_attr.h b/fs/xfs/xfs_attr.h
index 8b2d31c..fb3b2a6 100644
--- a/fs/xfs/xfs_attr.h
+++ b/fs/xfs/xfs_attr.h
@@ -129,6 +129,7 @@
 /*
  * Overall external interface routines.
  */
+int xfs_attr_calc_size(struct xfs_inode *, int, int, int *);
 int xfs_attr_inactive(struct xfs_inode *dp);
 int xfs_attr_fetch(struct xfs_inode *, struct xfs_name *, char *, int *, int);
 int xfs_attr_rmtval_get(struct xfs_da_args *args);
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index 23ef5d7..79da6b2 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -2498,9 +2498,7 @@
 	/*
 	 * Commit the flag value change and start the next trans in series.
 	 */
-	error = xfs_attr_rolltrans(&args->trans, args->dp);
-
-	return(error);
+	return xfs_trans_roll(&args->trans, args->dp);
 }
 
 /*
@@ -2547,9 +2545,7 @@
 	/*
 	 * Commit the flag value change and start the next trans in series.
 	 */
-	error = xfs_attr_rolltrans(&args->trans, args->dp);
-
-	return(error);
+	return xfs_trans_roll(&args->trans, args->dp);
 }
 
 /*
@@ -2665,7 +2661,7 @@
 	/*
 	 * Commit the flag value change and start the next trans in series.
 	 */
-	error = xfs_attr_rolltrans(&args->trans, args->dp);
+	error = xfs_trans_roll(&args->trans, args->dp);
 
 	return(error);
 }
@@ -2723,7 +2719,7 @@
 	/*
 	 * Commit the invalidate and start the next transaction.
 	 */
-	error = xfs_attr_rolltrans(trans, dp);
+	error = xfs_trans_roll(trans, dp);
 
 	return (error);
 }
@@ -2825,7 +2821,8 @@
 		/*
 		 * Atomically commit the whole invalidate stuff.
 		 */
-		if ((error = xfs_attr_rolltrans(trans, dp)))
+		error = xfs_trans_roll(trans, dp);
+		if (error)
 			return (error);
 	}
 
@@ -2964,7 +2961,8 @@
 			/*
 			 * Roll to next transaction.
 			 */
-			if ((error = xfs_attr_rolltrans(trans, dp)))
+			error = xfs_trans_roll(trans, dp);
+			if (error)
 				return (error);
 		}
 
@@ -2974,60 +2972,3 @@
 
 	return(0);
 }
-
-
-/*
- * Roll from one trans in the sequence of PERMANENT transactions to the next.
- */
-int
-xfs_attr_rolltrans(xfs_trans_t **transp, xfs_inode_t *dp)
-{
-	xfs_trans_t *trans;
-	unsigned int logres, count;
-	int	error;
-
-	/*
-	 * Ensure that the inode is always logged.
-	 */
-	trans = *transp;
-	xfs_trans_log_inode(trans, dp, XFS_ILOG_CORE);
-
-	/*
-	 * Copy the critical parameters from one trans to the next.
-	 */
-	logres = trans->t_log_res;
-	count = trans->t_log_count;
-	*transp = xfs_trans_dup(trans);
-
-	/*
-	 * Commit the current transaction.
-	 * If this commit failed, then it'd just unlock those items that
-	 * are not marked ihold. That also means that a filesystem shutdown
-	 * is in progress. The caller takes the responsibility to cancel
-	 * the duplicate transaction that gets returned.
-	 */
-	if ((error = xfs_trans_commit(trans, 0)))
-		return (error);
-
-	trans = *transp;
-
-	/*
-	 * Reserve space in the log for th next transaction.
-	 * This also pushes items in the "AIL", the list of logged items,
-	 * out to disk if they are taking up space at the tail of the log
-	 * that we want to use.  This requires that either nothing be locked
-	 * across this call, or that anything that is locked be logged in
-	 * the prior and the next transactions.
-	 */
-	error = xfs_trans_reserve(trans, 0, logres, 0,
-				  XFS_TRANS_PERM_LOG_RES, count);
-	/*
-	 *  Ensure that the inode is in the new transaction and locked.
-	 */
-	if (!error) {
-		xfs_trans_ijoin(trans, dp, XFS_ILOCK_EXCL);
-		xfs_trans_ihold(trans, dp);
-	}
-	return (error);
-
-}
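
The helper removed above survives as the generic xfs_trans_roll(). For reference, the roll pattern it implements is: log the inode, duplicate the transaction, commit the old one, re-reserve log space, then rejoin and hold the inode. A sketch reconstructed from the deleted body above; an illustration of the pattern, not the verbatim fs/xfs/xfs_trans.c code:

int
example_trans_roll(xfs_trans_t **tpp, xfs_inode_t *dp)
{
	xfs_trans_t	*trans = *tpp;
	unsigned int	logres = trans->t_log_res;
	unsigned int	count = trans->t_log_count;
	int		error;

	xfs_trans_log_inode(trans, dp, XFS_ILOG_CORE);	/* keep inode logged */
	*tpp = xfs_trans_dup(trans);			/* chain the next trans */
	error = xfs_trans_commit(trans, 0);		/* commit the old one */
	if (error)
		return error;
	trans = *tpp;
	error = xfs_trans_reserve(trans, 0, logres, 0,
				  XFS_TRANS_PERM_LOG_RES, count);
	if (error)
		return error;
	xfs_trans_ijoin(trans, dp, XFS_ILOCK_EXCL);	/* relock ... */
	xfs_trans_ihold(trans, dp);			/* ... and hold inode */
	return 0;
}
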
diff --git a/fs/xfs/xfs_attr_leaf.h b/fs/xfs/xfs_attr_leaf.h
index 5ecf437..83e9af4 100644
--- a/fs/xfs/xfs_attr_leaf.h
+++ b/fs/xfs/xfs_attr_leaf.h
@@ -274,6 +274,4 @@
 				   struct xfs_dabuf *leaf2_bp);
 int	xfs_attr_leaf_newentsize(int namelen, int valuelen, int blocksize,
 					int *local);
-int	xfs_attr_rolltrans(struct xfs_trans **transp, struct xfs_inode *dp);
-
 #endif	/* __XFS_ATTR_LEAF_H__ */
diff --git a/fs/xfs/xfs_bit.c b/fs/xfs/xfs_bit.c
index fab0b6d..4822884 100644
--- a/fs/xfs/xfs_bit.c
+++ b/fs/xfs/xfs_bit.c
@@ -25,109 +25,6 @@
  * XFS bit manipulation routines, used in non-realtime code.
  */
 
-#ifndef HAVE_ARCH_HIGHBIT
-/*
- * Index of high bit number in byte, -1 for none set, 0..7 otherwise.
- */
-static const char xfs_highbit[256] = {
-       -1, 0, 1, 1, 2, 2, 2, 2,			/* 00 .. 07 */
-	3, 3, 3, 3, 3, 3, 3, 3,			/* 08 .. 0f */
-	4, 4, 4, 4, 4, 4, 4, 4,			/* 10 .. 17 */
-	4, 4, 4, 4, 4, 4, 4, 4,			/* 18 .. 1f */
-	5, 5, 5, 5, 5, 5, 5, 5,			/* 20 .. 27 */
-	5, 5, 5, 5, 5, 5, 5, 5,			/* 28 .. 2f */
-	5, 5, 5, 5, 5, 5, 5, 5,			/* 30 .. 37 */
-	5, 5, 5, 5, 5, 5, 5, 5,			/* 38 .. 3f */
-	6, 6, 6, 6, 6, 6, 6, 6,			/* 40 .. 47 */
-	6, 6, 6, 6, 6, 6, 6, 6,			/* 48 .. 4f */
-	6, 6, 6, 6, 6, 6, 6, 6,			/* 50 .. 57 */
-	6, 6, 6, 6, 6, 6, 6, 6,			/* 58 .. 5f */
-	6, 6, 6, 6, 6, 6, 6, 6,			/* 60 .. 67 */
-	6, 6, 6, 6, 6, 6, 6, 6,			/* 68 .. 6f */
-	6, 6, 6, 6, 6, 6, 6, 6,			/* 70 .. 77 */
-	6, 6, 6, 6, 6, 6, 6, 6,			/* 78 .. 7f */
-	7, 7, 7, 7, 7, 7, 7, 7,			/* 80 .. 87 */
-	7, 7, 7, 7, 7, 7, 7, 7,			/* 88 .. 8f */
-	7, 7, 7, 7, 7, 7, 7, 7,			/* 90 .. 97 */
-	7, 7, 7, 7, 7, 7, 7, 7,			/* 98 .. 9f */
-	7, 7, 7, 7, 7, 7, 7, 7,			/* a0 .. a7 */
-	7, 7, 7, 7, 7, 7, 7, 7,			/* a8 .. af */
-	7, 7, 7, 7, 7, 7, 7, 7,			/* b0 .. b7 */
-	7, 7, 7, 7, 7, 7, 7, 7,			/* b8 .. bf */
-	7, 7, 7, 7, 7, 7, 7, 7,			/* c0 .. c7 */
-	7, 7, 7, 7, 7, 7, 7, 7,			/* c8 .. cf */
-	7, 7, 7, 7, 7, 7, 7, 7,			/* d0 .. d7 */
-	7, 7, 7, 7, 7, 7, 7, 7,			/* d8 .. df */
-	7, 7, 7, 7, 7, 7, 7, 7,			/* e0 .. e7 */
-	7, 7, 7, 7, 7, 7, 7, 7,			/* e8 .. ef */
-	7, 7, 7, 7, 7, 7, 7, 7,			/* f0 .. f7 */
-	7, 7, 7, 7, 7, 7, 7, 7,			/* f8 .. ff */
-};
-#endif
-
-/*
- * xfs_highbit32: get high bit set out of 32-bit argument, -1 if none set.
- */
-inline int
-xfs_highbit32(
-	__uint32_t	v)
-{
-#ifdef HAVE_ARCH_HIGHBIT
-	return highbit32(v);
-#else
-	int		i;
-
-	if (v & 0xffff0000)
-		if (v & 0xff000000)
-			i = 24;
-		else
-			i = 16;
-	else if (v & 0x0000ffff)
-		if (v & 0x0000ff00)
-			i = 8;
-		else
-			i = 0;
-	else
-		return -1;
-	return i + xfs_highbit[(v >> i) & 0xff];
-#endif
-}
-
-/*
- * xfs_lowbit64: get low bit set out of 64-bit argument, -1 if none set.
- */
-int
-xfs_lowbit64(
-	__uint64_t	v)
-{
-	__uint32_t	w = (__uint32_t)v;
-	int		n = 0;
-
-	if (w) {	/* lower bits */
-		n = ffs(w);
-	} else {	/* upper bits */
-		w = (__uint32_t)(v >> 32);
-		if (w && (n = ffs(w)))
-			n += 32;
-	}
-	return n - 1;
-}
-
-/*
- * xfs_highbit64: get high bit set out of 64-bit argument, -1 if none set.
- */
-int
-xfs_highbit64(
-	__uint64_t	v)
-{
-	__uint32_t	h = (__uint32_t)(v >> 32);
-
-	if (h)
-		return xfs_highbit32(h) + 32;
-	return xfs_highbit32((__uint32_t)v);
-}
-
-
 /*
  * Return whether bitmap is empty.
  * Size is number of words in the bitmap, which is padded to word boundary
diff --git a/fs/xfs/xfs_bit.h b/fs/xfs/xfs_bit.h
index 082641a..8e0e463 100644
--- a/fs/xfs/xfs_bit.h
+++ b/fs/xfs/xfs_bit.h
@@ -47,13 +47,39 @@
 }
 
 /* Get high bit set out of 32-bit argument, -1 if none set */
-extern int xfs_highbit32(__uint32_t v);
-
-/* Get low bit set out of 64-bit argument, -1 if none set */
-extern int xfs_lowbit64(__uint64_t v);
+static inline int xfs_highbit32(__uint32_t v)
+{
+	return fls(v) - 1;
+}
 
 /* Get high bit set out of 64-bit argument, -1 if none set */
-extern int xfs_highbit64(__uint64_t);
+static inline int xfs_highbit64(__uint64_t v)
+{
+	return fls64(v) - 1;
+}
+
+/* Get low bit set out of 32-bit argument, -1 if none set */
+static inline int xfs_lowbit32(__uint32_t v)
+{
+	unsigned long	t = v;
+	return (v) ? find_first_bit(&t, 32) : -1;
+}
+
+/* Get low bit set out of 64-bit argument, -1 if none set */
+static inline int xfs_lowbit64(__uint64_t v)
+{
+	__uint32_t	w = (__uint32_t)v;
+	int		n = 0;
+
+	if (w) {	/* lower bits */
+		n = ffs(w);
+	} else {	/* upper bits */
+		w = (__uint32_t)(v >> 32);
+		if (w && (n = ffs(w)))
+			n += 32;
+	}
+	return n - 1;
+}
 
 /* Return whether bitmap is empty (1 == empty) */
 extern int xfs_bitmap_empty(uint *map, uint size);
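
The open-coded highbit table and helpers are replaced by the generic fls()/fls64()/ffs() primitives. A quick userspace sanity check of the helpers' contract (bit index of the highest or lowest set bit, -1 when no bit is set), using compiler builtins in place of the kernel primitives; this is illustrative test code, not kernel code:

#include <assert.h>
#include <stdint.h>

static int highbit32(uint32_t v) { return v ? 31 - __builtin_clz(v) : -1; }
static int lowbit32(uint32_t v)  { return v ? __builtin_ctz(v) : -1; }

int main(void)
{
	assert(highbit32(0) == -1 && lowbit32(0) == -1);
	assert(highbit32(1) == 0);
	assert(highbit32(0x80000000u) == 31);
	assert(lowbit32(0x50) == 4);	/* 0x50 == 0b1010000 */
	return 0;
}
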
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 3c4beb3..a1aab92 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -384,14 +384,14 @@
 	int             levelin,
 	int		*count);
 
-STATIC int
+STATIC void
 xfs_bmap_count_leaves(
 	xfs_ifork_t		*ifp,
 	xfs_extnum_t		idx,
 	int			numrecs,
 	int			*count);
 
-STATIC int
+STATIC void
 xfs_bmap_disk_count_leaves(
 	xfs_extnum_t		idx,
 	xfs_bmbt_block_t	*block,
@@ -4000,7 +4000,7 @@
 		ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
 	}
 	ASSERT(ip->i_d.di_anextents == 0);
-	VN_HOLD(XFS_ITOV(ip));
+	IHOLD(ip);
 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 	switch (ip->i_d.di_format) {
@@ -6096,7 +6096,7 @@
 		tp = cur->bc_tp;
 		licp = &tp->t_items;
 		while (!bp && licp != NULL) {
-			if (XFS_LIC_ARE_ALL_FREE(licp)) {
+			if (xfs_lic_are_all_free(licp)) {
 				licp = licp->lic_next;
 				continue;
 			}
@@ -6106,11 +6106,11 @@
 				xfs_buf_log_item_t	*bip;
 				xfs_buf_t		*lbp;
 
-				if (XFS_LIC_ISFREE(licp, i)) {
+				if (xfs_lic_isfree(licp, i)) {
 					continue;
 				}
 
-				lidp = XFS_LIC_SLOT(licp, i);
+				lidp = xfs_lic_slot(licp, i);
 				lip = lidp->lid_item;
 				if (lip->li_type != XFS_LI_BUF)
 					continue;
@@ -6367,13 +6367,9 @@
 	mp = ip->i_mount;
 	ifp = XFS_IFORK_PTR(ip, whichfork);
 	if ( XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ) {
-		if (unlikely(xfs_bmap_count_leaves(ifp, 0,
+		xfs_bmap_count_leaves(ifp, 0,
 			ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t),
-			count) < 0)) {
-			XFS_ERROR_REPORT("xfs_bmap_count_blocks(1)",
-					 XFS_ERRLEVEL_LOW, mp);
-			return XFS_ERROR(EFSCORRUPTED);
-		}
+			count);
 		return 0;
 	}
 
@@ -6454,13 +6450,7 @@
 		for (;;) {
 			nextbno = be64_to_cpu(block->bb_rightsib);
 			numrecs = be16_to_cpu(block->bb_numrecs);
-			if (unlikely(xfs_bmap_disk_count_leaves(0,
-					block, numrecs, count) < 0)) {
-				xfs_trans_brelse(tp, bp);
-				XFS_ERROR_REPORT("xfs_bmap_count_tree(2)",
-						 XFS_ERRLEVEL_LOW, mp);
-				return XFS_ERROR(EFSCORRUPTED);
-			}
+			xfs_bmap_disk_count_leaves(0, block, numrecs, count);
 			xfs_trans_brelse(tp, bp);
 			if (nextbno == NULLFSBLOCK)
 				break;
@@ -6478,7 +6468,7 @@
 /*
  * Count leaf blocks given a range of extent records.
  */
-STATIC int
+STATIC void
 xfs_bmap_count_leaves(
 	xfs_ifork_t		*ifp,
 	xfs_extnum_t		idx,
@@ -6491,14 +6481,13 @@
 		xfs_bmbt_rec_host_t *frp = xfs_iext_get_ext(ifp, idx + b);
 		*count += xfs_bmbt_get_blockcount(frp);
 	}
-	return 0;
 }
 
 /*
  * Count leaf blocks given a range of extent records originally
  * in btree format.
  */
-STATIC int
+STATIC void
 xfs_bmap_disk_count_leaves(
 	xfs_extnum_t		idx,
 	xfs_bmbt_block_t	*block,
@@ -6512,5 +6501,4 @@
 		frp = XFS_BTREE_REC_ADDR(xfs_bmbt, block, idx + b);
 		*count += xfs_bmbt_disk_get_blockcount(frp);
 	}
-	return 0;
 }
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index aeb87ca..cc593a8 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -46,38 +46,11 @@
 /*
  * Btree magic numbers.
  */
-const __uint32_t xfs_magics[XFS_BTNUM_MAX] =
-{
+const __uint32_t xfs_magics[XFS_BTNUM_MAX] = {
 	XFS_ABTB_MAGIC, XFS_ABTC_MAGIC, XFS_BMAP_MAGIC, XFS_IBT_MAGIC
 };
 
 /*
- * Prototypes for internal routines.
- */
-
-/*
- * Checking routine: return maxrecs for the block.
- */
-STATIC int				/* number of records fitting in block */
-xfs_btree_maxrecs(
-	xfs_btree_cur_t		*cur,	/* btree cursor */
-	xfs_btree_block_t	*block);/* generic btree block pointer */
-
-/*
- * Internal routines.
- */
-
-/*
- * Retrieve the block pointer from the cursor at the given level.
- * This may be a bmap btree root or from a buffer.
- */
-STATIC xfs_btree_block_t *			/* generic btree block pointer */
-xfs_btree_get_block(
-	xfs_btree_cur_t		*cur,	/* btree cursor */
-	int			level,	/* level in btree */
-	struct xfs_buf		**bpp);	/* buffer containing the block */
-
-/*
  * Checking routine: return maxrecs for the block.
  */
 STATIC int				/* number of records fitting in block */
@@ -457,35 +430,6 @@
 }
 
 /*
- * Change the cursor to point to the first record at the given level.
- * Other levels are unaffected.
- */
-int					/* success=1, failure=0 */
-xfs_btree_firstrec(
-	xfs_btree_cur_t		*cur,	/* btree cursor */
-	int			level)	/* level to change */
-{
-	xfs_btree_block_t	*block;	/* generic btree block pointer */
-	xfs_buf_t		*bp;	/* buffer containing block */
-
-	/*
-	 * Get the block pointer for this level.
-	 */
-	block = xfs_btree_get_block(cur, level, &bp);
-	xfs_btree_check_block(cur, block, level, bp);
-	/*
-	 * It's empty, there is no such record.
-	 */
-	if (!block->bb_h.bb_numrecs)
-		return 0;
-	/*
-	 * Set the ptr value to 1, that's the first record/key.
-	 */
-	cur->bc_ptrs[level] = 1;
-	return 1;
-}
-
-/*
  * Retrieve the block pointer from the cursor at the given level.
  * This may be a bmap btree root or from a buffer.
  */
@@ -626,6 +570,13 @@
 		cur->bc_private.a.agbp = agbp;
 		cur->bc_private.a.agno = agno;
 		break;
+	case XFS_BTNUM_INO:
+		/*
+		 * Inode allocation btree fields.
+		 */
+		cur->bc_private.a.agbp = agbp;
+		cur->bc_private.a.agno = agno;
+		break;
 	case XFS_BTNUM_BMAP:
 		/*
 		 * Bmap btree fields.
@@ -638,13 +589,6 @@
 		cur->bc_private.b.flags = 0;
 		cur->bc_private.b.whichfork = whichfork;
 		break;
-	case XFS_BTNUM_INO:
-		/*
-		 * Inode allocation btree fields.
-		 */
-		cur->bc_private.i.agbp = agbp;
-		cur->bc_private.i.agno = agno;
-		break;
 	default:
 		ASSERT(0);
 	}
@@ -671,6 +615,35 @@
 }
 
 /*
+ * Change the cursor to point to the first record at the given level.
+ * Other levels are unaffected.
+ */
+int					/* success=1, failure=0 */
+xfs_btree_firstrec(
+	xfs_btree_cur_t		*cur,	/* btree cursor */
+	int			level)	/* level to change */
+{
+	xfs_btree_block_t	*block;	/* generic btree block pointer */
+	xfs_buf_t		*bp;	/* buffer containing block */
+
+	/*
+	 * Get the block pointer for this level.
+	 */
+	block = xfs_btree_get_block(cur, level, &bp);
+	xfs_btree_check_block(cur, block, level, bp);
+	/*
+	 * It's empty, there is no such record.
+	 */
+	if (!block->bb_h.bb_numrecs)
+		return 0;
+	/*
+	 * Set the ptr value to 1, that's the first record/key.
+	 */
+	cur->bc_ptrs[level] = 1;
+	return 1;
+}
+
+/*
  * Change the cursor to point to the last record in the current block
  * at the given level.  Other levels are unaffected.
  */
@@ -890,12 +863,12 @@
 	case XFS_BTNUM_INO:
 		i = XFS_BUF_TO_INOBT_BLOCK(cur->bc_bufs[lev]);
 		if ((lr & XFS_BTCUR_LEFTRA) && be32_to_cpu(i->bb_leftsib) != NULLAGBLOCK) {
-			xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.i.agno,
+			xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno,
 				be32_to_cpu(i->bb_leftsib), 1);
 			rval++;
 		}
 		if ((lr & XFS_BTCUR_RIGHTRA) && be32_to_cpu(i->bb_rightsib) != NULLAGBLOCK) {
-			xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.i.agno,
+			xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno,
 				be32_to_cpu(i->bb_rightsib), 1);
 			rval++;
 		}
diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h
index 7440b78..1f528a2 100644
--- a/fs/xfs/xfs_btree.h
+++ b/fs/xfs/xfs_btree.h
@@ -158,8 +158,8 @@
 	__uint8_t	bc_blocklog;	/* log2(blocksize) of btree blocks */
 	xfs_btnum_t	bc_btnum;	/* identifies which btree type */
 	union {
-		struct {			/* needed for BNO, CNT */
-			struct xfs_buf	*agbp;	/* agf buffer pointer */
+		struct {			/* needed for BNO, CNT, INO */
+			struct xfs_buf	*agbp;	/* agf/agi buffer pointer */
 			xfs_agnumber_t	agno;	/* ag number */
 		} a;
 		struct {			/* needed for BMAP */
@@ -172,10 +172,6 @@
 			char		flags;		/* flags */
 #define	XFS_BTCUR_BPRV_WASDEL	1			/* was delayed */
 		} b;
-		struct {			/* needed for INO */
-			struct xfs_buf	*agbp;	/* agi buffer pointer */
-			xfs_agnumber_t	agno;	/* ag number */
-		} i;
 	}		bc_private;	/* per-btree type data */
 } xfs_btree_cur_t;
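
Folding the INO-specific arm into .a is what makes the mechanical .i to .a substitutions in xfs_btree.c and xfs_ialloc_btree.c safe: BNO, CNT and INO cursors now carry their AG state in the same place. A hypothetical accessor illustrating the invariant:

static inline xfs_agnumber_t example_cur_agno(xfs_btree_cur_t *cur)
{
	ASSERT(cur->bc_btnum != XFS_BTNUM_BMAP);	/* BMAP uses .b */
	return cur->bc_private.a.agno;
}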
 
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index d86ca2c..608c30c 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -737,7 +737,7 @@
 	bip->bli_format.blf_len = (ushort)BTOBB(XFS_BUF_COUNT(bp));
 	bip->bli_format.blf_map_size = map_size;
 #ifdef XFS_BLI_TRACE
-	bip->bli_trace = ktrace_alloc(XFS_BLI_TRACE_SIZE, KM_SLEEP);
+	bip->bli_trace = ktrace_alloc(XFS_BLI_TRACE_SIZE, KM_NOFS);
 #endif
 
 #ifdef XFS_TRANS_DEBUG
@@ -1056,7 +1056,7 @@
 			   anyway. */
 			XFS_BUF_SET_BRELSE_FUNC(bp,xfs_buf_error_relse);
 			XFS_BUF_DONE(bp);
-			XFS_BUF_V_IODONESEMA(bp);
+			XFS_BUF_FINISH_IOWAIT(bp);
 		}
 		return;
 	}
diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c
index 2211e88..760f4c5 100644
--- a/fs/xfs/xfs_dfrag.c
+++ b/fs/xfs/xfs_dfrag.c
@@ -128,10 +128,8 @@
 	xfs_swapext_t	*sxp)
 {
 	xfs_mount_t	*mp;
-	xfs_inode_t	*ips[2];
 	xfs_trans_t	*tp;
 	xfs_bstat_t	*sbp = &sxp->sx_stat;
-	bhv_vnode_t	*vp, *tvp;
 	xfs_ifork_t	*tempifp, *ifp, *tifp;
 	int		ilf_fields, tilf_fields;
 	static uint	lock_flags = XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL;
@@ -150,19 +148,8 @@
 	}
 
 	sbp = &sxp->sx_stat;
-	vp = XFS_ITOV(ip);
-	tvp = XFS_ITOV(tip);
 
-	/* Lock in i_ino order */
-	if (ip->i_ino < tip->i_ino) {
-		ips[0] = ip;
-		ips[1] = tip;
-	} else {
-		ips[0] = tip;
-		ips[1] = ip;
-	}
-
-	xfs_lock_inodes(ips, 2, lock_flags);
+	xfs_lock_two_inodes(ip, tip, lock_flags);
 	locked = 1;
 
 	/* Verify that both files have the same format */
@@ -184,7 +171,7 @@
 		goto error0;
 	}
 
-	if (VN_CACHED(tvp) != 0) {
+	if (VN_CACHED(VFS_I(tip)) != 0) {
 		xfs_inval_cached_trace(tip, 0, -1, 0, -1);
 		error = xfs_flushinval_pages(tip, 0, -1,
 				FI_REMAPF_LOCKED);
@@ -193,7 +180,7 @@
 	}
 
 	/* Verify O_DIRECT for ftmp */
-	if (VN_CACHED(tvp) != 0) {
+	if (VN_CACHED(VFS_I(tip)) != 0) {
 		error = XFS_ERROR(EINVAL);
 		goto error0;
 	}
@@ -237,7 +224,7 @@
 	 * vop_read (or write in the case of autogrow) they block on the iolock
 	 * until we have switched the extents.
 	 */
-	if (VN_MAPPED(vp)) {
+	if (VN_MAPPED(VFS_I(ip))) {
 		error = XFS_ERROR(EBUSY);
 		goto error0;
 	}
@@ -265,7 +252,7 @@
 		locked = 0;
 		goto error0;
 	}
-	xfs_lock_inodes(ips, 2, XFS_ILOCK_EXCL);
+	xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
 
 	/*
 	 * Count the number of extended attribute blocks
@@ -350,15 +337,11 @@
 		break;
 	}
 
-	/*
-	 * Increment vnode ref counts since xfs_trans_commit &
-	 * xfs_trans_cancel will both unlock the inodes and
-	 * decrement the associated ref counts.
-	 */
-	VN_HOLD(vp);
-	VN_HOLD(tvp);
 
+	IHOLD(ip);
 	xfs_trans_ijoin(tp, ip, lock_flags);
+
+	IHOLD(tip);
 	xfs_trans_ijoin(tp, tip, lock_flags);
 
 	xfs_trans_log_inode(tp, ip,  ilf_fields);
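
The open-coded ips[] ordering deleted above is exactly what xfs_lock_two_inodes() encapsulates: always lock the inode with the smaller i_ino first, so two tasks swapping the same pair cannot deadlock. A simplified sketch of that rule; the real helper, added elsewhere in this series, also applies lockdep subclasses:

void
example_lock_two_inodes(xfs_inode_t *ip0, xfs_inode_t *ip1, uint lock_mode)
{
	xfs_inode_t	*temp;

	ASSERT(ip0->i_ino != ip1->i_ino);
	if (ip0->i_ino > ip1->i_ino) {
		temp = ip0;		/* swap so ip0 has the smaller ino */
		ip0 = ip1;
		ip1 = temp;
	}
	xfs_ilock(ip0, lock_mode);	/* lower inode number first ... */
	xfs_ilock(ip1, lock_mode);	/* ... avoids an AB-BA deadlock */
}
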
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
index f66756c..f227ecd 100644
--- a/fs/xfs/xfs_error.c
+++ b/fs/xfs/xfs_error.c
@@ -58,9 +58,6 @@
 	}
 	return e;
 }
-#endif
-
-#if (defined(DEBUG) || defined(INDUCE_IO_ERROR))
 
 int	xfs_etest[XFS_NUM_INJECT_ERROR];
 int64_t	xfs_etest_fsid[XFS_NUM_INJECT_ERROR];
@@ -154,7 +151,7 @@
 
 	return 0;
 }
-#endif /* DEBUG || INDUCE_IO_ERROR */
+#endif /* DEBUG */
 
 static void
 xfs_fs_vcmn_err(int level, xfs_mount_t *mp, char *fmt, va_list ap)
diff --git a/fs/xfs/xfs_error.h b/fs/xfs/xfs_error.h
index d8559d1..11543f1 100644
--- a/fs/xfs/xfs_error.h
+++ b/fs/xfs/xfs_error.h
@@ -125,22 +125,14 @@
 #define XFS_RANDOM_DIOWRITE_IOERR			(XFS_RANDOM_DEFAULT/10)
 #define	XFS_RANDOM_BMAPIFORMAT				XFS_RANDOM_DEFAULT
 
-#if (defined(DEBUG) || defined(INDUCE_IO_ERROR))
+#ifdef DEBUG
 extern int xfs_error_test(int, int *, char *, int, char *, unsigned long);
 
 #define	XFS_NUM_INJECT_ERROR				10
-
-#ifdef __ANSI_CPP__
-#define XFS_TEST_ERROR(expr, mp, tag, rf)		\
-	((expr) || \
-	 xfs_error_test((tag), (mp)->m_fixedfsid, #expr, __LINE__, __FILE__, \
-			 (rf)))
-#else
 #define XFS_TEST_ERROR(expr, mp, tag, rf)		\
 	((expr) || \
 	 xfs_error_test((tag), (mp)->m_fixedfsid, "expr", __LINE__, __FILE__, \
 			(rf)))
-#endif /* __ANSI_CPP__ */
 
 extern int xfs_errortag_add(int error_tag, xfs_mount_t *mp);
 extern int xfs_errortag_clearall(xfs_mount_t *mp, int loud);
@@ -148,7 +140,7 @@
 #define XFS_TEST_ERROR(expr, mp, tag, rf)	(expr)
 #define xfs_errortag_add(tag, mp)		(ENOSYS)
 #define xfs_errortag_clearall(mp, loud)		(ENOSYS)
-#endif /* (DEBUG || INDUCE_IO_ERROR) */
+#endif /* DEBUG */
 
 /*
  * XFS panic tags -- allow a call to xfs_cmn_err() be turned into
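
With the __ANSI_CPP__ and INDUCE_IO_ERROR branches gone, XFS_TEST_ERROR is the single DEBUG-only injection hook. At a call site it wraps an ordinary corruption check, as in this sketch modeled on the AGI magic-number check in xfs_ialloc.c; treat the exact tag names as assumptions:

	if (XFS_TEST_ERROR(be32_to_cpu(agi->agi_magicnum) != XFS_AGI_MAGIC,
			   mp, XFS_ERRTAG_IALLOC_READ_AGI,
			   XFS_RANDOM_IALLOC_READ_AGI))
		return XFS_ERROR(EFSCORRUPTED);
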
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index c38fd14..f3bb75d 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
@@ -400,7 +400,7 @@
 	if (!item_zone)
 		return -ENOMEM;
 #ifdef XFS_FILESTREAMS_TRACE
-	xfs_filestreams_trace_buf = ktrace_alloc(XFS_FSTRM_KTRACE_SIZE, KM_SLEEP);
+	xfs_filestreams_trace_buf = ktrace_alloc(XFS_FSTRM_KTRACE_SIZE, KM_NOFS);
 #endif
 	return 0;
 }
diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c
index e5310c90..83502f3 100644
--- a/fs/xfs/xfs_ialloc_btree.c
+++ b/fs/xfs/xfs_ialloc_btree.c
@@ -181,7 +181,7 @@
 		 * then we can get rid of this level.
 		 */
 		if (numrecs == 1 && level > 0) {
-			agbp = cur->bc_private.i.agbp;
+			agbp = cur->bc_private.a.agbp;
 			agi = XFS_BUF_TO_AGI(agbp);
 			/*
 			 * pp is still set to the first pointer in the block.
@@ -194,7 +194,7 @@
 			 * Free the block.
 			 */
 			if ((error = xfs_free_extent(cur->bc_tp,
-				XFS_AGB_TO_FSB(mp, cur->bc_private.i.agno, bno), 1)))
+				XFS_AGB_TO_FSB(mp, cur->bc_private.a.agno, bno), 1)))
 				return error;
 			xfs_trans_binval(cur->bc_tp, bp);
 			xfs_ialloc_log_agi(cur->bc_tp, agbp,
@@ -379,7 +379,7 @@
 		rrecs = be16_to_cpu(right->bb_numrecs);
 		rbp = bp;
 		if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
-				cur->bc_private.i.agno, lbno, 0, &lbp,
+				cur->bc_private.a.agno, lbno, 0, &lbp,
 				XFS_INO_BTREE_REF)))
 			return error;
 		left = XFS_BUF_TO_INOBT_BLOCK(lbp);
@@ -401,7 +401,7 @@
 		lrecs = be16_to_cpu(left->bb_numrecs);
 		lbp = bp;
 		if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
-				cur->bc_private.i.agno, rbno, 0, &rbp,
+				cur->bc_private.a.agno, rbno, 0, &rbp,
 				XFS_INO_BTREE_REF)))
 			return error;
 		right = XFS_BUF_TO_INOBT_BLOCK(rbp);
@@ -484,7 +484,7 @@
 		xfs_buf_t		*rrbp;
 
 		if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
-				cur->bc_private.i.agno, be32_to_cpu(left->bb_rightsib), 0,
+				cur->bc_private.a.agno, be32_to_cpu(left->bb_rightsib), 0,
 				&rrbp, XFS_INO_BTREE_REF)))
 			return error;
 		rrblock = XFS_BUF_TO_INOBT_BLOCK(rrbp);
@@ -497,7 +497,7 @@
 	 * Free the deleting block.
 	 */
 	if ((error = xfs_free_extent(cur->bc_tp, XFS_AGB_TO_FSB(mp,
-				     cur->bc_private.i.agno, rbno), 1)))
+				     cur->bc_private.a.agno, rbno), 1)))
 		return error;
 	xfs_trans_binval(cur->bc_tp, rbp);
 	/*
@@ -854,7 +854,7 @@
 	{
 		xfs_agi_t	*agi;	/* a.g. inode header */
 
-		agi = XFS_BUF_TO_AGI(cur->bc_private.i.agbp);
+		agi = XFS_BUF_TO_AGI(cur->bc_private.a.agbp);
 		agno = be32_to_cpu(agi->agi_seqno);
 		agbno = be32_to_cpu(agi->agi_root);
 	}
@@ -1089,7 +1089,7 @@
 	 * Set up the left neighbor as "left".
 	 */
 	if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
-			cur->bc_private.i.agno, be32_to_cpu(right->bb_leftsib),
+			cur->bc_private.a.agno, be32_to_cpu(right->bb_leftsib),
 			0, &lbp, XFS_INO_BTREE_REF)))
 		return error;
 	left = XFS_BUF_TO_INOBT_BLOCK(lbp);
@@ -1207,10 +1207,10 @@
 	/*
 	 * Get a block & a buffer.
 	 */
-	agi = XFS_BUF_TO_AGI(cur->bc_private.i.agbp);
+	agi = XFS_BUF_TO_AGI(cur->bc_private.a.agbp);
 	args.tp = cur->bc_tp;
 	args.mp = cur->bc_mp;
-	args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.i.agno,
+	args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.a.agno,
 		be32_to_cpu(agi->agi_root));
 	args.mod = args.minleft = args.alignment = args.total = args.wasdel =
 		args.isfl = args.userdata = args.minalignslop = 0;
@@ -1233,7 +1233,7 @@
 	 */
 	agi->agi_root = cpu_to_be32(args.agbno);
 	be32_add_cpu(&agi->agi_level, 1);
-	xfs_ialloc_log_agi(args.tp, cur->bc_private.i.agbp,
+	xfs_ialloc_log_agi(args.tp, cur->bc_private.a.agbp,
 		XFS_AGI_ROOT | XFS_AGI_LEVEL);
 	/*
 	 * At the previous root level there are now two blocks: the old
@@ -1376,7 +1376,7 @@
 	 * Set up the right neighbor as "right".
 	 */
 	if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
-			cur->bc_private.i.agno, be32_to_cpu(left->bb_rightsib),
+			cur->bc_private.a.agno, be32_to_cpu(left->bb_rightsib),
 			0, &rbp, XFS_INO_BTREE_REF)))
 		return error;
 	right = XFS_BUF_TO_INOBT_BLOCK(rbp);
@@ -1492,7 +1492,7 @@
 	 * Allocate the new block.
 	 * If we can't do it, we're toast.  Give up.
 	 */
-	args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.i.agno, lbno);
+	args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.a.agno, lbno);
 	args.mod = args.minleft = args.alignment = args.total = args.wasdel =
 		args.isfl = args.userdata = args.minalignslop = 0;
 	args.minlen = args.maxlen = args.prod = 1;
@@ -1725,7 +1725,7 @@
 
 		agbno = be32_to_cpu(*XFS_INOBT_PTR_ADDR(block, cur->bc_ptrs[lev], cur));
 		if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
-				cur->bc_private.i.agno, agbno, 0, &bp,
+				cur->bc_private.a.agno, agbno, 0, &bp,
 				XFS_INO_BTREE_REF)))
 			return error;
 		lev--;
@@ -1897,7 +1897,7 @@
 
 		agbno = be32_to_cpu(*XFS_INOBT_PTR_ADDR(block, cur->bc_ptrs[lev], cur));
 		if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
-				cur->bc_private.i.agno, agbno, 0, &bp,
+				cur->bc_private.a.agno, agbno, 0, &bp,
 				XFS_INO_BTREE_REF)))
 			return error;
 		lev--;
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index b07604b..e229e9e 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -216,7 +216,14 @@
 	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
 	init_waitqueue_head(&ip->i_ipin_wait);
 	atomic_set(&ip->i_pincount, 0);
-	initnsema(&ip->i_flock, 1, "xfsfino");
+
+	/*
+	 * Because we want to use a counting completion, complete
+	 * the flush completion once to allow a single access to
+	 * the flush completion without blocking.
+	 */
+	init_completion(&ip->i_flush);
+	complete(&ip->i_flush);
 
 	if (lock_flags)
 		xfs_ilock(ip, lock_flags);
@@ -288,10 +295,17 @@
 	*ipp = ip;
 
 	/*
+	 * Set up the Linux inode with the XFS inode.
+	 */
+	ip->i_vnode = inode;
+	inode->i_private = ip;
+
+	/*
 	 * If we have a real type for an on-disk inode, we can set ops(&unlock)
 	 * now.	 If it's a new inode being created, xfs_ialloc will handle it.
 	 */
-	xfs_initialize_vnode(mp, inode, ip);
+	if (ip->i_d.di_mode != 0)
+		xfs_setup_inode(ip);
 	return 0;
 }
 
@@ -411,10 +425,11 @@
  * Special iput for brand-new inodes that are still locked
  */
 void
-xfs_iput_new(xfs_inode_t	*ip,
-	     uint		lock_flags)
+xfs_iput_new(
+	xfs_inode_t	*ip,
+	uint		lock_flags)
 {
-	struct inode	*inode = ip->i_vnode;
+	struct inode	*inode = VFS_I(ip);
 
 	xfs_itrace_entry(ip);
 
@@ -775,26 +790,3 @@
 }
 #endif
 
-/*
- * The following three routines simply manage the i_flock
- * semaphore embedded in the inode.  This semaphore synchronizes
- * processes attempting to flush the in-core inode back to disk.
- */
-void
-xfs_iflock(xfs_inode_t *ip)
-{
-	psema(&(ip->i_flock), PINOD|PLTWAIT);
-}
-
-int
-xfs_iflock_nowait(xfs_inode_t *ip)
-{
-	return (cpsema(&(ip->i_flock)));
-}
-
-void
-xfs_ifunlock(xfs_inode_t *ip)
-{
-	ASSERT(issemalocked(&(ip->i_flock)));
-	vsema(&(ip->i_flock));
-}
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index bedc661..00e80df 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -580,8 +580,8 @@
 		xfs_validate_extents(ifp, nex, XFS_EXTFMT_INODE(ip));
 		for (i = 0; i < nex; i++, dp++) {
 			xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
-			ep->l0 = be64_to_cpu(get_unaligned(&dp->l0));
-			ep->l1 = be64_to_cpu(get_unaligned(&dp->l1));
+			ep->l0 = get_unaligned_be64(&dp->l0);
+			ep->l1 = get_unaligned_be64(&dp->l1);
 		}
 		XFS_BMAP_TRACE_EXLIST(ip, nex, whichfork);
 		if (whichfork != XFS_DATA_FORK ||
@@ -835,22 +835,22 @@
 	 * Do this before xfs_iformat in case it adds entries.
 	 */
 #ifdef	XFS_INODE_TRACE
-	ip->i_trace = ktrace_alloc(INODE_TRACE_SIZE, KM_SLEEP);
+	ip->i_trace = ktrace_alloc(INODE_TRACE_SIZE, KM_NOFS);
 #endif
 #ifdef XFS_BMAP_TRACE
-	ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_SLEEP);
+	ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_NOFS);
 #endif
 #ifdef XFS_BMBT_TRACE
-	ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_SLEEP);
+	ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_NOFS);
 #endif
 #ifdef XFS_RW_TRACE
-	ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_SLEEP);
+	ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_NOFS);
 #endif
 #ifdef XFS_ILOCK_TRACE
-	ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_SLEEP);
+	ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_NOFS);
 #endif
 #ifdef XFS_DIR2_TRACE
-	ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_SLEEP);
+	ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_NOFS);
 #endif
 
 	/*
@@ -1046,9 +1046,9 @@
 {
 	xfs_ino_t	ino;
 	xfs_inode_t	*ip;
-	bhv_vnode_t	*vp;
 	uint		flags;
 	int		error;
+	timespec_t	tv;
 
 	/*
 	 * Call the space management code to pick
@@ -1077,13 +1077,12 @@
 	}
 	ASSERT(ip != NULL);
 
-	vp = XFS_ITOV(ip);
 	ip->i_d.di_mode = (__uint16_t)mode;
 	ip->i_d.di_onlink = 0;
 	ip->i_d.di_nlink = nlink;
 	ASSERT(ip->i_d.di_nlink == nlink);
-	ip->i_d.di_uid = current_fsuid(cr);
-	ip->i_d.di_gid = current_fsgid(cr);
+	ip->i_d.di_uid = current_fsuid();
+	ip->i_d.di_gid = current_fsgid();
 	ip->i_d.di_projid = prid;
 	memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
 
@@ -1130,7 +1129,13 @@
 	ip->i_size = 0;
 	ip->i_d.di_nextents = 0;
 	ASSERT(ip->i_d.di_nblocks == 0);
-	xfs_ichgtime(ip, XFS_ICHGTIME_CHG|XFS_ICHGTIME_ACC|XFS_ICHGTIME_MOD);
+
+	nanotime(&tv);
+	ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec;
+	ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec;
+	ip->i_d.di_atime = ip->i_d.di_mtime;
+	ip->i_d.di_ctime = ip->i_d.di_mtime;
+
 	/*
 	 * di_gen will have been taken care of in xfs_iread.
 	 */
@@ -1220,7 +1225,7 @@
 	xfs_trans_log_inode(tp, ip, flags);
 
 	/* now that we have an i_mode we can setup inode ops and unlock */
-	xfs_initialize_vnode(tp->t_mountp, vp, ip);
+	xfs_setup_inode(ip);
 
 	*ipp = ip;
 	return 0;
@@ -1399,7 +1404,6 @@
 	xfs_fsize_t	last_byte;
 	xfs_off_t	toss_start;
 	xfs_mount_t	*mp;
-	bhv_vnode_t	*vp;
 	int		error = 0;
 
 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
@@ -1408,7 +1412,6 @@
 	       (flags == XFS_ITRUNC_MAYBE));
 
 	mp = ip->i_mount;
-	vp = XFS_ITOV(ip);
 
 	/* wait for the completion of any pending DIOs */
 	if (new_size < ip->i_size)
@@ -1457,7 +1460,7 @@
 
 #ifdef DEBUG
 	if (new_size == 0) {
-		ASSERT(VN_CACHED(vp) == 0);
+		ASSERT(VN_CACHED(VFS_I(ip)) == 0);
 	}
 #endif
 	return error;
@@ -2630,7 +2633,6 @@
 		xfs_idestroy_fork(ip, XFS_ATTR_FORK);
 	mrfree(&ip->i_lock);
 	mrfree(&ip->i_iolock);
-	freesema(&ip->i_flock);
 
 #ifdef XFS_INODE_TRACE
 	ktrace_free(ip->i_trace);
@@ -3048,10 +3050,10 @@
 /*
  * xfs_iflush() will write a modified inode's changes out to the
  * inode's on disk home.  The caller must have the inode lock held
- * in at least shared mode and the inode flush semaphore must be
- * held as well.  The inode lock will still be held upon return from
+ * in at least shared mode and the inode flush completion must be
+ * active as well.  The inode lock will still be held upon return from
  * the call and the caller is free to unlock it.
- * The inode flush lock will be unlocked when the inode reaches the disk.
+ * The inode flush will be completed when the inode reaches the disk.
  * The flags indicate how the inode's buffer should be written out.
  */
 int
@@ -3070,7 +3072,7 @@
 	XFS_STATS_INC(xs_iflush_count);
 
 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
-	ASSERT(issemalocked(&(ip->i_flock)));
+	ASSERT(!completion_done(&ip->i_flush));
 	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
 	       ip->i_d.di_nextents > ip->i_df.if_ext_max);
 
@@ -3233,7 +3235,7 @@
 #endif
 
 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
-	ASSERT(issemalocked(&(ip->i_flock)));
+	ASSERT(!completion_done(&ip->i_flush));
 	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
 	       ip->i_d.di_nextents > ip->i_df.if_ext_max);
 
@@ -3465,7 +3467,6 @@
 	xfs_mount_t	*mp)
 {
 	xfs_inode_t	*ip;
-	bhv_vnode_t	*vp;
 
  again:
 	XFS_MOUNT_ILOCK(mp);
@@ -3480,14 +3481,13 @@
 			continue;
 		}
 
-		vp = XFS_ITOV_NULL(ip);
-		if (!vp) {
+		if (!VFS_I(ip)) {
 			XFS_MOUNT_IUNLOCK(mp);
 			xfs_finish_reclaim(ip, 0, XFS_IFLUSH_ASYNC);
 			goto again;
 		}
 
-		ASSERT(vn_count(vp) == 0);
+		ASSERT(vn_count(VFS_I(ip)) == 0);
 
 		ip = ip->i_mnext;
 	} while (ip != mp->m_inodes);
@@ -3707,7 +3707,7 @@
 	 * (all extents past */
 	if (nex2) {
 		byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
-		nex2_ep = (xfs_bmbt_rec_t *) kmem_alloc(byte_diff, KM_SLEEP);
+		nex2_ep = (xfs_bmbt_rec_t *) kmem_alloc(byte_diff, KM_NOFS);
 		memmove(nex2_ep, &erp->er_extbuf[idx], byte_diff);
 		erp->er_extcount -= nex2;
 		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -nex2);
@@ -4007,8 +4007,7 @@
 			ifp->if_u1.if_extents =
 				kmem_realloc(ifp->if_u1.if_extents,
 						rnew_size,
-						ifp->if_real_bytes,
-						KM_SLEEP);
+						ifp->if_real_bytes, KM_NOFS);
 		}
 		if (rnew_size > ifp->if_real_bytes) {
 			memset(&ifp->if_u1.if_extents[ifp->if_bytes /
@@ -4067,7 +4066,7 @@
 	xfs_ifork_t	*ifp,		/* inode fork pointer */
 	int		new_size)	/* number of extents in file */
 {
-	ifp->if_u1.if_extents = kmem_alloc(new_size, KM_SLEEP);
+	ifp->if_u1.if_extents = kmem_alloc(new_size, KM_NOFS);
 	memset(ifp->if_u1.if_extents, 0, new_size);
 	if (ifp->if_bytes) {
 		memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext,
@@ -4099,7 +4098,7 @@
 	} else {
 		ifp->if_u1.if_ext_irec = (xfs_ext_irec_t *)
 			kmem_realloc(ifp->if_u1.if_ext_irec,
-				new_size, size, KM_SLEEP);
+				new_size, size, KM_NOFS);
 	}
 }
 
@@ -4341,11 +4340,10 @@
 	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
 	ASSERT(nextents <= XFS_LINEAR_EXTS);
 
-	erp = (xfs_ext_irec_t *)
-		kmem_alloc(sizeof(xfs_ext_irec_t), KM_SLEEP);
+	erp = kmem_alloc(sizeof(xfs_ext_irec_t), KM_NOFS);
 
 	if (nextents == 0) {
-		ifp->if_u1.if_extents = kmem_alloc(XFS_IEXT_BUFSZ, KM_SLEEP);
+		ifp->if_u1.if_extents = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS);
 	} else if (!ifp->if_real_bytes) {
 		xfs_iext_inline_to_direct(ifp, XFS_IEXT_BUFSZ);
 	} else if (ifp->if_real_bytes < XFS_IEXT_BUFSZ) {
@@ -4393,7 +4391,7 @@
 
 	/* Initialize new extent record */
 	erp = ifp->if_u1.if_ext_irec;
-	erp[erp_idx].er_extbuf = kmem_alloc(XFS_IEXT_BUFSZ, KM_SLEEP);
+	erp[erp_idx].er_extbuf = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS);
 	ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
 	memset(erp[erp_idx].er_extbuf, 0, XFS_IEXT_BUFSZ);
 	erp[erp_idx].er_extcount = 0;
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 17a04b6..1420c49 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -87,8 +87,7 @@
  * Flags for xfs_ichgtime().
  */
 #define	XFS_ICHGTIME_MOD	0x1	/* data fork modification timestamp */
-#define	XFS_ICHGTIME_ACC	0x2	/* data fork access timestamp */
-#define	XFS_ICHGTIME_CHG	0x4	/* inode field change timestamp */
+#define	XFS_ICHGTIME_CHG	0x2	/* inode field change timestamp */
 
 /*
  * Per-fork incore inode flags.
@@ -204,7 +203,7 @@
 	struct xfs_inode	*i_mprev;	/* ptr to prev inode */
 	struct xfs_mount	*i_mount;	/* fs mount struct ptr */
 	struct list_head	i_reclaim;	/* reclaim list */
-	bhv_vnode_t		*i_vnode;	/* vnode backpointer */
+	struct inode		*i_vnode;	/* vnode backpointer */
 	struct xfs_dquot	*i_udquot;	/* user dquot */
 	struct xfs_dquot	*i_gdquot;	/* group dquot */
 
@@ -223,7 +222,7 @@
 	struct xfs_inode_log_item *i_itemp;	/* logging information */
 	mrlock_t		i_lock;		/* inode lock */
 	mrlock_t		i_iolock;	/* inode IO lock */
-	sema_t			i_flock;	/* inode flush lock */
+	struct completion	i_flush;	/* inode flush completion q */
 	atomic_t		i_pincount;	/* inode pin count */
 	wait_queue_head_t	i_ipin_wait;	/* inode pinning wait queue */
 	spinlock_t		i_flags_lock;	/* inode i_flags lock */
@@ -263,6 +262,18 @@
 #define XFS_ISIZE(ip)	(((ip)->i_d.di_mode & S_IFMT) == S_IFREG) ? \
 				(ip)->i_size : (ip)->i_d.di_size;
 
+/* Convert from vfs inode to xfs inode */
+static inline struct xfs_inode *XFS_I(struct inode *inode)
+{
+	return (struct xfs_inode *)inode->i_private;
+}
+
+/* convert from xfs inode to vfs inode */
+static inline struct inode *VFS_I(struct xfs_inode *ip)
+{
+	return (struct inode *)ip->i_vnode;
+}
+
 /*
  * i_flags helper functions
  */
@@ -439,9 +450,6 @@
 #define	XFS_ITRUNC_DEFINITE	0x1
 #define	XFS_ITRUNC_MAYBE	0x2
 
-#define	XFS_ITOV(ip)		((ip)->i_vnode)
-#define	XFS_ITOV_NULL(ip)	((ip)->i_vnode)
-
 /*
  * For multiple groups support: if S_ISGID bit is set in the parent
  * directory, group of new file is set to that of the parent, and
@@ -473,11 +481,8 @@
 void		xfs_iunlock(xfs_inode_t *, uint);
 void		xfs_ilock_demote(xfs_inode_t *, uint);
 int		xfs_isilocked(xfs_inode_t *, uint);
-void		xfs_iflock(xfs_inode_t *);
-int		xfs_iflock_nowait(xfs_inode_t *);
 uint		xfs_ilock_map_shared(xfs_inode_t *);
 void		xfs_iunlock_map_shared(xfs_inode_t *, uint);
-void		xfs_ifunlock(xfs_inode_t *);
 void		xfs_ireclaim(xfs_inode_t *);
 int		xfs_finish_reclaim(xfs_inode_t *, int, int);
 int		xfs_finish_reclaim_all(struct xfs_mount *, int);
@@ -522,6 +527,7 @@
 void		xfs_ichgtime(xfs_inode_t *, int);
 xfs_fsize_t	xfs_file_last_byte(xfs_inode_t *);
 void		xfs_lock_inodes(xfs_inode_t **, int, uint);
+void		xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint);
 
 void		xfs_synchronize_atime(xfs_inode_t *);
 void		xfs_mark_inode_dirty_sync(xfs_inode_t *);
@@ -570,6 +576,26 @@
 extern struct kmem_zone	*xfs_inode_zone;
 extern struct kmem_zone	*xfs_ili_zone;
 
+/*
+ * Manage the i_flush queue embedded in the inode.  This completion
+ * queue synchronizes processes attempting to flush the in-core
+ * inode back to disk.
+ */
+static inline void xfs_iflock(xfs_inode_t *ip)
+{
+	wait_for_completion(&ip->i_flush);
+}
+
+static inline int xfs_iflock_nowait(xfs_inode_t *ip)
+{
+	return try_wait_for_completion(&ip->i_flush);
+}
+
+static inline void xfs_ifunlock(xfs_inode_t *ip)
+{
+	complete(&ip->i_flush);
+}
+
 #endif	/* __KERNEL__ */
 
 #endif	/* __XFS_INODE_H__ */
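
The inline trio above turns a counting completion into the flush lock: xfs_iget initializes it completed (one free slot), xfs_iflock_nowait() claims the slot without blocking via try_wait_for_completion(), and xfs_ifunlock() returns it. A sketch of a flusher using it; the function is hypothetical:

static void example_flush_inode(xfs_inode_t *ip)
{
	if (!xfs_iflock_nowait(ip))
		return;			/* another thread owns the flush */

	/* ... write the inode back, as xfs_iflush() does ... */

	xfs_ifunlock(ip);		/* complete(): flush slot free again */
}
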
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 0eee08a..97c7452 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -779,11 +779,10 @@
 	ASSERT(iip->ili_push_owner == current_pid());
 
 	/*
-	 * If flushlock isn't locked anymore, chances are that the
-	 * inode flush completed and the inode was taken off the AIL.
-	 * So, just get out.
+	 * If a flush is not in progress anymore, chances are that the
+	 * inode was taken off the AIL. So, just get out.
 	 */
-	if (!issemalocked(&(ip->i_flock)) ||
+	if (completion_done(&ip->i_flush) ||
 	    ((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0)) {
 		iip->ili_pushbuf_flag = 0;
 		xfs_iunlock(ip, XFS_ILOCK_SHARED);
@@ -805,7 +804,7 @@
 			 * If not, we can flush it async.
 			 */
 			dopush = ((iip->ili_item.li_flags & XFS_LI_IN_AIL) &&
-				  issemalocked(&(ip->i_flock)));
+				  !completion_done(&ip->i_flush));
 			iip->ili_pushbuf_flag = 0;
 			xfs_iunlock(ip, XFS_ILOCK_SHARED);
 			xfs_buftrace("INODE ITEM PUSH", bp);
@@ -858,7 +857,7 @@
 	ip = iip->ili_inode;
 
 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));
-	ASSERT(issemalocked(&(ip->i_flock)));
+	ASSERT(!completion_done(&ip->i_flush));
 	/*
 	 * Since we were able to lock the inode's flush lock and
 	 * we found it on the AIL, the inode must be dirty.  This
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index 9a3ef9d..cf6754a 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -59,7 +59,6 @@
 {
 	xfs_icdinode_t	*dic;	/* dinode core info pointer */
 	xfs_inode_t	*ip;		/* incore inode pointer */
-	bhv_vnode_t	*vp;
 	int		error;
 
 	error = xfs_iget(mp, NULL, ino,
@@ -72,7 +71,6 @@
 	ASSERT(ip != NULL);
 	ASSERT(ip->i_blkno != (xfs_daddr_t)0);
 
-	vp = XFS_ITOV(ip);
 	dic = &ip->i_d;
 
 	/* xfs_iget returns the following without needing
@@ -85,7 +83,7 @@
 	buf->bs_uid = dic->di_uid;
 	buf->bs_gid = dic->di_gid;
 	buf->bs_size = dic->di_size;
-	vn_atime_to_bstime(vp, &buf->bs_atime);
+	vn_atime_to_bstime(VFS_I(ip), &buf->bs_atime);
 	buf->bs_mtime.tv_sec = dic->di_mtime.t_sec;
 	buf->bs_mtime.tv_nsec = dic->di_mtime.t_nsec;
 	buf->bs_ctime.tv_sec = dic->di_ctime.t_sec;
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 91b00a5..ccba14e 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -160,7 +160,7 @@
 xlog_trace_iclog(xlog_in_core_t *iclog, uint state)
 {
 	if (!iclog->ic_trace)
-		iclog->ic_trace = ktrace_alloc(256, KM_SLEEP);
+		iclog->ic_trace = ktrace_alloc(256, KM_NOFS);
 	ktrace_enter(iclog->ic_trace,
 		     (void *)((unsigned long)state),
 		     (void *)((unsigned long)current_pid()),
@@ -336,15 +336,12 @@
 	} else {
 		xlog_trace_loggrant(log, ticket, "xfs_log_done: (permanent)");
 		xlog_regrant_reserve_log_space(log, ticket);
-	}
-
-	/* If this ticket was a permanent reservation and we aren't
-	 * trying to release it, reset the inited flags; so next time
-	 * we write, a start record will be written out.
-	 */
-	if ((ticket->t_flags & XLOG_TIC_PERM_RESERV) &&
-	    (flags & XFS_LOG_REL_PERM_RESERV) == 0)
+		/* If this ticket was a permanent reservation and we aren't
+		 * trying to release it, reset the inited flags so that next time
+		 * we write, a start record will be written out.
+		 */
 		ticket->t_flags |= XLOG_TIC_INITED;
+	}
 
 	return lsn;
 }	/* xfs_log_done */
@@ -357,11 +354,11 @@
  * Asynchronous forces are implemented by setting the WANT_SYNC
  * bit in the appropriate in-core log and then returning.
  *
- * Synchronous forces are implemented with a semaphore.  All callers
- * to force a given lsn to disk will wait on a semaphore attached to the
+ * Synchronous forces are implemented with a signal variable. All callers
+ * to force a given lsn to disk will wait on the sv attached to the
  * specific in-core log.  When given in-core log finally completes its
  * write to disk, that thread will wake up all threads waiting on the
- * semaphore.
+ * sv.
  */
 int
 _xfs_log_force(
@@ -588,12 +585,12 @@
  * mp		- ubiquitous xfs mount point structure
  */
 int
-xfs_log_mount_finish(xfs_mount_t *mp, int mfsi_flags)
+xfs_log_mount_finish(xfs_mount_t *mp)
 {
 	int	error;
 
 	if (!(mp->m_flags & XFS_MOUNT_NORECOVERY))
-		error = xlog_recover_finish(mp->m_log, mfsi_flags);
+		error = xlog_recover_finish(mp->m_log);
 	else {
 		error = 0;
 		ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
@@ -707,7 +704,7 @@
 		if (!(iclog->ic_state == XLOG_STATE_ACTIVE ||
 		      iclog->ic_state == XLOG_STATE_DIRTY)) {
 			if (!XLOG_FORCED_SHUTDOWN(log)) {
-				sv_wait(&iclog->ic_forcesema, PMEM,
+				sv_wait(&iclog->ic_force_wait, PMEM,
 					&log->l_icloglock, s);
 			} else {
 				spin_unlock(&log->l_icloglock);
@@ -748,7 +745,7 @@
 			|| iclog->ic_state == XLOG_STATE_DIRTY
 			|| iclog->ic_state == XLOG_STATE_IOERROR) ) {
 
-				sv_wait(&iclog->ic_forcesema, PMEM,
+				sv_wait(&iclog->ic_force_wait, PMEM,
 					&log->l_icloglock, s);
 		} else {
 			spin_unlock(&log->l_icloglock);
@@ -838,7 +835,7 @@
 				break;
 			tail_lsn = 0;
 			free_bytes -= tic->t_unit_res;
-			sv_signal(&tic->t_sema);
+			sv_signal(&tic->t_wait);
 			tic = tic->t_next;
 		} while (tic != log->l_write_headq);
 	}
@@ -859,7 +856,7 @@
 				break;
 			tail_lsn = 0;
 			free_bytes -= need_bytes;
-			sv_signal(&tic->t_sema);
+			sv_signal(&tic->t_wait);
 			tic = tic->t_next;
 		} while (tic != log->l_reserve_headq);
 	}
@@ -1285,8 +1282,8 @@
 
 		ASSERT(XFS_BUF_ISBUSY(iclog->ic_bp));
 		ASSERT(XFS_BUF_VALUSEMA(iclog->ic_bp) <= 0);
-		sv_init(&iclog->ic_forcesema, SV_DEFAULT, "iclog-force");
-		sv_init(&iclog->ic_writesema, SV_DEFAULT, "iclog-write");
+		sv_init(&iclog->ic_force_wait, SV_DEFAULT, "iclog-force");
+		sv_init(&iclog->ic_write_wait, SV_DEFAULT, "iclog-write");
 
 		iclogp = &iclog->ic_next;
 	}
@@ -1565,8 +1562,8 @@
 
 	iclog = log->l_iclog;
 	for (i=0; i<log->l_iclog_bufs; i++) {
-		sv_destroy(&iclog->ic_forcesema);
-		sv_destroy(&iclog->ic_writesema);
+		sv_destroy(&iclog->ic_force_wait);
+		sv_destroy(&iclog->ic_write_wait);
 		xfs_buf_free(iclog->ic_bp);
 #ifdef XFS_LOG_TRACE
 		if (iclog->ic_trace != NULL) {
@@ -1976,7 +1973,7 @@
 /* Clean iclogs starting from the head.  This ordering must be
  * maintained, so an iclog doesn't become ACTIVE beyond one that
  * is SYNCING.  This is also required to maintain the notion that we use
- * a counting semaphore to hold off would be writers to the log when every
+ * an ordered wait queue to hold off would-be writers to the log when every
  * iclog is trying to sync to disk.
  *
  * State Change: DIRTY -> ACTIVE
@@ -2240,7 +2237,7 @@
 			xlog_state_clean_log(log);
 
 			/* wake up threads waiting in xfs_log_force() */
-			sv_broadcast(&iclog->ic_forcesema);
+			sv_broadcast(&iclog->ic_force_wait);
 
 			iclog = iclog->ic_next;
 		} while (first_iclog != iclog);
@@ -2302,8 +2299,7 @@
  * the second completion goes through.
  *
  * Callbacks could take time, so they are done outside the scope of the
- * global state machine log lock.  Assume that the calls to cvsema won't
- * take a long time.  At least we know it won't sleep.
+ * global state machine log lock.
  */
 STATIC void
 xlog_state_done_syncing(
@@ -2339,7 +2335,7 @@
 	 * iclog buffer, we wake them all, one will get to do the
 	 * I/O, the others get to wait for the result.
 	 */
-	sv_broadcast(&iclog->ic_writesema);
+	sv_broadcast(&iclog->ic_write_wait);
 	spin_unlock(&log->l_icloglock);
 	xlog_state_do_callback(log, aborted, iclog);	/* also cleans log */
 }	/* xlog_state_done_syncing */
@@ -2347,11 +2343,9 @@
 
 /*
  * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must
- * sleep.  The flush semaphore is set to the number of in-core buffers and
- * decremented around disk syncing.  Therefore, if all buffers are syncing,
- * this semaphore will cause new writes to sleep until a sync completes.
- * Otherwise, this code just does p() followed by v().  This approximates
- * a sleep/wakeup except we can't race.
+ * sleep.  We wait on the flush queue on the head iclog as that should be
+ * the first iclog to complete flushing. Hence if all iclogs are syncing,
+ * we will wait here and all new writes will sleep until a sync completes.
  *
  * The in-core logs are used in a circular fashion. They are not used
  * out-of-order even when an iclog past the head is free.
@@ -2508,7 +2502,7 @@
 			goto error_return;
 
 		XFS_STATS_INC(xs_sleep_logspace);
-		sv_wait(&tic->t_sema, PINOD|PLTWAIT, &log->l_grant_lock, s);
+		sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s);
 		/*
 		 * If we got an error, and the filesystem is shutting down,
 		 * we'll catch it down below. So just continue...
@@ -2534,7 +2528,7 @@
 		xlog_trace_loggrant(log, tic,
 				    "xlog_grant_log_space: sleep 2");
 		XFS_STATS_INC(xs_sleep_logspace);
-		sv_wait(&tic->t_sema, PINOD|PLTWAIT, &log->l_grant_lock, s);
+		sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s);
 
 		if (XLOG_FORCED_SHUTDOWN(log)) {
 			spin_lock(&log->l_grant_lock);
@@ -2633,7 +2627,7 @@
 			if (free_bytes < ntic->t_unit_res)
 				break;
 			free_bytes -= ntic->t_unit_res;
-			sv_signal(&ntic->t_sema);
+			sv_signal(&ntic->t_wait);
 			ntic = ntic->t_next;
 		} while (ntic != log->l_write_headq);
 
@@ -2644,7 +2638,7 @@
 			xlog_trace_loggrant(log, tic,
 				    "xlog_regrant_write_log_space: sleep 1");
 			XFS_STATS_INC(xs_sleep_logspace);
-			sv_wait(&tic->t_sema, PINOD|PLTWAIT,
+			sv_wait(&tic->t_wait, PINOD|PLTWAIT,
 				&log->l_grant_lock, s);
 
 			/* If we're shutting down, this tic is already
@@ -2673,7 +2667,7 @@
 		if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
 			xlog_ins_ticketq(&log->l_write_headq, tic);
 		XFS_STATS_INC(xs_sleep_logspace);
-		sv_wait(&tic->t_sema, PINOD|PLTWAIT, &log->l_grant_lock, s);
+		sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s);
 
 		/* If we're shutting down, this tic is already off the queue */
 		if (XLOG_FORCED_SHUTDOWN(log)) {
@@ -2916,7 +2910,7 @@
 *	2. the current iclog is dirty, and the previous iclog is in the
  *		active or dirty state.
  *
- * We may sleep (call psema) if:
+ * We may sleep if:
  *
  *	1. the current iclog is not in the active nor dirty state.
 *	2. the current iclog is dirty, and the previous iclog is not in the
@@ -3013,7 +3007,7 @@
 			return XFS_ERROR(EIO);
 		}
 		XFS_STATS_INC(xs_log_force_sleep);
-		sv_wait(&iclog->ic_forcesema, PINOD, &log->l_icloglock, s);
+		sv_wait(&iclog->ic_force_wait, PINOD, &log->l_icloglock, s);
 		/*
 		 * No need to grab the log lock here since we're
 		 * only deciding whether or not to return EIO
@@ -3096,7 +3090,7 @@
 						 XLOG_STATE_SYNCING))) {
 			ASSERT(!(iclog->ic_state & XLOG_STATE_IOERROR));
 			XFS_STATS_INC(xs_log_force_sleep);
-			sv_wait(&iclog->ic_prev->ic_writesema, PSWP,
+			sv_wait(&iclog->ic_prev->ic_write_wait, PSWP,
 				&log->l_icloglock, s);
 			*log_flushed = 1;
 			already_slept = 1;
@@ -3116,7 +3110,7 @@
 	    !(iclog->ic_state & (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY))) {
 
 		/*
-		 * Don't wait on the forcesema if we know that we've
+		 * Don't wait on completion if we know that we've
 		 * gotten a log write error.
 		 */
 		if (iclog->ic_state & XLOG_STATE_IOERROR) {
@@ -3124,7 +3118,7 @@
 			return XFS_ERROR(EIO);
 		}
 		XFS_STATS_INC(xs_log_force_sleep);
-		sv_wait(&iclog->ic_forcesema, PSWP, &log->l_icloglock, s);
+		sv_wait(&iclog->ic_force_wait, PSWP, &log->l_icloglock, s);
 		/*
 		 * No need to grab the log lock here since we're
 		 * only deciding whether or not to return EIO
@@ -3180,7 +3174,7 @@
 xlog_ticket_put(xlog_t		*log,
 		xlog_ticket_t	*ticket)
 {
-	sv_destroy(&ticket->t_sema);
+	sv_destroy(&ticket->t_wait);
 	kmem_zone_free(xfs_log_ticket_zone, ticket);
 }	/* xlog_ticket_put */
 
@@ -3270,7 +3264,7 @@
 	tic->t_trans_type	= 0;
 	if (xflags & XFS_LOG_PERM_RESERV)
 		tic->t_flags |= XLOG_TIC_PERM_RESERV;
-	sv_init(&(tic->t_sema), SV_DEFAULT, "logtick");
+	sv_init(&(tic->t_wait), SV_DEFAULT, "logtick");
 
 	xlog_tic_reset_res(tic);
 
@@ -3557,14 +3551,14 @@
 	 */
 	if ((tic = log->l_reserve_headq)) {
 		do {
-			sv_signal(&tic->t_sema);
+			sv_signal(&tic->t_wait);
 			tic = tic->t_next;
 		} while (tic != log->l_reserve_headq);
 	}
 
 	if ((tic = log->l_write_headq)) {
 		do {
-			sv_signal(&tic->t_sema);
+			sv_signal(&tic->t_wait);
 			tic = tic->t_next;
 		} while (tic != log->l_write_headq);
 	}
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index d1d678e..d47b91f 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -149,7 +149,7 @@
 			struct xfs_buftarg	*log_target,
 			xfs_daddr_t		start_block,
 			int		 	num_bblocks);
-int	  xfs_log_mount_finish(struct xfs_mount *mp, int);
+int	  xfs_log_mount_finish(struct xfs_mount *mp);
 void	  xfs_log_move_tail(struct xfs_mount	*mp,
 			    xfs_lsn_t		tail_lsn);
 int	  xfs_log_notify(struct xfs_mount	*mp,
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 6245913..c8a5b22 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -241,7 +241,7 @@
 } xlog_res_t;
 
 typedef struct xlog_ticket {
-	sv_t		   t_sema;	 /* sleep on this semaphore      : 20 */
+	sv_t		   t_wait;	 /* ticket wait queue            : 20 */
 	struct xlog_ticket *t_next;	 /*			         :4|8 */
 	struct xlog_ticket *t_prev;	 /*				 :4|8 */
 	xlog_tid_t	   t_tid;	 /* transaction identifier	 : 4  */
@@ -314,7 +314,7 @@
  *	xlog_rec_header_t into the reserved space.
  * - ic_data follows, so a write to disk can start at the beginning of
  *	the iclog.
- * - ic_forcesema is used to implement synchronous forcing of the iclog to disk.
+ * - ic_forcewait is used to implement synchronous forcing of the iclog to disk.
  * - ic_next is the pointer to the next iclog in the ring.
  * - ic_bp is a pointer to the buffer used to write this incore log to disk.
  * - ic_log is a pointer back to the global log structure.
@@ -339,8 +339,8 @@
  * and move everything else out to subsequent cachelines.
  */
 typedef struct xlog_iclog_fields {
-	sv_t			ic_forcesema;
-	sv_t			ic_writesema;
+	sv_t			ic_force_wait;
+	sv_t			ic_write_wait;
 	struct xlog_in_core	*ic_next;
 	struct xlog_in_core	*ic_prev;
 	struct xfs_buf		*ic_bp;
@@ -377,8 +377,8 @@
 /*
  * Defines to save our code from this glop.
  */
-#define	ic_forcesema	hic_fields.ic_forcesema
-#define ic_writesema	hic_fields.ic_writesema
+#define	ic_force_wait	hic_fields.ic_force_wait
+#define ic_write_wait	hic_fields.ic_write_wait
 #define	ic_next		hic_fields.ic_next
 #define	ic_prev		hic_fields.ic_prev
 #define	ic_bp		hic_fields.ic_bp
@@ -468,7 +468,7 @@
 				xfs_daddr_t *head_blk,
 				xfs_daddr_t *tail_blk);
 extern int	 xlog_recover(xlog_t *log);
-extern int	 xlog_recover_finish(xlog_t *log, int mfsi_flags);
+extern int	 xlog_recover_finish(xlog_t *log);
 extern void	 xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog, int);
 extern void	 xlog_recover_process_iunlinks(xlog_t *log);
 
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 9eb722e..82d46ce 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -3940,8 +3940,7 @@
  */
 int
 xlog_recover_finish(
-	xlog_t		*log,
-	int		mfsi_flags)
+	xlog_t		*log)
 {
 	/*
 	 * Now we're ready to do the transactions needed for the
@@ -3969,9 +3968,7 @@
 		xfs_log_force(log->l_mp, (xfs_lsn_t)0,
 			      (XFS_LOG_FORCE | XFS_LOG_SYNC));
 
-		if ( (mfsi_flags & XFS_MFSI_NOUNLINK) == 0 ) {
-			xlog_recover_process_iunlinks(log);
-		}
+		xlog_recover_process_iunlinks(log);
 
 		xlog_recover_check_summary(log);
 
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 6c5d132..a4503f5 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -128,7 +128,7 @@
  * initialized.
  */
 STATIC void
-xfs_mount_free(
+xfs_free_perag(
 	xfs_mount_t	*mp)
 {
 	if (mp->m_perag) {
@@ -139,20 +139,6 @@
 				kmem_free(mp->m_perag[agno].pagb_list);
 		kmem_free(mp->m_perag);
 	}
-
-	spinlock_destroy(&mp->m_ail_lock);
-	spinlock_destroy(&mp->m_sb_lock);
-	mutex_destroy(&mp->m_ilock);
-	mutex_destroy(&mp->m_growlock);
-	if (mp->m_quotainfo)
-		XFS_QM_DONE(mp);
-
-	if (mp->m_fsname != NULL)
-		kmem_free(mp->m_fsname);
-	if (mp->m_rtname != NULL)
-		kmem_free(mp->m_rtname);
-	if (mp->m_logname != NULL)
-		kmem_free(mp->m_logname);
 }
 
 /*
@@ -704,11 +690,11 @@
  * Update alignment values based on mount options and sb values
  */
 STATIC int
-xfs_update_alignment(xfs_mount_t *mp, int mfsi_flags, __uint64_t *update_flags)
+xfs_update_alignment(xfs_mount_t *mp, __uint64_t *update_flags)
 {
 	xfs_sb_t	*sbp = &(mp->m_sb);
 
-	if (mp->m_dalign && !(mfsi_flags & XFS_MFSI_SECOND)) {
+	if (mp->m_dalign) {
 		/*
 		 * If stripe unit and stripe width are not multiples
 		 * of the fs blocksize turn off alignment.
@@ -864,7 +850,7 @@
  * Check that the data (and log if separate) are an ok size.
  */
 STATIC int
-xfs_check_sizes(xfs_mount_t *mp, int mfsi_flags)
+xfs_check_sizes(xfs_mount_t *mp)
 {
 	xfs_buf_t	*bp;
 	xfs_daddr_t	d;
@@ -887,8 +873,7 @@
 		return error;
 	}
 
-	if (((mfsi_flags & XFS_MFSI_CLIENT) == 0) &&
-	    mp->m_logdev_targp != mp->m_ddev_targp) {
+	if (mp->m_logdev_targp != mp->m_ddev_targp) {
 		d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
 		if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) {
 			cmn_err(CE_WARN, "XFS: size check 3 failed");
@@ -923,15 +908,13 @@
  */
 int
 xfs_mountfs(
-	xfs_mount_t	*mp,
-	int		mfsi_flags)
+	xfs_mount_t	*mp)
 {
 	xfs_sb_t	*sbp = &(mp->m_sb);
 	xfs_inode_t	*rip;
 	__uint64_t	resblks;
 	__int64_t	update_flags = 0LL;
 	uint		quotamount, quotaflags;
-	int		agno;
 	int		uuid_mounted = 0;
 	int		error = 0;
 
@@ -985,7 +968,7 @@
 	 * allocator alignment is within an ag, therefore ag has
 	 * to be aligned at stripe boundary.
 	 */
-	error = xfs_update_alignment(mp, mfsi_flags, &update_flags);
+	error = xfs_update_alignment(mp, &update_flags);
 	if (error)
 		goto error1;
 
@@ -1004,8 +987,7 @@
 	 * since a single partition filesystem is identical to a single
 	 * partition volume/filesystem.
 	 */
-	if ((mfsi_flags & XFS_MFSI_SECOND) == 0 &&
-	    (mp->m_flags & XFS_MOUNT_NOUUID) == 0) {
+	if ((mp->m_flags & XFS_MOUNT_NOUUID) == 0) {
 		if (xfs_uuid_mount(mp)) {
 			error = XFS_ERROR(EINVAL);
 			goto error1;
@@ -1033,7 +1015,7 @@
 	/*
 	 * Check that the data (and log if separate) are an ok size.
 	 */
-	error = xfs_check_sizes(mp, mfsi_flags);
+	error = xfs_check_sizes(mp);
 	if (error)
 		goto error1;
 
@@ -1047,13 +1029,6 @@
 	}
 
 	/*
-	 * For client case we are done now
-	 */
-	if (mfsi_flags & XFS_MFSI_CLIENT) {
-		return 0;
-	}
-
-	/*
 	 *  Copies the low order bits of the timestamp and the randomly
 	 *  set "sequence" number out of a UUID.
 	 */
@@ -1077,8 +1052,10 @@
 	 * Allocate and initialize the per-ag data.
 	 */
 	init_rwsem(&mp->m_peraglock);
-	mp->m_perag =
-		kmem_zalloc(sbp->sb_agcount * sizeof(xfs_perag_t), KM_SLEEP);
+	mp->m_perag = kmem_zalloc(sbp->sb_agcount * sizeof(xfs_perag_t),
+				  KM_MAYFAIL);
+	if (!mp->m_perag)
+		goto error1;
 
 	mp->m_maxagi = xfs_initialize_perag(mp, sbp->sb_agcount);
 
@@ -1190,7 +1167,7 @@
 	 * delayed until after the root and real-time bitmap inodes
 	 * were consistently read in.
 	 */
-	error = xfs_log_mount_finish(mp, mfsi_flags);
+	error = xfs_log_mount_finish(mp);
 	if (error) {
 		cmn_err(CE_WARN, "XFS: log mount finish failed");
 		goto error4;
@@ -1199,7 +1176,7 @@
 	/*
 	 * Complete the quota initialisation, post-log-replay component.
 	 */
-	error = XFS_QM_MOUNT(mp, quotamount, quotaflags, mfsi_flags);
+	error = XFS_QM_MOUNT(mp, quotamount, quotaflags);
 	if (error)
 		goto error4;
 
@@ -1233,12 +1210,7 @@
  error3:
 	xfs_log_unmount_dealloc(mp);
  error2:
-	for (agno = 0; agno < sbp->sb_agcount; agno++)
-		if (mp->m_perag[agno].pagb_list)
-			kmem_free(mp->m_perag[agno].pagb_list);
-	kmem_free(mp->m_perag);
-	mp->m_perag = NULL;
-	/* FALLTHROUGH */
+	xfs_free_perag(mp);
  error1:
 	if (uuid_mounted)
 		uuid_table_remove(&mp->m_sb.sb_uuid);
@@ -1246,16 +1218,17 @@
 }
 
 /*
- * xfs_unmountfs
- *
  * This flushes out the inodes, dquots and the superblock, unmounts the
  * log and makes sure that incore structures are freed.
  */
-int
-xfs_unmountfs(xfs_mount_t *mp)
+void
+xfs_unmountfs(
+	struct xfs_mount	*mp)
 {
-	__uint64_t	resblks;
-	int		error = 0;
+	__uint64_t		resblks;
+	int			error;
+
+	IRELE(mp->m_rootip);
 
 	/*
 	 * We can potentially deadlock here if we have an inode cluster
@@ -1312,8 +1285,6 @@
 	xfs_unmountfs_wait(mp); 		/* wait for async bufs */
 	xfs_log_unmount(mp);			/* Done! No more fs ops. */
 
-	xfs_freesb(mp);
-
 	/*
 	 * All inodes from this mount point should be freed.
 	 */
@@ -1322,11 +1293,12 @@
 	if ((mp->m_flags & XFS_MOUNT_NOUUID) == 0)
 		uuid_table_remove(&mp->m_sb.sb_uuid);
 
-#if defined(DEBUG) || defined(INDUCE_IO_ERROR)
+#if defined(DEBUG)
 	xfs_errortag_clearall(mp, 0);
 #endif
-	xfs_mount_free(mp);
-	return 0;
+	xfs_free_perag(mp);
+	if (mp->m_quotainfo)
+		XFS_QM_DONE(mp);
 }
 
 STATIC void
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 5269bd6..f3c1024 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -114,7 +114,7 @@
 struct xfs_quotainfo;
 
 typedef int	(*xfs_qminit_t)(struct xfs_mount *, uint *, uint *);
-typedef int	(*xfs_qmmount_t)(struct xfs_mount *, uint, uint, int);
+typedef int	(*xfs_qmmount_t)(struct xfs_mount *, uint, uint);
 typedef int	(*xfs_qmunmount_t)(struct xfs_mount *);
 typedef void	(*xfs_qmdone_t)(struct xfs_mount *);
 typedef void	(*xfs_dqrele_t)(struct xfs_dquot *);
@@ -158,8 +158,8 @@
 
 #define XFS_QM_INIT(mp, mnt, fl) \
 	(*(mp)->m_qm_ops->xfs_qminit)(mp, mnt, fl)
-#define XFS_QM_MOUNT(mp, mnt, fl, mfsi_flags) \
-	(*(mp)->m_qm_ops->xfs_qmmount)(mp, mnt, fl, mfsi_flags)
+#define XFS_QM_MOUNT(mp, mnt, fl) \
+	(*(mp)->m_qm_ops->xfs_qmmount)(mp, mnt, fl)
 #define XFS_QM_UNMOUNT(mp) \
 	(*(mp)->m_qm_ops->xfs_qmunmount)(mp)
 #define XFS_QM_DONE(mp) \
@@ -442,13 +442,6 @@
 /*
  * Flags for xfs_mountfs
  */
-#define XFS_MFSI_SECOND		0x01	/* Secondary mount -- skip stuff */
-#define XFS_MFSI_CLIENT		0x02	/* Is a client -- skip lots of stuff */
-/*	XFS_MFSI_RRINODES	*/
-#define XFS_MFSI_NOUNLINK	0x08	/* Skip unlinked inode processing in */
-					/* log recovery */
-#define XFS_MFSI_NO_QUOTACHECK	0x10	/* Skip quotacheck processing */
-/*	XFS_MFSI_CONVERT_SUNIT	*/
 #define XFS_MFSI_QUIET		0x40	/* Be silent if mount errors found */
 
 #define XFS_DADDR_TO_AGNO(mp,d)         xfs_daddr_to_agno(mp,d)
@@ -517,10 +510,10 @@
 
 extern void	xfs_mod_sb(xfs_trans_t *, __int64_t);
 extern int	xfs_log_sbcount(xfs_mount_t *, uint);
-extern int	xfs_mountfs(xfs_mount_t *mp, int);
+extern int	xfs_mountfs(xfs_mount_t *mp);
 extern void	xfs_mountfs_check_barriers(xfs_mount_t *mp);
 
-extern int	xfs_unmountfs(xfs_mount_t *);
+extern void	xfs_unmountfs(xfs_mount_t *);
 extern int	xfs_unmountfs_writesb(xfs_mount_t *);
 extern int	xfs_unmount_flush(xfs_mount_t *, int);
 extern int	xfs_mod_incore_sb(xfs_mount_t *, xfs_sb_field_t, int64_t, int);
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index bf87a59..e2f68de 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -74,18 +74,6 @@
  */
 
 /*
- * xfs_lowbit32: get low bit set out of 32-bit argument, -1 if none set.
- */
-STATIC int
-xfs_lowbit32(
-	__uint32_t	v)
-{
-	if (v)
-		return ffs(v) - 1;
-	return -1;
-}
-
-/*
  * Allocate space to the bitmap or summary file, and zero it, for growfs.
  */
 STATIC int				/* error */
@@ -450,6 +438,7 @@
 	}
 	bbno = XFS_BITTOBLOCK(mp, bno);
 	i = 0;
+	ASSERT(minlen != 0);
 	log2len = xfs_highbit32(minlen);
 	/*
 	 * Loop over all bitmap blocks (bbno + i is current block).
@@ -618,6 +607,8 @@
 	xfs_suminfo_t	sum;		/* summary information for extents */
 
 	ASSERT(minlen % prod == 0 && maxlen % prod == 0);
+	ASSERT(maxlen != 0);
+
 	/*
 	 * Loop over all the levels starting with maxlen.
 	 * At each level, look at all the bitmap blocks, to see if there
@@ -675,6 +666,9 @@
 		*rtblock = NULLRTBLOCK;
 		return 0;
 	}
+	ASSERT(minlen != 0);
+	ASSERT(maxlen != 0);
+
 	/*
 	 * Loop over sizes, from maxlen down to minlen.
 	 * This time, when we do the allocations, allow smaller ones
@@ -1961,6 +1955,7 @@
 				  nsbp->sb_blocksize * nsbp->sb_rextsize);
 		nsbp->sb_rextents = nsbp->sb_rblocks;
 		do_div(nsbp->sb_rextents, nsbp->sb_rextsize);
+		ASSERT(nsbp->sb_rextents != 0);
 		nsbp->sb_rextslog = xfs_highbit32(nsbp->sb_rextents);
 		nrsumlevels = nmp->m_rsumlevels = nsbp->sb_rextslog + 1;
 		nrsumsize =
diff --git a/fs/xfs/xfs_rw.c b/fs/xfs/xfs_rw.c
index b0f31c0..3a82576 100644
--- a/fs/xfs/xfs_rw.c
+++ b/fs/xfs/xfs_rw.c
@@ -314,7 +314,7 @@
 		 * ASYNC buffers.
 		 */
 		XFS_BUF_ERROR(bp, EIO);
-		XFS_BUF_V_IODONESEMA(bp);
+		XFS_BUF_FINISH_IOWAIT(bp);
 	} else {
 		xfs_buf_relse(bp);
 	}
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index e4ebddd..4e1c22a 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -43,6 +43,7 @@
 #include "xfs_quota.h"
 #include "xfs_trans_priv.h"
 #include "xfs_trans_space.h"
+#include "xfs_inode_item.h"
 
 
 STATIC void	xfs_trans_apply_sb_deltas(xfs_trans_t *);
@@ -253,7 +254,7 @@
 	tp->t_mountp = mp;
 	tp->t_items_free = XFS_LIC_NUM_SLOTS;
 	tp->t_busy_free = XFS_LBC_NUM_SLOTS;
-	XFS_LIC_INIT(&(tp->t_items));
+	xfs_lic_init(&(tp->t_items));
 	XFS_LBC_INIT(&(tp->t_busy));
 	return tp;
 }
@@ -282,7 +283,7 @@
 	ntp->t_mountp = tp->t_mountp;
 	ntp->t_items_free = XFS_LIC_NUM_SLOTS;
 	ntp->t_busy_free = XFS_LBC_NUM_SLOTS;
-	XFS_LIC_INIT(&(ntp->t_items));
+	xfs_lic_init(&(ntp->t_items));
 	XFS_LBC_INIT(&(ntp->t_busy));
 
 	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
@@ -1169,7 +1170,7 @@
 		while (licp != NULL) {
 			lidp = licp->lic_descs;
 			for (i = 0; i < licp->lic_unused; i++, lidp++) {
-				if (XFS_LIC_ISFREE(licp, i)) {
+				if (xfs_lic_isfree(licp, i)) {
 					continue;
 				}
 
@@ -1216,6 +1217,68 @@
 	kmem_zone_free(xfs_trans_zone, tp);
 }
 
+/*
+ * Roll from one trans in the sequence of PERMANENT transactions to
+ * the next: permanent transactions are only flushed out when
+ * committed with XFS_TRANS_RELEASE_LOG_RES, but we still want to let
+ * chunks of it go to the log as soon as possible. So we commit the
+ * chunk we've been working on and get a new transaction to continue.
+ */
+int
+xfs_trans_roll(
+	struct xfs_trans	**tpp,
+	struct xfs_inode	*dp)
+{
+	struct xfs_trans	*trans;
+	unsigned int		logres, count;
+	int			error;
+
+	/*
+	 * Ensure that the inode is always logged.
+	 */
+	trans = *tpp;
+	xfs_trans_log_inode(trans, dp, XFS_ILOG_CORE);
+
+	/*
+	 * Copy the critical parameters from one trans to the next.
+	 */
+	logres = trans->t_log_res;
+	count = trans->t_log_count;
+	*tpp = xfs_trans_dup(trans);
+
+	/*
+	 * Commit the current transaction.
+	 * If this commit failed, then it'd just unlock those items that
+	 * are not marked ihold. That also means that a filesystem shutdown
+	 * is in progress. The caller takes the responsibility to cancel
+	 * the duplicate transaction that gets returned.
+	 */
+	error = xfs_trans_commit(trans, 0);
+	if (error)
+		return error;
+
+	trans = *tpp;
+
+	/*
+	 * Reserve space in the log for the next transaction.
+	 * This also pushes items in the "AIL", the list of logged items,
+	 * out to disk if they are taking up space at the tail of the log
+	 * that we want to use.  This requires that either nothing be locked
+	 * across this call, or that anything that is locked be logged in
+	 * the prior and the next transactions.
+	 */
+	error = xfs_trans_reserve(trans, 0, logres, 0,
+				  XFS_TRANS_PERM_LOG_RES, count);
+	if (error)
+		return error;
+
+	/*
+	 * Ensure that the inode is in the new transaction and locked.
+	 */
+
+	xfs_trans_ijoin(trans, dp, XFS_ILOCK_EXCL);
+	xfs_trans_ihold(trans, dp);
+	return 0;
+}
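
As a rough illustration of the calling pattern xfs_trans_roll() is meant
for, per its own comments above -- a sketch only; more_work() and
do_one_chunk() are hypothetical placeholders, not part of this patch:

	int example_roll_loop(struct xfs_trans **tpp, struct xfs_inode *ip)
	{
		int error = 0;

		while (more_work(ip)) {		/* hypothetical condition */
			do_one_chunk(*tpp, ip);	/* hypothetical: logs changes to ip */

			/* commit this chunk and continue in a fresh transaction */
			error = xfs_trans_roll(tpp, ip);
			if (error)
				break;	/* caller must cancel the returned trans */
		}
		return error;
	}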
 
 /*
  * THIS SHOULD BE REWRITTEN TO USE xfs_trans_next_item().
@@ -1253,7 +1316,7 @@
 	 * Special case the chunk embedded in the transaction.
 	 */
 	licp = &(tp->t_items);
-	if (!(XFS_LIC_ARE_ALL_FREE(licp))) {
+	if (!(xfs_lic_are_all_free(licp))) {
 		xfs_trans_chunk_committed(licp, tp->t_lsn, abortflag);
 	}
 
@@ -1262,7 +1325,7 @@
 	 */
 	licp = licp->lic_next;
 	while (licp != NULL) {
-		ASSERT(!XFS_LIC_ARE_ALL_FREE(licp));
+		ASSERT(!xfs_lic_are_all_free(licp));
 		xfs_trans_chunk_committed(licp, tp->t_lsn, abortflag);
 		next_licp = licp->lic_next;
 		kmem_free(licp);
@@ -1325,7 +1388,7 @@
 
 	lidp = licp->lic_descs;
 	for (i = 0; i < licp->lic_unused; i++, lidp++) {
-		if (XFS_LIC_ISFREE(licp, i)) {
+		if (xfs_lic_isfree(licp, i)) {
 			continue;
 		}
 
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 0804207..74c80bd 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -210,62 +210,52 @@
  * lic_unused to the right value (0 matches all free).  The
  * lic_descs.lid_index values are set up as each desc is allocated.
  */
-#define	XFS_LIC_INIT(cp)	xfs_lic_init(cp)
 static inline void xfs_lic_init(xfs_log_item_chunk_t *cp)
 {
 	cp->lic_free = XFS_LIC_FREEMASK;
 }
 
-#define	XFS_LIC_INIT_SLOT(cp,slot)	xfs_lic_init_slot(cp, slot)
 static inline void xfs_lic_init_slot(xfs_log_item_chunk_t *cp, int slot)
 {
 	cp->lic_descs[slot].lid_index = (unsigned char)(slot);
 }
 
-#define	XFS_LIC_VACANCY(cp)		xfs_lic_vacancy(cp)
 static inline int xfs_lic_vacancy(xfs_log_item_chunk_t *cp)
 {
 	return cp->lic_free & XFS_LIC_FREEMASK;
 }
 
-#define	XFS_LIC_ALL_FREE(cp)		xfs_lic_all_free(cp)
 static inline void xfs_lic_all_free(xfs_log_item_chunk_t *cp)
 {
 	cp->lic_free = XFS_LIC_FREEMASK;
 }
 
-#define	XFS_LIC_ARE_ALL_FREE(cp)	xfs_lic_are_all_free(cp)
 static inline int xfs_lic_are_all_free(xfs_log_item_chunk_t *cp)
 {
 	return ((cp->lic_free & XFS_LIC_FREEMASK) == XFS_LIC_FREEMASK);
 }
 
-#define	XFS_LIC_ISFREE(cp,slot)	xfs_lic_isfree(cp,slot)
 static inline int xfs_lic_isfree(xfs_log_item_chunk_t *cp, int slot)
 {
 	return (cp->lic_free & (1 << slot));
 }
 
-#define	XFS_LIC_CLAIM(cp,slot)		xfs_lic_claim(cp,slot)
 static inline void xfs_lic_claim(xfs_log_item_chunk_t *cp, int slot)
 {
 	cp->lic_free &= ~(1 << slot);
 }
 
-#define	XFS_LIC_RELSE(cp,slot)		xfs_lic_relse(cp,slot)
 static inline void xfs_lic_relse(xfs_log_item_chunk_t *cp, int slot)
 {
 	cp->lic_free |= 1 << slot;
 }
 
-#define	XFS_LIC_SLOT(cp,slot)		xfs_lic_slot(cp,slot)
 static inline xfs_log_item_desc_t *
 xfs_lic_slot(xfs_log_item_chunk_t *cp, int slot)
 {
 	return &(cp->lic_descs[slot]);
 }
 
-#define	XFS_LIC_DESC_TO_SLOT(dp)	xfs_lic_desc_to_slot(dp)
 static inline int xfs_lic_desc_to_slot(xfs_log_item_desc_t *dp)
 {
 	return (uint)dp->lid_index;
@@ -278,7 +268,6 @@
  * All of this yields the address of the chunk, which is
  * cast to a chunk pointer.
  */
-#define	XFS_LIC_DESC_TO_CHUNK(dp)	xfs_lic_desc_to_chunk(dp)
 static inline xfs_log_item_chunk_t *
 xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp)
 {
@@ -986,6 +975,7 @@
 				  int *);
 #define xfs_trans_commit(tp, flags)	_xfs_trans_commit(tp, flags, NULL)
 void		xfs_trans_cancel(xfs_trans_t *, int);
+int		xfs_trans_roll(struct xfs_trans **, struct xfs_inode *);
 int		xfs_trans_ail_init(struct xfs_mount *);
 void		xfs_trans_ail_destroy(struct xfs_mount *);
 void		xfs_trans_push_ail(struct xfs_mount *, xfs_lsn_t);
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index cb0c583..4e855b5 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -1021,16 +1021,16 @@
 	bp = NULL;
 	len = BBTOB(len);
 	licp = &tp->t_items;
-	if (!XFS_LIC_ARE_ALL_FREE(licp)) {
+	if (!xfs_lic_are_all_free(licp)) {
 		for (i = 0; i < licp->lic_unused; i++) {
 			/*
 			 * Skip unoccupied slots.
 			 */
-			if (XFS_LIC_ISFREE(licp, i)) {
+			if (xfs_lic_isfree(licp, i)) {
 				continue;
 			}
 
-			lidp = XFS_LIC_SLOT(licp, i);
+			lidp = xfs_lic_slot(licp, i);
 			blip = (xfs_buf_log_item_t *)lidp->lid_item;
 			if (blip->bli_item.li_type != XFS_LI_BUF) {
 				continue;
@@ -1074,7 +1074,7 @@
 	bp = NULL;
 	len = BBTOB(len);
 	for (licp = &tp->t_items; licp != NULL; licp = licp->lic_next) {
-		if (XFS_LIC_ARE_ALL_FREE(licp)) {
+		if (xfs_lic_are_all_free(licp)) {
 			ASSERT(licp == &tp->t_items);
 			ASSERT(licp->lic_next == NULL);
 			return NULL;
@@ -1083,11 +1083,11 @@
 			/*
 			 * Skip unoccupied slots.
 			 */
-			if (XFS_LIC_ISFREE(licp, i)) {
+			if (xfs_lic_isfree(licp, i)) {
 				continue;
 			}
 
-			lidp = XFS_LIC_SLOT(licp, i);
+			lidp = xfs_lic_slot(licp, i);
 			blip = (xfs_buf_log_item_t *)lidp->lid_item;
 			if (blip->bli_item.li_type != XFS_LI_BUF) {
 				continue;
diff --git a/fs/xfs/xfs_trans_item.c b/fs/xfs/xfs_trans_item.c
index db5c835..3c666e8 100644
--- a/fs/xfs/xfs_trans_item.c
+++ b/fs/xfs/xfs_trans_item.c
@@ -53,11 +53,11 @@
 		 * Initialize the chunk, and then
 		 * claim the first slot in the newly allocated chunk.
 		 */
-		XFS_LIC_INIT(licp);
-		XFS_LIC_CLAIM(licp, 0);
+		xfs_lic_init(licp);
+		xfs_lic_claim(licp, 0);
 		licp->lic_unused = 1;
-		XFS_LIC_INIT_SLOT(licp, 0);
-		lidp = XFS_LIC_SLOT(licp, 0);
+		xfs_lic_init_slot(licp, 0);
+		lidp = xfs_lic_slot(licp, 0);
 
 		/*
 		 * Link in the new chunk and update the free count.
@@ -88,14 +88,14 @@
 	 */
 	licp = &tp->t_items;
 	while (licp != NULL) {
-		if (XFS_LIC_VACANCY(licp)) {
+		if (xfs_lic_vacancy(licp)) {
 			if (licp->lic_unused <= XFS_LIC_MAX_SLOT) {
 				i = licp->lic_unused;
-				ASSERT(XFS_LIC_ISFREE(licp, i));
+				ASSERT(xfs_lic_isfree(licp, i));
 				break;
 			}
 			for (i = 0; i <= XFS_LIC_MAX_SLOT; i++) {
-				if (XFS_LIC_ISFREE(licp, i))
+				if (xfs_lic_isfree(licp, i))
 					break;
 			}
 			ASSERT(i <= XFS_LIC_MAX_SLOT);
@@ -108,12 +108,12 @@
 	 * If we find a free descriptor, claim it,
 	 * initialize it, and return it.
 	 */
-	XFS_LIC_CLAIM(licp, i);
+	xfs_lic_claim(licp, i);
 	if (licp->lic_unused <= i) {
 		licp->lic_unused = i + 1;
-		XFS_LIC_INIT_SLOT(licp, i);
+		xfs_lic_init_slot(licp, i);
 	}
-	lidp = XFS_LIC_SLOT(licp, i);
+	lidp = xfs_lic_slot(licp, i);
 	tp->t_items_free--;
 	lidp->lid_item = lip;
 	lidp->lid_flags = 0;
@@ -136,9 +136,9 @@
 	xfs_log_item_chunk_t	*licp;
 	xfs_log_item_chunk_t	**licpp;
 
-	slot = XFS_LIC_DESC_TO_SLOT(lidp);
-	licp = XFS_LIC_DESC_TO_CHUNK(lidp);
-	XFS_LIC_RELSE(licp, slot);
+	slot = xfs_lic_desc_to_slot(lidp);
+	licp = xfs_lic_desc_to_chunk(lidp);
+	xfs_lic_relse(licp, slot);
 	lidp->lid_item->li_desc = NULL;
 	tp->t_items_free++;
 
@@ -154,7 +154,7 @@
 	 * Also decrement the transaction structure's count of free items
 	 * by the number in a chunk since we are freeing an empty chunk.
 	 */
-	if (XFS_LIC_ARE_ALL_FREE(licp) && (licp != &(tp->t_items))) {
+	if (xfs_lic_are_all_free(licp) && (licp != &(tp->t_items))) {
 		licpp = &(tp->t_items.lic_next);
 		while (*licpp != licp) {
 			ASSERT(*licpp != NULL);
@@ -207,20 +207,20 @@
 	/*
 	 * If it's not in the first chunk, skip to the second.
 	 */
-	if (XFS_LIC_ARE_ALL_FREE(licp)) {
+	if (xfs_lic_are_all_free(licp)) {
 		licp = licp->lic_next;
 	}
 
 	/*
 	 * Return the first non-free descriptor in the chunk.
 	 */
-	ASSERT(!XFS_LIC_ARE_ALL_FREE(licp));
+	ASSERT(!xfs_lic_are_all_free(licp));
 	for (i = 0; i < licp->lic_unused; i++) {
-		if (XFS_LIC_ISFREE(licp, i)) {
+		if (xfs_lic_isfree(licp, i)) {
 			continue;
 		}
 
-		return XFS_LIC_SLOT(licp, i);
+		return xfs_lic_slot(licp, i);
 	}
 	cmn_err(CE_WARN, "xfs_trans_first_item() -- no first item");
 	return NULL;
@@ -242,18 +242,18 @@
 	xfs_log_item_chunk_t	*licp;
 	int			i;
 
-	licp = XFS_LIC_DESC_TO_CHUNK(lidp);
+	licp = xfs_lic_desc_to_chunk(lidp);
 
 	/*
 	 * First search the rest of the chunk. The for loop keeps us
 	 * from referencing things beyond the end of the chunk.
 	 */
-	for (i = (int)XFS_LIC_DESC_TO_SLOT(lidp) + 1; i < licp->lic_unused; i++) {
-		if (XFS_LIC_ISFREE(licp, i)) {
+	for (i = (int)xfs_lic_desc_to_slot(lidp) + 1; i < licp->lic_unused; i++) {
+		if (xfs_lic_isfree(licp, i)) {
 			continue;
 		}
 
-		return XFS_LIC_SLOT(licp, i);
+		return xfs_lic_slot(licp, i);
 	}
 
 	/*
@@ -266,13 +266,13 @@
 	}
 
 	licp = licp->lic_next;
-	ASSERT(!XFS_LIC_ARE_ALL_FREE(licp));
+	ASSERT(!xfs_lic_are_all_free(licp));
 	for (i = 0; i < licp->lic_unused; i++) {
-		if (XFS_LIC_ISFREE(licp, i)) {
+		if (xfs_lic_isfree(licp, i)) {
 			continue;
 		}
 
-		return XFS_LIC_SLOT(licp, i);
+		return xfs_lic_slot(licp, i);
 	}
 	ASSERT(0);
 	/* NOTREACHED */
@@ -300,9 +300,9 @@
 	/*
 	 * Special case the embedded chunk so we don't free it below.
 	 */
-	if (!XFS_LIC_ARE_ALL_FREE(licp)) {
+	if (!xfs_lic_are_all_free(licp)) {
 		(void) xfs_trans_unlock_chunk(licp, 1, abort, NULLCOMMITLSN);
-		XFS_LIC_ALL_FREE(licp);
+		xfs_lic_all_free(licp);
 		licp->lic_unused = 0;
 	}
 	licp = licp->lic_next;
@@ -311,7 +311,7 @@
 	 * Unlock each item in each chunk and free the chunks.
 	 */
 	while (licp != NULL) {
-		ASSERT(!XFS_LIC_ARE_ALL_FREE(licp));
+		ASSERT(!xfs_lic_are_all_free(licp));
 		(void) xfs_trans_unlock_chunk(licp, 1, abort, NULLCOMMITLSN);
 		next_licp = licp->lic_next;
 		kmem_free(licp);
@@ -347,7 +347,7 @@
 	/*
 	 * Special case the embedded chunk so we don't free.
 	 */
-	if (!XFS_LIC_ARE_ALL_FREE(licp)) {
+	if (!xfs_lic_are_all_free(licp)) {
 		freed = xfs_trans_unlock_chunk(licp, 0, 0, commit_lsn);
 	}
 	licpp = &(tp->t_items.lic_next);
@@ -358,10 +358,10 @@
 	 * and free empty chunks.
 	 */
 	while (licp != NULL) {
-		ASSERT(!XFS_LIC_ARE_ALL_FREE(licp));
+		ASSERT(!xfs_lic_are_all_free(licp));
 		freed += xfs_trans_unlock_chunk(licp, 0, 0, commit_lsn);
 		next_licp = licp->lic_next;
-		if (XFS_LIC_ARE_ALL_FREE(licp)) {
+		if (xfs_lic_are_all_free(licp)) {
 			*licpp = next_licp;
 			kmem_free(licp);
 			freed -= XFS_LIC_NUM_SLOTS;
@@ -402,7 +402,7 @@
 	freed = 0;
 	lidp = licp->lic_descs;
 	for (i = 0; i < licp->lic_unused; i++, lidp++) {
-		if (XFS_LIC_ISFREE(licp, i)) {
+		if (xfs_lic_isfree(licp, i)) {
 			continue;
 		}
 		lip = lidp->lid_item;
@@ -421,7 +421,7 @@
 		 */
 		if (!(freeing_chunk) &&
 		    (!(lidp->lid_flags & XFS_LID_DIRTY) || abort)) {
-			XFS_LIC_RELSE(licp, i);
+			xfs_lic_relse(licp, i);
 			freed++;
 		}
 	}
diff --git a/fs/xfs/xfs_utils.c b/fs/xfs/xfs_utils.c
index 98e5f11..35d4d41 100644
--- a/fs/xfs/xfs_utils.c
+++ b/fs/xfs/xfs_utils.c
@@ -237,7 +237,7 @@
 
 	ASSERT (ip->i_d.di_nlink > 0);
 	ip->i_d.di_nlink--;
-	drop_nlink(ip->i_vnode);
+	drop_nlink(VFS_I(ip));
 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 
 	error = 0;
@@ -301,7 +301,7 @@
 
 	ASSERT(ip->i_d.di_nlink > 0);
 	ip->i_d.di_nlink++;
-	inc_nlink(ip->i_vnode);
+	inc_nlink(VFS_I(ip));
 	if ((ip->i_d.di_version == XFS_DINODE_VERSION_1) &&
 	    (ip->i_d.di_nlink > XFS_MAXLINK_1)) {
 		/*
diff --git a/fs/xfs/xfs_utils.h b/fs/xfs/xfs_utils.h
index f316cb8..ef32122 100644
--- a/fs/xfs/xfs_utils.h
+++ b/fs/xfs/xfs_utils.h
@@ -18,9 +18,6 @@
 #ifndef __XFS_UTILS_H__
 #define __XFS_UTILS_H__
 
-#define IRELE(ip)	VN_RELE(XFS_ITOV(ip))
-#define IHOLD(ip)	VN_HOLD(XFS_ITOV(ip))
-
 extern int xfs_truncate_file(xfs_mount_t *, xfs_inode_t *);
 extern int xfs_dir_ialloc(xfs_trans_t **, xfs_inode_t *, mode_t, xfs_nlink_t,
 				xfs_dev_t, cred_t *, prid_t, int,
diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c
index 4a9a433..439dd39 100644
--- a/fs/xfs/xfs_vfsops.c
+++ b/fs/xfs/xfs_vfsops.c
@@ -128,7 +128,6 @@
 	xfs_inode_t	*rip = mp->m_rootip;
 	xfs_inode_t	*rbmip;
 	xfs_inode_t	*rsumip = NULL;
-	bhv_vnode_t	*rvp = XFS_ITOV(rip);
 	int		error;
 
 	xfs_ilock(rip, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
@@ -146,7 +145,7 @@
 		if (error == EFSCORRUPTED)
 			goto fscorrupt_out;
 
-		ASSERT(vn_count(XFS_ITOV(rbmip)) == 1);
+		ASSERT(vn_count(VFS_I(rbmip)) == 1);
 
 		rsumip = mp->m_rsumip;
 		xfs_ilock(rsumip, XFS_ILOCK_EXCL);
@@ -157,7 +156,7 @@
 		if (error == EFSCORRUPTED)
 			goto fscorrupt_out;
 
-		ASSERT(vn_count(XFS_ITOV(rsumip)) == 1);
+		ASSERT(vn_count(VFS_I(rsumip)) == 1);
 	}
 
 	/*
@@ -167,7 +166,7 @@
 	if (error == EFSCORRUPTED)
 		goto fscorrupt_out2;
 
-	if (vn_count(rvp) != 1 && !relocation) {
+	if (vn_count(VFS_I(rip)) != 1 && !relocation) {
 		xfs_iunlock(rip, XFS_ILOCK_EXCL);
 		return XFS_ERROR(EBUSY);
 	}
@@ -284,7 +283,7 @@
 	int             *bypassed)
 {
 	xfs_inode_t	*ip = NULL;
-	bhv_vnode_t	*vp = NULL;
+	struct inode	*vp = NULL;
 	int		error;
 	int		last_error;
 	uint64_t	fflag;
@@ -404,7 +403,7 @@
 			continue;
 		}
 
-		vp = XFS_ITOV_NULL(ip);
+		vp = VFS_I(ip);
 
 		/*
 		 * If the vnode is gone then this is being torn down,
@@ -479,7 +478,7 @@
 			IPOINTER_INSERT(ip, mp);
 			xfs_ilock(ip, lock_flags);
 
-			ASSERT(vp == XFS_ITOV(ip));
+			ASSERT(vp == VFS_I(ip));
 			ASSERT(ip->i_mount == mp);
 
 			vnode_refed = B_TRUE;
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 76a1166..aa238c8 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -83,7 +83,7 @@
 	cred_t			*credp)
 {
 	xfs_mount_t		*mp = ip->i_mount;
-	struct inode		*inode = XFS_ITOV(ip);
+	struct inode		*inode = VFS_I(ip);
 	int			mask = iattr->ia_valid;
 	xfs_trans_t		*tp;
 	int			code;
@@ -182,7 +182,7 @@
 	xfs_ilock(ip, lock_flags);
 
 	/* boolean: are we the file owner? */
-	file_owner = (current_fsuid(credp) == ip->i_d.di_uid);
+	file_owner = (current_fsuid() == ip->i_d.di_uid);
 
 	/*
 	 * Change various properties of a file.
@@ -513,7 +513,6 @@
 			ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec;
 			ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec;
 			ip->i_update_core = 1;
-			timeflags &= ~XFS_ICHGTIME_ACC;
 		}
 		if (mask & ATTR_MTIME) {
 			inode->i_mtime = iattr->ia_mtime;
@@ -714,7 +713,7 @@
 		return XFS_ERROR(EIO);
 
 	/* capture size updates in I/O completion before writing the inode. */
-	error = filemap_fdatawait(vn_to_inode(XFS_ITOV(ip))->i_mapping);
+	error = filemap_fdatawait(VFS_I(ip)->i_mapping);
 	if (error)
 		return XFS_ERROR(error);
 
@@ -1160,7 +1159,6 @@
 xfs_release(
 	xfs_inode_t	*ip)
 {
-	bhv_vnode_t	*vp = XFS_ITOV(ip);
 	xfs_mount_t	*mp = ip->i_mount;
 	int		error;
 
@@ -1195,13 +1193,13 @@
 		 * be exposed to that problem.
 		 */
 		truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
-		if (truncated && VN_DIRTY(vp) && ip->i_delayed_blks > 0)
+		if (truncated && VN_DIRTY(VFS_I(ip)) && ip->i_delayed_blks > 0)
 			xfs_flush_pages(ip, 0, -1, XFS_B_ASYNC, FI_NONE);
 	}
 
 	if (ip->i_d.di_nlink != 0) {
 		if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) &&
-		     ((ip->i_size > 0) || (VN_CACHED(vp) > 0 ||
+		     ((ip->i_size > 0) || (VN_CACHED(VFS_I(ip)) > 0 ||
 		       ip->i_delayed_blks > 0)) &&
 		     (ip->i_df.if_flags & XFS_IFEXTENTS))  &&
 		    (!(ip->i_d.di_flags &
@@ -1227,7 +1225,6 @@
 xfs_inactive(
 	xfs_inode_t	*ip)
 {
-	bhv_vnode_t	*vp = XFS_ITOV(ip);
 	xfs_bmap_free_t	free_list;
 	xfs_fsblock_t	first_block;
 	int		committed;
@@ -1242,7 +1239,7 @@
 	 * If the inode is already free, then there can be nothing
 	 * to clean up here.
 	 */
-	if (ip->i_d.di_mode == 0 || VN_BAD(vp)) {
+	if (ip->i_d.di_mode == 0 || VN_BAD(VFS_I(ip))) {
 		ASSERT(ip->i_df.if_real_bytes == 0);
 		ASSERT(ip->i_df.if_broot_bytes == 0);
 		return VN_INACTIVE_CACHE;
@@ -1272,7 +1269,7 @@
 
 	if (ip->i_d.di_nlink != 0) {
 		if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) &&
-                     ((ip->i_size > 0) || (VN_CACHED(vp) > 0 ||
+                     ((ip->i_size > 0) || (VN_CACHED(VFS_I(ip)) > 0 ||
                        ip->i_delayed_blks > 0)) &&
 		      (ip->i_df.if_flags & XFS_IFEXTENTS) &&
 		     (!(ip->i_d.di_flags &
@@ -1536,7 +1533,7 @@
 	 * Make sure that we have allocated dquot(s) on disk.
 	 */
 	error = XFS_QM_DQVOPALLOC(mp, dp,
-			current_fsuid(credp), current_fsgid(credp), prid,
+			current_fsuid(), current_fsgid(), prid,
 			XFS_QMOPT_QUOTALL|XFS_QMOPT_INHERIT, &udqp, &gdqp);
 	if (error)
 		goto std_return;
@@ -1708,111 +1705,6 @@
 }
 
 #ifdef DEBUG
-/*
- * Some counters to see if (and how often) we are hitting some deadlock
- * prevention code paths.
- */
-
-int xfs_rm_locks;
-int xfs_rm_lock_delays;
-int xfs_rm_attempts;
-#endif
-
-/*
- * The following routine will lock the inodes associated with the
- * directory and the named entry in the directory. The locks are
- * acquired in increasing inode number.
- *
- * If the entry is "..", then only the directory is locked. The
- * vnode ref count will still include that from the .. entry in
- * this case.
- *
- * There is a deadlock we need to worry about. If the locked directory is
- * in the AIL, it might be blocking up the log. The next inode we lock
- * could be already locked by another thread waiting for log space (e.g
- * a permanent log reservation with a long running transaction (see
- * xfs_itruncate_finish)). To solve this, we must check if the directory
- * is in the ail and use lock_nowait. If we can't lock, we need to
- * drop the inode lock on the directory and try again. xfs_iunlock will
- * potentially push the tail if we were holding up the log.
- */
-STATIC int
-xfs_lock_dir_and_entry(
-	xfs_inode_t	*dp,
-	xfs_inode_t	*ip)	/* inode of entry 'name' */
-{
-	int		attempts;
-	xfs_ino_t	e_inum;
-	xfs_inode_t	*ips[2];
-	xfs_log_item_t	*lp;
-
-#ifdef DEBUG
-	xfs_rm_locks++;
-#endif
-	attempts = 0;
-
-again:
-	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
-
-	e_inum = ip->i_ino;
-
-	xfs_itrace_ref(ip);
-
-	/*
-	 * We want to lock in increasing inum. Since we've already
-	 * acquired the lock on the directory, we may need to release
-	 * if if the inum of the entry turns out to be less.
-	 */
-	if (e_inum > dp->i_ino) {
-		/*
-		 * We are already in the right order, so just
-		 * lock on the inode of the entry.
-		 * We need to use nowait if dp is in the AIL.
-		 */
-
-		lp = (xfs_log_item_t *)dp->i_itemp;
-		if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
-			if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
-				attempts++;
-#ifdef DEBUG
-				xfs_rm_attempts++;
-#endif
-
-				/*
-				 * Unlock dp and try again.
-				 * xfs_iunlock will try to push the tail
-				 * if the inode is in the AIL.
-				 */
-
-				xfs_iunlock(dp, XFS_ILOCK_EXCL);
-
-				if ((attempts % 5) == 0) {
-					delay(1); /* Don't just spin the CPU */
-#ifdef DEBUG
-					xfs_rm_lock_delays++;
-#endif
-				}
-				goto again;
-			}
-		} else {
-			xfs_ilock(ip, XFS_ILOCK_EXCL);
-		}
-	} else if (e_inum < dp->i_ino) {
-		xfs_iunlock(dp, XFS_ILOCK_EXCL);
-
-		ips[0] = ip;
-		ips[1] = dp;
-		xfs_lock_inodes(ips, 2, XFS_ILOCK_EXCL);
-	}
-	/* else	 e_inum == dp->i_ino */
-	/*     This can happen if we're asked to lock /x/..
-	 *     the entry is "..", which is also the parent directory.
-	 */
-
-	return 0;
-}
-
-#ifdef DEBUG
 int xfs_locked_n;
 int xfs_small_retries;
 int xfs_middle_retries;
@@ -1946,6 +1838,45 @@
 #endif
 }
 
+void
+xfs_lock_two_inodes(
+	xfs_inode_t		*ip0,
+	xfs_inode_t		*ip1,
+	uint			lock_mode)
+{
+	xfs_inode_t		*temp;
+	int			attempts = 0;
+	xfs_log_item_t		*lp;
+
+	ASSERT(ip0->i_ino != ip1->i_ino);
+
+	if (ip0->i_ino > ip1->i_ino) {
+		temp = ip0;
+		ip0 = ip1;
+		ip1 = temp;
+	}
+
+ again:
+	xfs_ilock(ip0, xfs_lock_inumorder(lock_mode, 0));
+
+	/*
+	 * If the first inode we have locked is in the AIL, we must TRY to get
+	 * the second lock. If we can't get it, we must release the first one
+	 * and try again.
+	 */
+	lp = (xfs_log_item_t *)ip0->i_itemp;
+	if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
+		if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(lock_mode, 1))) {
+			xfs_iunlock(ip0, lock_mode);
+			if ((++attempts % 5) == 0)
+				delay(1); /* Don't just spin the CPU */
+			goto again;
+		}
+	} else {
+		xfs_ilock(ip1, xfs_lock_inumorder(lock_mode, 1));
+	}
+}
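
In caller terms the new helper collapses the old ips[] ordering dance into
a single call; a minimal sketch, mirroring the xfs_remove()/xfs_link()
conversions below:

	/* lock ordering and the AIL try-lock retry are handled internally */
	xfs_lock_two_inodes(dp, ip, XFS_ILOCK_EXCL);
	/* ... modify both inodes ... */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	xfs_iunlock(dp, XFS_ILOCK_EXCL);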
+
 int
 xfs_remove(
 	xfs_inode_t             *dp,
@@ -2018,9 +1949,7 @@
 		goto out_trans_cancel;
 	}
 
-	error = xfs_lock_dir_and_entry(dp, ip);
-	if (error)
-		goto out_trans_cancel;
+	xfs_lock_two_inodes(dp, ip, XFS_ILOCK_EXCL);
 
 	/*
 	 * At this point, we've gotten both the directory and the entry
@@ -2047,9 +1976,6 @@
 		}
 	}
 
-	/*
-	 * Entry must exist since we did a lookup in xfs_lock_dir_and_entry.
-	 */
 	XFS_BMAP_INIT(&free_list, &first_block);
 	error = xfs_dir_removename(tp, dp, name, ip->i_ino,
 					&first_block, &free_list, resblks);
@@ -2155,7 +2081,6 @@
 {
 	xfs_mount_t		*mp = tdp->i_mount;
 	xfs_trans_t		*tp;
-	xfs_inode_t		*ips[2];
 	int			error;
 	xfs_bmap_free_t         free_list;
 	xfs_fsblock_t           first_block;
@@ -2203,15 +2128,7 @@
 		goto error_return;
 	}
 
-	if (sip->i_ino < tdp->i_ino) {
-		ips[0] = sip;
-		ips[1] = tdp;
-	} else {
-		ips[0] = tdp;
-		ips[1] = sip;
-	}
-
-	xfs_lock_inodes(ips, 2, XFS_ILOCK_EXCL);
+	xfs_lock_two_inodes(sip, tdp, XFS_ILOCK_EXCL);
 
 	/*
 	 * Increment vnode ref counts since xfs_trans_commit &
@@ -2352,7 +2269,7 @@
 	 * Make sure that we have allocated dquot(s) on disk.
 	 */
 	error = XFS_QM_DQVOPALLOC(mp, dp,
-			current_fsuid(credp), current_fsgid(credp), prid,
+			current_fsuid(), current_fsgid(), prid,
 			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp);
 	if (error)
 		goto std_return;
@@ -2578,7 +2495,7 @@
 	 * Make sure that we have allocated dquot(s) on disk.
 	 */
 	error = XFS_QM_DQVOPALLOC(mp, dp,
-			current_fsuid(credp), current_fsgid(credp), prid,
+			current_fsuid(), current_fsgid(), prid,
 			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp);
 	if (error)
 		goto std_return;
@@ -2873,14 +2790,13 @@
 xfs_reclaim(
 	xfs_inode_t	*ip)
 {
-	bhv_vnode_t	*vp = XFS_ITOV(ip);
 
 	xfs_itrace_entry(ip);
 
-	ASSERT(!VN_MAPPED(vp));
+	ASSERT(!VN_MAPPED(VFS_I(ip)));
 
 	/* bad inode, get out here ASAP */
-	if (VN_BAD(vp)) {
+	if (VN_BAD(VFS_I(ip))) {
 		xfs_ireclaim(ip);
 		return 0;
 	}
@@ -2917,7 +2833,7 @@
 		XFS_MOUNT_ILOCK(mp);
 		spin_lock(&ip->i_flags_lock);
 		__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
-		vn_to_inode(vp)->i_private = NULL;
+		VFS_I(ip)->i_private = NULL;
 		ip->i_vnode = NULL;
 		spin_unlock(&ip->i_flags_lock);
 		list_add_tail(&ip->i_reclaim, &mp->m_del_inodes);
@@ -2933,7 +2849,7 @@
 	int		sync_mode)
 {
 	xfs_perag_t	*pag = xfs_get_perag(ip->i_mount, ip->i_ino);
-	bhv_vnode_t	*vp = XFS_ITOV_NULL(ip);
+	struct inode	*vp = VFS_I(ip);
 
 	if (vp && VN_BAD(vp))
 		goto reclaim;
@@ -3321,7 +3237,6 @@
 	xfs_off_t		len,
 	int			attr_flags)
 {
-	bhv_vnode_t		*vp;
 	int			committed;
 	int			done;
 	xfs_off_t		end_dmi_offset;
@@ -3341,7 +3256,6 @@
 	xfs_trans_t		*tp;
 	int			need_iolock = 1;
 
-	vp = XFS_ITOV(ip);
 	mp = ip->i_mount;
 
 	xfs_itrace_entry(ip);
@@ -3378,7 +3292,7 @@
 	rounding = max_t(uint, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
 	ioffset = offset & ~(rounding - 1);
 
-	if (VN_CACHED(vp) != 0) {
+	if (VN_CACHED(VFS_I(ip)) != 0) {
 		xfs_inval_cached_trace(ip, ioffset, -1, ioffset, -1);
 		error = xfs_flushinval_pages(ip, ioffset, -1, FI_REMAPF_LOCKED);
 		if (error)
diff --git a/include/asm-x86/amd_iommu_types.h b/include/asm-x86/amd_iommu_types.h
index 22aa58c..dcc8120 100644
--- a/include/asm-x86/amd_iommu_types.h
+++ b/include/asm-x86/amd_iommu_types.h
@@ -31,9 +31,6 @@
 #define ALIAS_TABLE_ENTRY_SIZE		2
 #define RLOOKUP_TABLE_ENTRY_SIZE	(sizeof(void *))
 
-/* helper macros */
-#define LOW_U32(x) ((x) & ((1ULL << 32)-1))
-
 /* Length of the MMIO region for the AMD IOMMU */
 #define MMIO_REGION_LENGTH       0x4000
 
@@ -69,6 +66,9 @@
 #define MMIO_EVT_TAIL_OFFSET	0x2018
 #define MMIO_STATUS_OFFSET	0x2020
 
+/* MMIO status bits */
+#define MMIO_STATUS_COM_WAIT_INT_MASK	0x04
+
 /* feature control bits */
 #define CONTROL_IOMMU_EN        0x00ULL
 #define CONTROL_HT_TUN_EN       0x01ULL
@@ -89,6 +89,7 @@
 #define CMD_INV_IOMMU_PAGES     0x03
 
 #define CMD_COMPL_WAIT_STORE_MASK	0x01
+#define CMD_COMPL_WAIT_INT_MASK		0x02
 #define CMD_INV_IOMMU_PAGES_SIZE_MASK	0x01
 #define CMD_INV_IOMMU_PAGES_PDE_MASK	0x02
 
@@ -99,6 +100,7 @@
 #define DEV_ENTRY_TRANSLATION   0x01
 #define DEV_ENTRY_IR            0x3d
 #define DEV_ENTRY_IW            0x3e
+#define DEV_ENTRY_NO_PAGE_FAULT	0x62
 #define DEV_ENTRY_EX            0x67
 #define DEV_ENTRY_SYSMGT1       0x68
 #define DEV_ENTRY_SYSMGT2       0x69
diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h
index 0048fb7..56d00e3 100644
--- a/include/asm-x86/i387.h
+++ b/include/asm-x86/i387.h
@@ -13,6 +13,7 @@
 #include <linux/sched.h>
 #include <linux/kernel_stat.h>
 #include <linux/regset.h>
+#include <linux/hardirq.h>
 #include <asm/asm.h>
 #include <asm/processor.h>
 #include <asm/sigcontext.h>
@@ -234,6 +235,37 @@
 	preempt_enable();
 }
 
+/*
+ * Some instructions, like VIA's padlock instructions, generate a spurious
+ * DNA fault but don't modify SSE registers. And these instructions get
+ * used from interrupt context as well. To prevent these kernel instructions
+ * in interrupt context from interacting wrongly with other user/kernel FPU
+ * usage, they should only be used bracketed by irq_ts_save()/irq_ts_restore().
+ */
+static inline int irq_ts_save(void)
+{
+	/*
+	 * If we are in process context, we are ok to take a spurious DNA fault;
+	 * avoiding it with clts() in process context would require preemption
+	 * to be disabled or some heavy lifting like kernel_fpu_begin().
+	 */
+	if (!in_interrupt())
+		return 0;
+
+	if (read_cr0() & X86_CR0_TS) {
+		clts();
+		return 1;
+	}
+
+	return 0;
+}
+
+static inline void irq_ts_restore(int TS_state)
+{
+	if (TS_state)
+		stts();
+}
+
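
A minimal usage sketch of the save/restore pattern the comment above
prescribes; do_padlock_op() is a hypothetical stand-in for the actual
TS-sensitive instruction sequence:

	static void example_padlock_call(void)
	{
		int ts_state;

		ts_state = irq_ts_save();	/* clears CR0.TS only in interrupt context */
		do_padlock_op();		/* hypothetical: may raise a spurious DNA fault */
		irq_ts_restore(ts_state);	/* sets TS again only if we cleared it */
	}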
 #ifdef CONFIG_X86_64
 
 static inline void save_init_fpu(struct task_struct *tsk)
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index d12498e..ee48ef8 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -101,6 +101,24 @@
 	return crt->digest(req);
 }
 
+static inline int crypto_ahash_init(struct ahash_request *req)
+{
+	struct ahash_tfm *crt = crypto_ahash_crt(crypto_ahash_reqtfm(req));
+	return crt->init(req);
+}
+
+static inline int crypto_ahash_update(struct ahash_request *req)
+{
+	struct ahash_tfm *crt = crypto_ahash_crt(crypto_ahash_reqtfm(req));
+	return crt->update(req);
+}
+
+static inline int crypto_ahash_final(struct ahash_request *req)
+{
+	struct ahash_tfm *crt = crypto_ahash_crt(crypto_ahash_reqtfm(req));
+	return crt->final(req);
+}
+
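
A sketch of the init/update/final chain these helpers enable, assuming the
ahash allocation helpers already in this header (crypto_alloc_ahash(),
ahash_request_alloc(), ahash_request_set_crypt()); error handling and the
async -EINPROGRESS/-EBUSY completion path are elided:

	struct crypto_ahash *tfm = crypto_alloc_ahash("sha1", 0, 0);
	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
	int err;

	/* sg, digest and nbytes are assumed to be set up by the caller */
	ahash_request_set_crypt(req, sg, digest, nbytes);
	err = crypto_ahash_init(req);
	if (!err)
		err = crypto_ahash_update(req);
	if (!err)
		err = crypto_ahash_final(req);

	ahash_request_free(req);
	crypto_free_ahash(tfm);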
 static inline void ahash_request_set_tfm(struct ahash_request *req,
 					 struct crypto_ahash *tfm)
 {
diff --git a/include/linux/completion.h b/include/linux/completion.h
index d2961b6..57faa60 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -55,4 +55,49 @@
 
 #define INIT_COMPLETION(x)	((x).done = 0)
 
+
+/**
+ *	try_wait_for_completion - try to decrement a completion without blocking
+ *	@x:	completion structure
+ *
+ *	Returns: 0 if a decrement cannot be done without blocking
+ *		 1 if a decrement succeeded.
+ *
+ *	If a completion is being used as a counting completion,
+ *	attempt to decrement the counter without blocking. This
+ *	enables us to avoid waiting if the resource the completion
+ *	is protecting is not available.
+ */
+static inline bool try_wait_for_completion(struct completion *x)
+{
+	int ret = 1;
+
+	spin_lock_irq(&x->wait.lock);
+	if (!x->done)
+		ret = 0;
+	else
+		x->done--;
+	spin_unlock_irq(&x->wait.lock);
+	return ret;
+}
+
+/**
+ *	completion_done - Test to see if a completion has any waiters
+ *	@x:	completion structure
+ *
+ *	Returns: 0 if there are waiters (wait_for_completion() in progress)
+ *		 1 if there are no waiters.
+ *
+ */
+static inline bool completion_done(struct completion *x)
+{
+	int ret = 1;
+
+	spin_lock_irq(&x->wait.lock);
+	if (!x->done)
+		ret = 0;
+	spin_unlock_irq(&x->wait.lock);
+	return ret;
+}
+
 #endif
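
A sketch of the non-blocking pattern these helpers enable when a completion
is used to count a resource; consume_resource() is a hypothetical
placeholder:

	if (try_wait_for_completion(&resource_ready)) {
		consume_resource();	/* hypothetical: we claimed one completion */
	} else {
		/* would have blocked in wait_for_completion(); do other work */
	}

	if (completion_done(&resource_ready)) {
		/* complete() has run and nobody is waiting on it */
	}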
diff --git a/include/linux/cred.h b/include/linux/cred.h
new file mode 100644
index 0000000..b69222c
--- /dev/null
+++ b/include/linux/cred.h
@@ -0,0 +1,50 @@
+/* Credentials management
+ *
+ * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_CRED_H
+#define _LINUX_CRED_H
+
+#define get_current_user()	(get_uid(current->user))
+
+#define task_uid(task)		((task)->uid)
+#define task_gid(task)		((task)->gid)
+#define task_euid(task)		((task)->euid)
+#define task_egid(task)		((task)->egid)
+
+#define current_uid()		(current->uid)
+#define current_gid()		(current->gid)
+#define current_euid()		(current->euid)
+#define current_egid()		(current->egid)
+#define current_suid()		(current->suid)
+#define current_sgid()		(current->sgid)
+#define current_fsuid()		(current->fsuid)
+#define current_fsgid()		(current->fsgid)
+#define current_cap()		(current->cap_effective)
+
+#define current_uid_gid(_uid, _gid)		\
+do {						\
+	*(_uid) = current->uid;			\
+	*(_gid) = current->gid;			\
+} while(0)
+
+#define current_euid_egid(_uid, _gid)		\
+do {						\
+	*(_uid) = current->euid;		\
+	*(_gid) = current->egid;		\
+} while(0)
+
+#define current_fsuid_fsgid(_uid, _gid)		\
+do {						\
+	*(_uid) = current->fsuid;		\
+	*(_gid) = current->fsgid;		\
+} while(0)
+
+#endif /* _LINUX_CRED_H */
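
These wrappers let callers stop dereferencing task_struct credential fields
directly, as in the XFS conversions elsewhere in this merge; a trivial
sketch (inode is an assumed struct inode *):

	uid_t uid;
	gid_t gid;

	/* before: if (inode->i_uid != current->fsuid) ... */
	if (inode->i_uid != current_fsuid())
		return -EPERM;

	current_uid_gid(&uid, &gid);	/* snapshot both in one call */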
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5850bfb..cfb0d87 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -87,6 +87,7 @@
 #include <linux/task_io_accounting.h>
 #include <linux/kobject.h>
 #include <linux/latencytop.h>
+#include <linux/cred.h>
 
 #include <asm/processor.h>
 
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index cfcc45b..358661c 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -901,7 +901,7 @@
 static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
 {
 	if (len > skb_headlen(skb) &&
-	    !__pskb_pull_tail(skb, len-skb_headlen(skb)))
+	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
 		return NULL;
 	skb->len -= len;
 	return skb->data += len;
@@ -918,7 +918,7 @@
 		return 1;
 	if (unlikely(len > skb->len))
 		return 0;
-	return __pskb_pull_tail(skb, len-skb_headlen(skb)) != NULL;
+	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
 }
 
 /**
@@ -1321,7 +1321,7 @@
 	unsigned int size = skb->len;
 	if (likely(size >= len))
 		return 0;
-	return skb_pad(skb, len-size);
+	return skb_pad(skb, len - size);
 }
 
 static inline int skb_add_data(struct sk_buff *skb,
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 5811c5d..0924cd9 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -110,6 +110,8 @@
  * @sysfs_files_created: sysfs attributes exist
  * @needs_remote_wakeup: flag set when the driver requires remote-wakeup
  *	capability during autosuspend.
+ * @needs_binding: flag set when the driver should be re-probed or unbound
+ *	following a reset or suspend operation it doesn't support.
  * @dev: driver model's view of this device
  * @usb_dev: if an interface is bound to the USB major, this will point
  *	to the sysfs representation for that device.
diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h
new file mode 100644
index 0000000..630962c
--- /dev/null
+++ b/include/linux/usb/musb.h
@@ -0,0 +1,98 @@
+/*
+ * This is used for the host and peripheral modes of the driver for
+ * Inventra (Multidrop) Highspeed Dual-Role Controllers: (M)HDRC.
+ *
+ * Board initialization should put one of these into dev->platform_data,
+ * probably on some platform_device named "musb_hdrc".  It encapsulates
+ * key configuration differences between boards.
+ */
+
+/* The USB role is defined by the connector used on the board, so long as
+ * standards are being followed.  (Developer boards sometimes won't.)
+ */
+enum musb_mode {
+	MUSB_UNDEFINED = 0,
+	MUSB_HOST,		/* A or Mini-A connector */
+	MUSB_PERIPHERAL,	/* B or Mini-B connector */
+	MUSB_OTG		/* Mini-AB connector */
+};
+
+struct clk;
+
+struct musb_hdrc_eps_bits {
+	const char	name[16];
+	u8		bits;
+};
+
+struct musb_hdrc_config {
+	/* MUSB configuration-specific details */
+	unsigned	multipoint:1;	/* multipoint device */
+	unsigned	dyn_fifo:1;	/* supports dynamic fifo sizing */
+	unsigned	soft_con:1;	/* soft connect required */
+	unsigned	utm_16:1;	/* utm data width is 16 bits */
+	unsigned	big_endian:1;	/* true if CPU uses big-endian */
+	unsigned	mult_bulk_tx:1;	/* Tx ep required for multbulk pkts */
+	unsigned	mult_bulk_rx:1;	/* Rx ep required for multbulk pkts */
+	unsigned	high_iso_tx:1;	/* Tx ep required for HB iso */
+	unsigned	high_iso_rx:1;	/* Rx ep required for HB iso */
+	unsigned	dma:1;		/* supports DMA */
+	unsigned	vendor_req:1;	/* vendor registers required */
+
+	u8		num_eps;	/* number of endpoints _with_ ep0 */
+	u8		dma_channels;	/* number of dma channels */
+	u8		dyn_fifo_size;	/* dynamic size in bytes */
+	u8		vendor_ctrl;	/* vendor control reg width */
+	u8		vendor_stat;	/* vendor status reg width */
+	u8		dma_req_chan;	/* bitmask for required dma channels */
+	u8		ram_bits;	/* ram address size */
+
+	struct musb_hdrc_eps_bits *eps_bits;
+};
+
+struct musb_hdrc_platform_data {
+	/* MUSB_HOST, MUSB_PERIPHERAL, or MUSB_OTG */
+	u8		mode;
+
+	/* for clk_get() */
+	const char	*clock;
+
+	/* (HOST or OTG) switch VBUS on/off */
+	int		(*set_vbus)(struct device *dev, int is_on);
+
+	/* (HOST or OTG) mA/2 power supplied on (default = 8mA) */
+	u8		power;
+
+	/* (PERIPHERAL) mA/2 max power consumed (default = 100mA) */
+	u8		min_power;
+
+	/* (HOST or OTG) msec/2 after VBUS on till power good */
+	u8		potpgt;
+
+	/* Power the device on or off */
+	int		(*set_power)(int state);
+
+	/* Turn device clock on or off */
+	int		(*set_clock)(struct clk *clock, int is_on);
+
+	/* MUSB configuration-specific details */
+	struct musb_hdrc_config	*config;
+};
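
An illustrative board-file sketch of the registration the header comment
describes; all names and values here are made up for the example, and the
MMIO/IRQ resources a real board must supply are omitted:

	static struct musb_hdrc_config example_config = {
		.multipoint	= 1,
		.dyn_fifo	= 1,
		.num_eps	= 16,
		.ram_bits	= 12,
	};

	static struct musb_hdrc_platform_data example_pdata = {
		.mode		= MUSB_OTG,
		.power		= 50,	/* 100 mA, in units of 2 mA */
		.config		= &example_config,
	};

	static struct platform_device example_musb = {
		.name	= "musb_hdrc",
		.id	= -1,
		.dev	= {
			.platform_data = &example_pdata,
		},
	};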
+
+
+/* TUSB 6010 support */
+
+#define	TUSB6010_OSCCLK_60	16667	/* psec/clk @ 60.0 MHz */
+#define	TUSB6010_REFCLK_24	41667	/* psec/clk @ 24.0 MHz XI */
+#define	TUSB6010_REFCLK_19	52083	/* psec/clk @ 19.2 MHz CLKIN */
+
+#ifdef	CONFIG_ARCH_OMAP2
+
+extern int __init tusb6010_setup_interface(
+		struct musb_hdrc_platform_data *data,
+		unsigned ps_refclk, unsigned waitpin,
+		unsigned async_cs, unsigned sync_cs,
+		unsigned irq, unsigned dmachan);
+
+extern int tusb6010_platform_retime(unsigned is_refclk);
+
+#endif	/* OMAP2 */
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index 09a3e6a..655341d 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -17,7 +17,8 @@
 #include <linux/mutex.h>
 
 #define SERIAL_TTY_MAJOR	188	/* Nice legal number now */
-#define SERIAL_TTY_MINORS	255	/* loads of devices :) */
+#define SERIAL_TTY_MINORS	254	/* loads of devices :) */
+#define SERIAL_TTY_NO_MINOR	255	/* No minor was assigned */
 
 /* The maximum number of ports one device can grab at once */
 #define MAX_NUM_PORTS		8
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 2f8b3c0..bc391ba 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -38,11 +38,6 @@
 #define RT6_LOOKUP_F_SRCPREF_COA	0x00000020
 
 
-#ifdef CONFIG_IPV6_MULTIPLE_TABLES
-extern struct rt6_info	*ip6_prohibit_entry;
-extern struct rt6_info	*ip6_blk_hole_entry;
-#endif
-
 extern void			ip6_route_input(struct sk_buff *skb);
 
 extern struct dst_entry *	ip6_route_output(struct net *net,
@@ -118,7 +113,6 @@
 extern void rt6_ifdown(struct net *net, struct net_device *dev);
 extern void rt6_mtu_change(struct net_device *dev, unsigned mtu);
 
-extern rwlock_t rt6_lock;
 
 /*
  *	Store a destination cache entry in a socket
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index cbb59eb..7312c3d 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -140,8 +140,24 @@
 
 
 /*
- *	IPVS statistics object
+ *	IPVS statistics objects
  */
+struct ip_vs_estimator {
+	struct list_head	list;
+
+	u64			last_inbytes;
+	u64			last_outbytes;
+	u32			last_conns;
+	u32			last_inpkts;
+	u32			last_outpkts;
+
+	u32			cps;
+	u32			inpps;
+	u32			outpps;
+	u32			inbps;
+	u32			outbps;
+};
+
 struct ip_vs_stats
 {
 	__u32                   conns;          /* connections scheduled */
@@ -156,7 +172,15 @@
 	__u32			inbps;		/* current in byte rate */
 	__u32			outbps;		/* current out byte rate */
 
+	/*
+	 * Don't add anything before the lock, because we use memcpy() to copy
+	 * the members before the lock to struct ip_vs_stats_user in
+	 * ip_vs_ctl.c.
+	 */
+
 	spinlock_t              lock;           /* spin lock */
+
+	struct ip_vs_estimator	est;		/* estimator */
 };
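
The layout constraint in the comment above exists because of copies of this
shape (a sketch; struct ip_vs_stats_user is the matching userspace-visible
layout):

	struct ip_vs_stats_user user;

	spin_lock_bh(&stats->lock);
	/* copies exactly the counter members laid out before the lock */
	memcpy(&user, stats, sizeof(user));
	spin_unlock_bh(&stats->lock);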
 
 struct dst_entry;
@@ -440,7 +464,7 @@
  */
 extern const char *ip_vs_proto_name(unsigned proto);
 extern void ip_vs_init_hash_table(struct list_head *table, int rows);
-#define IP_VS_INIT_HASH_TABLE(t) ip_vs_init_hash_table(t, sizeof(t)/sizeof(t[0]))
+#define IP_VS_INIT_HASH_TABLE(t) ip_vs_init_hash_table((t), ARRAY_SIZE((t)))
 
 #define IP_VS_APP_TYPE_FTP	1
 
@@ -620,7 +644,7 @@
 extern int sysctl_ip_vs_sync_threshold[2];
 extern int sysctl_ip_vs_nat_icmp_send;
 extern struct ip_vs_stats ip_vs_stats;
-extern struct ctl_path net_vs_ctl_path[];
+extern const struct ctl_path net_vs_ctl_path[];
 
 extern struct ip_vs_service *
 ip_vs_service_get(__u32 fwmark, __u16 protocol, __be32 vaddr, __be16 vport);
@@ -659,7 +683,7 @@
 /*
  *      IPVS rate estimator prototypes (from ip_vs_est.c)
  */
-extern int ip_vs_new_estimator(struct ip_vs_stats *stats);
+extern void ip_vs_new_estimator(struct ip_vs_stats *stats);
 extern void ip_vs_kill_estimator(struct ip_vs_stats *stats);
 extern void ip_vs_zero_estimator(struct ip_vs_stats *stats);
 
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 6affcfa..853fe83 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -89,7 +89,10 @@
 
 static inline void qdisc_run(struct Qdisc *q)
 {
-	if (!test_and_set_bit(__QDISC_STATE_RUNNING, &q->state))
+	struct netdev_queue *txq = q->dev_queue;
+
+	if (!netif_tx_queue_stopped(txq) &&
+	    !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state))
 		__qdisc_run(q);
 }
 
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 57abe82..a89f32f 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -99,7 +99,7 @@
 
 static struct gen_estimator_head elist[EST_MAX_INTERVAL+1];
 
-/* Protects against NULL dereference */
+/* Protects against NULL dereference and RCU write-side */
 static DEFINE_RWLOCK(est_lock);
 
 static void est_timer(unsigned long arg)
@@ -185,6 +185,7 @@
 	est->last_packets = bstats->packets;
 	est->avpps = rate_est->pps<<10;
 
+	write_lock_bh(&est_lock);
 	if (!elist[idx].timer.function) {
 		INIT_LIST_HEAD(&elist[idx].list);
 		setup_timer(&elist[idx].timer, est_timer, idx);
@@ -194,6 +195,7 @@
 		mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx));
 
 	list_add_rcu(&est->list, &elist[idx].list);
+	write_unlock_bh(&est_lock);
 	return 0;
 }
 
@@ -212,7 +214,6 @@
  * Removes the rate estimator specified by &bstats and &rate_est
  * and deletes the timer.
  *
- * NOTE: Called under rtnl_mutex
  */
 void gen_kill_estimator(struct gnet_stats_basic *bstats,
 	struct gnet_stats_rate_est *rate_est)
@@ -226,17 +227,17 @@
 		if (!elist[idx].timer.function)
 			continue;
 
+		write_lock_bh(&est_lock);
 		list_for_each_entry_safe(e, n, &elist[idx].list, list) {
 			if (e->rate_est != rate_est || e->bstats != bstats)
 				continue;
 
-			write_lock_bh(&est_lock);
 			e->bstats = NULL;
-			write_unlock_bh(&est_lock);
 
 			list_del_rcu(&e->list);
 			call_rcu(&e->e_rcu, __gen_kill_estimator);
 		}
+		write_unlock_bh(&est_lock);
 	}
 }
 
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 5262364..a756847 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -1961,6 +1961,8 @@
  */
 static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
 {
+	int ntxq;
+
 	if (!pkt_dev->odev) {
 		printk(KERN_ERR "pktgen: ERROR: pkt_dev->odev == NULL in "
 		       "setup_inject.\n");
@@ -1969,6 +1971,33 @@
 		return;
 	}
 
+	/* make sure that we don't pick a non-existing transmit queue */
+	ntxq = pkt_dev->odev->real_num_tx_queues;
+	if (ntxq <= num_online_cpus() && (pkt_dev->flags & F_QUEUE_MAP_CPU)) {
+		printk(KERN_WARNING "pktgen: WARNING: QUEUE_MAP_CPU "
+		       "disabled because CPU count (%d) exceeds number ",
+		       num_online_cpus());
+		printk(KERN_WARNING "pktgen: WARNING: of tx queues "
+		       "(%d) on %s \n", ntxq, pkt_dev->odev->name);
+		pkt_dev->flags &= ~F_QUEUE_MAP_CPU;
+	}
+	if (ntxq <= pkt_dev->queue_map_min) {
+		printk(KERN_WARNING "pktgen: WARNING: Requested "
+		       "queue_map_min (%d) exceeds number of tx\n",
+		       pkt_dev->queue_map_min);
+		printk(KERN_WARNING "pktgen: WARNING: queues (%d) on "
+		       "%s, resetting\n", ntxq, pkt_dev->odev->name);
+		pkt_dev->queue_map_min = ntxq - 1;
+	}
+	if (ntxq <= pkt_dev->queue_map_max) {
+		printk(KERN_WARNING "pktgen: WARNING: Requested "
+		       "queue_map_max (%d) exceeds number of tx\n",
+		       pkt_dev->queue_map_max);
+		printk(KERN_WARNING "pktgen: WARNING: queues (%d) on "
+		       "%s, resetting\n", ntxq, pkt_dev->odev->name);
+		pkt_dev->queue_map_max = ntxq - 1;
+	}
+
 	/* Default to the interface's mac if not explicitly set. */
 
 	if (is_zero_ether_addr(pkt_dev->src_mac))
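
The block above is plain range validation: queue_map_min/max and the QUEUE_MAP_CPU flag are user-configurable, so they are clamped against the device's real TX queue count before any packets are built. A userspace sketch of the same clamping; the function and variable names are assumptions.

#include <stdio.h>

/* Queue indices are zero-based, so ntxq - 1 is the highest valid one. */
static void clamp_queue_map(int ntxq, int *qmin, int *qmax)
{
	if (*qmin >= ntxq) {
		fprintf(stderr, "queue_map_min %d out of range, using %d\n",
			*qmin, ntxq - 1);
		*qmin = ntxq - 1;
	}
	if (*qmax >= ntxq) {
		fprintf(stderr, "queue_map_max %d out of range, using %d\n",
			*qmax, ntxq - 1);
		*qmax = ntxq - 1;
	}
}

int main(void)
{
	int qmin = 2, qmax = 8;

	clamp_queue_map(4, &qmin, &qmax);	/* qmax is clamped to 3 */
	printf("%d %d\n", qmin, qmax);
	return 0;
}
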
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index b622d974..1ca3b26 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -474,6 +474,11 @@
 
 	if (copy_from_user(&opt, optval, sizeof(opt)))
 		return -EFAULT;
+	/*
+	 * rfc4340: 6.1. Change Options
+	 */
+	if (opt.dccpsf_len < 1)
+		return -EINVAL;
 
 	val = kmalloc(opt.dccpsf_len, GFP_KERNEL);
 	if (!val)
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 6203ece..f70fac6 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -289,6 +289,7 @@
 	struct rtable *rt;
 	struct iphdr *pip;
 	struct igmpv3_report *pig;
+	struct net *net = dev_net(dev);
 
 	skb = alloc_skb(size + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
 	if (skb == NULL)
@@ -299,7 +300,7 @@
 				    .nl_u = { .ip4_u = {
 				    .daddr = IGMPV3_ALL_MCR } },
 				    .proto = IPPROTO_IGMP };
-		if (ip_route_output_key(&init_net, &rt, &fl)) {
+		if (ip_route_output_key(net, &rt, &fl)) {
 			kfree_skb(skb);
 			return NULL;
 		}
@@ -629,6 +630,7 @@
 	struct igmphdr *ih;
 	struct rtable *rt;
 	struct net_device *dev = in_dev->dev;
+	struct net *net = dev_net(dev);
 	__be32	group = pmc ? pmc->multiaddr : 0;
 	__be32	dst;
 
@@ -643,7 +645,7 @@
 		struct flowi fl = { .oif = dev->ifindex,
 				    .nl_u = { .ip4_u = { .daddr = dst } },
 				    .proto = IPPROTO_IGMP };
-		if (ip_route_output_key(&init_net, &rt, &fl))
+		if (ip_route_output_key(net, &rt, &fl))
 			return -1;
 	}
 	if (rt->rt_src == 0) {
@@ -1196,9 +1198,6 @@
 
 	ASSERT_RTNL();
 
-	if (!net_eq(dev_net(in_dev->dev), &init_net))
-		return;
-
 	for (im=in_dev->mc_list; im; im=im->next) {
 		if (im->multiaddr == addr) {
 			im->users++;
@@ -1278,9 +1277,6 @@
 
 	ASSERT_RTNL();
 
-	if (!net_eq(dev_net(in_dev->dev), &init_net))
-		return;
-
 	for (ip=&in_dev->mc_list; (i=*ip)!=NULL; ip=&i->next) {
 		if (i->multiaddr==addr) {
 			if (--i->users == 0) {
@@ -1308,9 +1304,6 @@
 
 	ASSERT_RTNL();
 
-	if (!net_eq(dev_net(in_dev->dev), &init_net))
-		return;
-
 	for (i=in_dev->mc_list; i; i=i->next)
 		igmp_group_dropped(i);
 
@@ -1331,9 +1324,6 @@
 {
 	ASSERT_RTNL();
 
-	if (!net_eq(dev_net(in_dev->dev), &init_net))
-		return;
-
 	in_dev->mc_tomb = NULL;
 #ifdef CONFIG_IP_MULTICAST
 	in_dev->mr_gq_running = 0;
@@ -1357,9 +1347,6 @@
 
 	ASSERT_RTNL();
 
-	if (!net_eq(dev_net(in_dev->dev), &init_net))
-		return;
-
 	ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS);
 
 	for (i=in_dev->mc_list; i; i=i->next)
@@ -1376,9 +1363,6 @@
 
 	ASSERT_RTNL();
 
-	if (!net_eq(dev_net(in_dev->dev), &init_net))
-		return;
-
 	/* Deactivate timers */
 	ip_mc_down(in_dev);
 
@@ -1395,7 +1379,7 @@
 	write_unlock_bh(&in_dev->mc_list_lock);
 }
 
-static struct in_device * ip_mc_find_dev(struct ip_mreqn *imr)
+static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr)
 {
 	struct flowi fl = { .nl_u = { .ip4_u =
 				      { .daddr = imr->imr_multiaddr.s_addr } } };
@@ -1404,19 +1388,19 @@
 	struct in_device *idev = NULL;
 
 	if (imr->imr_ifindex) {
-		idev = inetdev_by_index(&init_net, imr->imr_ifindex);
+		idev = inetdev_by_index(net, imr->imr_ifindex);
 		if (idev)
 			__in_dev_put(idev);
 		return idev;
 	}
 	if (imr->imr_address.s_addr) {
-		dev = ip_dev_find(&init_net, imr->imr_address.s_addr);
+		dev = ip_dev_find(net, imr->imr_address.s_addr);
 		if (!dev)
 			return NULL;
 		dev_put(dev);
 	}
 
-	if (!dev && !ip_route_output_key(&init_net, &rt, &fl)) {
+	if (!dev && !ip_route_output_key(net, &rt, &fl)) {
 		dev = rt->u.dst.dev;
 		ip_rt_put(rt);
 	}
@@ -1754,18 +1738,16 @@
 	struct ip_mc_socklist *iml=NULL, *i;
 	struct in_device *in_dev;
 	struct inet_sock *inet = inet_sk(sk);
+	struct net *net = sock_net(sk);
 	int ifindex;
 	int count = 0;
 
 	if (!ipv4_is_multicast(addr))
 		return -EINVAL;
 
-	if (!net_eq(sock_net(sk), &init_net))
-		return -EPROTONOSUPPORT;
-
 	rtnl_lock();
 
-	in_dev = ip_mc_find_dev(imr);
+	in_dev = ip_mc_find_dev(net, imr);
 
 	if (!in_dev) {
 		iml = NULL;
@@ -1827,15 +1809,13 @@
 	struct inet_sock *inet = inet_sk(sk);
 	struct ip_mc_socklist *iml, **imlp;
 	struct in_device *in_dev;
+	struct net *net = sock_net(sk);
 	__be32 group = imr->imr_multiaddr.s_addr;
 	u32 ifindex;
 	int ret = -EADDRNOTAVAIL;
 
-	if (!net_eq(sock_net(sk), &init_net))
-		return -EPROTONOSUPPORT;
-
 	rtnl_lock();
-	in_dev = ip_mc_find_dev(imr);
+	in_dev = ip_mc_find_dev(net, imr);
 	ifindex = imr->imr_ifindex;
 	for (imlp = &inet->mc_list; (iml = *imlp) != NULL; imlp = &iml->next) {
 		if (iml->multi.imr_multiaddr.s_addr != group)
@@ -1873,21 +1853,19 @@
 	struct in_device *in_dev = NULL;
 	struct inet_sock *inet = inet_sk(sk);
 	struct ip_sf_socklist *psl;
+	struct net *net = sock_net(sk);
 	int leavegroup = 0;
 	int i, j, rv;
 
 	if (!ipv4_is_multicast(addr))
 		return -EINVAL;
 
-	if (!net_eq(sock_net(sk), &init_net))
-		return -EPROTONOSUPPORT;
-
 	rtnl_lock();
 
 	imr.imr_multiaddr.s_addr = mreqs->imr_multiaddr;
 	imr.imr_address.s_addr = mreqs->imr_interface;
 	imr.imr_ifindex = ifindex;
-	in_dev = ip_mc_find_dev(&imr);
+	in_dev = ip_mc_find_dev(net, &imr);
 
 	if (!in_dev) {
 		err = -ENODEV;
@@ -2007,6 +1985,7 @@
 	struct in_device *in_dev;
 	struct inet_sock *inet = inet_sk(sk);
 	struct ip_sf_socklist *newpsl, *psl;
+	struct net *net = sock_net(sk);
 	int leavegroup = 0;
 
 	if (!ipv4_is_multicast(addr))
@@ -2015,15 +1994,12 @@
 	    msf->imsf_fmode != MCAST_EXCLUDE)
 		return -EINVAL;
 
-	if (!net_eq(sock_net(sk), &init_net))
-		return -EPROTONOSUPPORT;
-
 	rtnl_lock();
 
 	imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
 	imr.imr_address.s_addr = msf->imsf_interface;
 	imr.imr_ifindex = ifindex;
-	in_dev = ip_mc_find_dev(&imr);
+	in_dev = ip_mc_find_dev(net, &imr);
 
 	if (!in_dev) {
 		err = -ENODEV;
@@ -2094,19 +2070,17 @@
 	struct in_device *in_dev;
 	struct inet_sock *inet = inet_sk(sk);
 	struct ip_sf_socklist *psl;
+	struct net *net = sock_net(sk);
 
 	if (!ipv4_is_multicast(addr))
 		return -EINVAL;
 
-	if (!net_eq(sock_net(sk), &init_net))
-		return -EPROTONOSUPPORT;
-
 	rtnl_lock();
 
 	imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
 	imr.imr_address.s_addr = msf->imsf_interface;
 	imr.imr_ifindex = 0;
-	in_dev = ip_mc_find_dev(&imr);
+	in_dev = ip_mc_find_dev(net, &imr);
 
 	if (!in_dev) {
 		err = -ENODEV;
@@ -2163,9 +2137,6 @@
 	if (!ipv4_is_multicast(addr))
 		return -EINVAL;
 
-	if (!net_eq(sock_net(sk), &init_net))
-		return -EPROTONOSUPPORT;
-
 	rtnl_lock();
 
 	err = -EADDRNOTAVAIL;
@@ -2246,19 +2217,17 @@
 {
 	struct inet_sock *inet = inet_sk(sk);
 	struct ip_mc_socklist *iml;
+	struct net *net = sock_net(sk);
 
 	if (inet->mc_list == NULL)
 		return;
 
-	if (!net_eq(sock_net(sk), &init_net))
-		return;
-
 	rtnl_lock();
 	while ((iml = inet->mc_list) != NULL) {
 		struct in_device *in_dev;
 		inet->mc_list = iml->next;
 
-		in_dev = inetdev_by_index(&init_net, iml->multi.imr_ifindex);
+		in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
 		(void) ip_mc_leave_src(sk, iml, in_dev);
 		if (in_dev != NULL) {
 			ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
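
Every igmp.c hunk above is the same mechanical transformation: instead of refusing to run outside init_net, each function derives the namespace from the object it already holds and passes it down. A kernel-style sketch of the shape (illustrative, not compilable on its own; the helper name is invented):

static int route_in_own_ns(struct net_device *dev, struct sock *sk,
			   struct flowi *fl, struct rtable **rtp)
{
	struct net *net;

	net = dev ? dev_net(dev)	/* device context ...    */
		  : sock_net(sk);	/* ... or socket context */

	return ip_route_output_key(net, rtp, fl);
}
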
diff --git a/net/ipv4/ipvs/ip_vs_app.c b/net/ipv4/ipvs/ip_vs_app.c
index 1f1897a..201b8ea 100644
--- a/net/ipv4/ipvs/ip_vs_app.c
+++ b/net/ipv4/ipvs/ip_vs_app.c
@@ -608,7 +608,7 @@
 }
 
 
-int ip_vs_app_init(void)
+int __init ip_vs_app_init(void)
 {
 	/* we will replace it with proc_net_ipvs_create() soon */
 	proc_net_fops_create(&init_net, "ip_vs_app", 0, &ip_vs_app_fops);
diff --git a/net/ipv4/ipvs/ip_vs_conn.c b/net/ipv4/ipvs/ip_vs_conn.c
index f8bdae4..44a6872 100644
--- a/net/ipv4/ipvs/ip_vs_conn.c
+++ b/net/ipv4/ipvs/ip_vs_conn.c
@@ -965,7 +965,7 @@
 }
 
 
-int ip_vs_conn_init(void)
+int __init ip_vs_conn_init(void)
 {
 	int idx;
 
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index 9a5ace0..6379705 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -683,9 +683,22 @@
 ip_vs_zero_stats(struct ip_vs_stats *stats)
 {
 	spin_lock_bh(&stats->lock);
-	memset(stats, 0, (char *)&stats->lock - (char *)stats);
-	spin_unlock_bh(&stats->lock);
+
+	stats->conns = 0;
+	stats->inpkts = 0;
+	stats->outpkts = 0;
+	stats->inbytes = 0;
+	stats->outbytes = 0;
+
+	stats->cps = 0;
+	stats->inpps = 0;
+	stats->outpps = 0;
+	stats->inbps = 0;
+	stats->outbps = 0;
+
 	ip_vs_zero_estimator(stats);
+
+	spin_unlock_bh(&stats->lock);
 }
 
 /*
@@ -1589,7 +1602,7 @@
 	{ .ctl_name = 0 }
 };
 
-struct ctl_path net_vs_ctl_path[] = {
+const struct ctl_path net_vs_ctl_path[] = {
 	{ .procname = "net", .ctl_name = CTL_NET, },
 	{ .procname = "ipv4", .ctl_name = NET_IPV4, },
 	{ .procname = "vs", },
@@ -1784,7 +1797,9 @@
 
 #endif
 
-struct ip_vs_stats ip_vs_stats;
+struct ip_vs_stats ip_vs_stats = {
+	.lock = __SPIN_LOCK_UNLOCKED(ip_vs_stats.lock),
+};
 
 #ifdef CONFIG_PROC_FS
 static int ip_vs_stats_show(struct seq_file *seq, void *v)
@@ -2306,7 +2321,7 @@
 };
 
 
-int ip_vs_control_init(void)
+int __init ip_vs_control_init(void)
 {
 	int ret;
 	int idx;
@@ -2333,8 +2348,6 @@
 		INIT_LIST_HEAD(&ip_vs_rtable[idx]);
 	}
 
-	memset(&ip_vs_stats, 0, sizeof(ip_vs_stats));
-	spin_lock_init(&ip_vs_stats.lock);
 	ip_vs_new_estimator(&ip_vs_stats);
 
 	/* Hook the defense timer */
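
Initializing ip_vs_stats.lock with __SPIN_LOCK_UNLOCKED() makes the object valid before any code runs, which is why the memset()/spin_lock_init() pair above could be deleted; it also stops that memset from wiping the estimator now embedded in the struct. A userspace analogue using pthreads; the struct name is illustrative.

#include <pthread.h>

/* The mutex is usable before any init function runs, mirroring the
 * compile-time spinlock initializer in the hunk above. */
struct counters {
	unsigned long conns;
	pthread_mutex_t lock;
};

static struct counters stats = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
};
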
diff --git a/net/ipv4/ipvs/ip_vs_dh.c b/net/ipv4/ipvs/ip_vs_dh.c
index 8afc150..fa66824 100644
--- a/net/ipv4/ipvs/ip_vs_dh.c
+++ b/net/ipv4/ipvs/ip_vs_dh.c
@@ -233,6 +233,7 @@
 	.name =			"dh",
 	.refcnt =		ATOMIC_INIT(0),
 	.module =		THIS_MODULE,
+	.n_list =		LIST_HEAD_INIT(ip_vs_dh_scheduler.n_list),
 	.init_service =		ip_vs_dh_init_svc,
 	.done_service =		ip_vs_dh_done_svc,
 	.update_service =	ip_vs_dh_update_svc,
@@ -242,7 +243,6 @@
 
 static int __init ip_vs_dh_init(void)
 {
-	INIT_LIST_HEAD(&ip_vs_dh_scheduler.n_list);
 	return register_ip_vs_scheduler(&ip_vs_dh_scheduler);
 }
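
This LIST_HEAD_INIT() conversion repeats below for every scheduler (lblc, lblcr, lc, nq, rr, sed, sh, wlc, wrr): a statically initialized node points at itself, so list_empty() gives a meaningful answer before any init function runs, which is exactly what the reworked registration checks in ip_vs_sched.c rely on. A runnable sketch with a trimmed-down list_head (copied locally, not taken from <linux/list.h>):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };
#define LIST_HEAD_INIT(name)	{ &(name), &(name) }

struct sched { struct list_head n_list; };

static struct sched s = {
	.n_list = LIST_HEAD_INIT(s.n_list),	/* valid before main() */
};

static int list_empty(const struct list_head *h)
{
	return h->next == h;
}

int main(void)
{
	printf("empty: %d\n", list_empty(&s.n_list));	/* prints 1 */
	return 0;
}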
 
diff --git a/net/ipv4/ipvs/ip_vs_est.c b/net/ipv4/ipvs/ip_vs_est.c
index bc04eed..5a20f93 100644
--- a/net/ipv4/ipvs/ip_vs_est.c
+++ b/net/ipv4/ipvs/ip_vs_est.c
@@ -17,6 +17,7 @@
 #include <linux/types.h>
 #include <linux/interrupt.h>
 #include <linux/sysctl.h>
+#include <linux/list.h>
 
 #include <net/ip_vs.h>
 
@@ -44,28 +45,11 @@
  */
 
 
-struct ip_vs_estimator
-{
-	struct ip_vs_estimator	*next;
-	struct ip_vs_stats	*stats;
+static void estimation_timer(unsigned long arg);
 
-	u32			last_conns;
-	u32			last_inpkts;
-	u32			last_outpkts;
-	u64			last_inbytes;
-	u64			last_outbytes;
-
-	u32			cps;
-	u32			inpps;
-	u32			outpps;
-	u32			inbps;
-	u32			outbps;
-};
-
-
-static struct ip_vs_estimator *est_list = NULL;
-static DEFINE_RWLOCK(est_lock);
-static struct timer_list est_timer;
+static LIST_HEAD(est_list);
+static DEFINE_SPINLOCK(est_lock);
+static DEFINE_TIMER(est_timer, estimation_timer, 0, 0);
 
 static void estimation_timer(unsigned long arg)
 {
@@ -76,9 +60,9 @@
 	u64 n_inbytes, n_outbytes;
 	u32 rate;
 
-	read_lock(&est_lock);
-	for (e = est_list; e; e = e->next) {
-		s = e->stats;
+	spin_lock(&est_lock);
+	list_for_each_entry(e, &est_list, list) {
+		s = container_of(e, struct ip_vs_stats, est);
 
 		spin_lock(&s->lock);
 		n_conns = s->conns;
@@ -114,19 +98,16 @@
 		s->outbps = (e->outbps+0xF)>>5;
 		spin_unlock(&s->lock);
 	}
-	read_unlock(&est_lock);
+	spin_unlock(&est_lock);
 	mod_timer(&est_timer, jiffies + 2*HZ);
 }
 
-int ip_vs_new_estimator(struct ip_vs_stats *stats)
+void ip_vs_new_estimator(struct ip_vs_stats *stats)
 {
-	struct ip_vs_estimator *est;
+	struct ip_vs_estimator *est = &stats->est;
 
-	est = kzalloc(sizeof(*est), GFP_KERNEL);
-	if (est == NULL)
-		return -ENOMEM;
+	INIT_LIST_HEAD(&est->list);
 
-	est->stats = stats;
 	est->last_conns = stats->conns;
 	est->cps = stats->cps<<10;
 
@@ -142,59 +123,40 @@
 	est->last_outbytes = stats->outbytes;
 	est->outbps = stats->outbps<<5;
 
-	write_lock_bh(&est_lock);
-	est->next = est_list;
-	if (est->next == NULL) {
-		setup_timer(&est_timer, estimation_timer, 0);
-		est_timer.expires = jiffies + 2*HZ;
-		add_timer(&est_timer);
-	}
-	est_list = est;
-	write_unlock_bh(&est_lock);
-	return 0;
+	spin_lock_bh(&est_lock);
+	if (list_empty(&est_list))
+		mod_timer(&est_timer, jiffies + 2 * HZ);
+	list_add(&est->list, &est_list);
+	spin_unlock_bh(&est_lock);
 }
 
 void ip_vs_kill_estimator(struct ip_vs_stats *stats)
 {
-	struct ip_vs_estimator *est, **pest;
-	int killed = 0;
+	struct ip_vs_estimator *est = &stats->est;
 
-	write_lock_bh(&est_lock);
-	pest = &est_list;
-	while ((est=*pest) != NULL) {
-		if (est->stats != stats) {
-			pest = &est->next;
-			continue;
-		}
-		*pest = est->next;
-		kfree(est);
-		killed++;
+	spin_lock_bh(&est_lock);
+	list_del(&est->list);
+	while (list_empty(&est_list) && try_to_del_timer_sync(&est_timer) < 0) {
+		spin_unlock_bh(&est_lock);
+		cpu_relax();
+		spin_lock_bh(&est_lock);
 	}
-	if (killed && est_list == NULL)
-		del_timer_sync(&est_timer);
-	write_unlock_bh(&est_lock);
+	spin_unlock_bh(&est_lock);
 }
 
 void ip_vs_zero_estimator(struct ip_vs_stats *stats)
 {
-	struct ip_vs_estimator *e;
+	struct ip_vs_estimator *est = &stats->est;
 
-	write_lock_bh(&est_lock);
-	for (e = est_list; e; e = e->next) {
-		if (e->stats != stats)
-			continue;
-
-		/* set counters zero */
-		e->last_conns = 0;
-		e->last_inpkts = 0;
-		e->last_outpkts = 0;
-		e->last_inbytes = 0;
-		e->last_outbytes = 0;
-		e->cps = 0;
-		e->inpps = 0;
-		e->outpps = 0;
-		e->inbps = 0;
-		e->outbps = 0;
-	}
-	write_unlock_bh(&est_lock);
+	/* set counters zero, caller must hold stats->lock */
+	est->last_inbytes = 0;
+	est->last_outbytes = 0;
+	est->last_conns = 0;
+	est->last_inpkts = 0;
+	est->last_outpkts = 0;
+	est->cps = 0;
+	est->inpps = 0;
+	est->outpps = 0;
+	est->inbps = 0;
+	est->outbps = 0;
 }
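
Two techniques carry the ip_vs_est.c rewrite: the estimator is now embedded in struct ip_vs_stats, so the timer recovers the enclosing stats with container_of() instead of a stored back-pointer, and tearing down the shared timer uses the try_to_del_timer_sync() loop above, because del_timer_sync() under est_lock could deadlock against a running estimation_timer() that wants the same lock. A runnable container_of() sketch, with the macro copied in locally:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct est { unsigned int cps; };
struct stats { unsigned int conns; struct est est; };

int main(void)
{
	struct stats s = { .conns = 42 };
	struct est *e = &s.est;		/* what the list traversal yields */

	printf("%u\n", container_of(e, struct stats, est)->conns);
	return 0;
}
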
diff --git a/net/ipv4/ipvs/ip_vs_lblc.c b/net/ipv4/ipvs/ip_vs_lblc.c
index 0efa3db..7a6a319 100644
--- a/net/ipv4/ipvs/ip_vs_lblc.c
+++ b/net/ipv4/ipvs/ip_vs_lblc.c
@@ -539,6 +539,7 @@
 	.name =			"lblc",
 	.refcnt =		ATOMIC_INIT(0),
 	.module =		THIS_MODULE,
+	.n_list =		LIST_HEAD_INIT(ip_vs_lblc_scheduler.n_list),
 	.init_service =		ip_vs_lblc_init_svc,
 	.done_service =		ip_vs_lblc_done_svc,
 	.update_service =	ip_vs_lblc_update_svc,
@@ -550,7 +551,6 @@
 {
 	int ret;
 
-	INIT_LIST_HEAD(&ip_vs_lblc_scheduler.n_list);
 	sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars_table);
 	ret = register_ip_vs_scheduler(&ip_vs_lblc_scheduler);
 	if (ret)
diff --git a/net/ipv4/ipvs/ip_vs_lblcr.c b/net/ipv4/ipvs/ip_vs_lblcr.c
index 8e3bbeb..c234e73 100644
--- a/net/ipv4/ipvs/ip_vs_lblcr.c
+++ b/net/ipv4/ipvs/ip_vs_lblcr.c
@@ -728,6 +728,7 @@
 	.name =			"lblcr",
 	.refcnt =		ATOMIC_INIT(0),
 	.module =		THIS_MODULE,
+	.n_list =		LIST_HEAD_INIT(ip_vs_lblcr_scheduler.n_list),
 	.init_service =		ip_vs_lblcr_init_svc,
 	.done_service =		ip_vs_lblcr_done_svc,
 	.update_service =	ip_vs_lblcr_update_svc,
@@ -739,7 +740,6 @@
 {
 	int ret;
 
-	INIT_LIST_HEAD(&ip_vs_lblcr_scheduler.n_list);
 	sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars_table);
 	ret = register_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
 	if (ret)
diff --git a/net/ipv4/ipvs/ip_vs_lc.c b/net/ipv4/ipvs/ip_vs_lc.c
index ac9f08e..ebcdbf7 100644
--- a/net/ipv4/ipvs/ip_vs_lc.c
+++ b/net/ipv4/ipvs/ip_vs_lc.c
@@ -98,6 +98,7 @@
 	.name =			"lc",
 	.refcnt =		ATOMIC_INIT(0),
 	.module =		THIS_MODULE,
+	.n_list =		LIST_HEAD_INIT(ip_vs_lc_scheduler.n_list),
 	.init_service =		ip_vs_lc_init_svc,
 	.done_service =		ip_vs_lc_done_svc,
 	.update_service =	ip_vs_lc_update_svc,
@@ -107,7 +108,6 @@
 
 static int __init ip_vs_lc_init(void)
 {
-	INIT_LIST_HEAD(&ip_vs_lc_scheduler.n_list);
 	return register_ip_vs_scheduler(&ip_vs_lc_scheduler) ;
 }
 
diff --git a/net/ipv4/ipvs/ip_vs_nq.c b/net/ipv4/ipvs/ip_vs_nq.c
index a46bf25..92f3a67 100644
--- a/net/ipv4/ipvs/ip_vs_nq.c
+++ b/net/ipv4/ipvs/ip_vs_nq.c
@@ -136,6 +136,7 @@
 	.name =			"nq",
 	.refcnt =		ATOMIC_INIT(0),
 	.module =		THIS_MODULE,
+	.n_list =		LIST_HEAD_INIT(ip_vs_nq_scheduler.n_list),
 	.init_service =		ip_vs_nq_init_svc,
 	.done_service =		ip_vs_nq_done_svc,
 	.update_service =	ip_vs_nq_update_svc,
@@ -145,7 +146,6 @@
 
 static int __init ip_vs_nq_init(void)
 {
-	INIT_LIST_HEAD(&ip_vs_nq_scheduler.n_list);
 	return register_ip_vs_scheduler(&ip_vs_nq_scheduler);
 }
 
diff --git a/net/ipv4/ipvs/ip_vs_proto.c b/net/ipv4/ipvs/ip_vs_proto.c
index 876714f..6099a88 100644
--- a/net/ipv4/ipvs/ip_vs_proto.c
+++ b/net/ipv4/ipvs/ip_vs_proto.c
@@ -43,7 +43,7 @@
 /*
  *	register an ipvs protocol
  */
-static int __used register_ip_vs_protocol(struct ip_vs_protocol *pp)
+static int __used __init register_ip_vs_protocol(struct ip_vs_protocol *pp)
 {
 	unsigned hash = IP_VS_PROTO_HASH(pp->protocol);
 
@@ -190,7 +190,7 @@
 }
 
 
-int ip_vs_protocol_init(void)
+int __init ip_vs_protocol_init(void)
 {
 	char protocols[64];
 #define REGISTER_PROTOCOL(p)			\
diff --git a/net/ipv4/ipvs/ip_vs_rr.c b/net/ipv4/ipvs/ip_vs_rr.c
index c8db12d..358110d 100644
--- a/net/ipv4/ipvs/ip_vs_rr.c
+++ b/net/ipv4/ipvs/ip_vs_rr.c
@@ -94,6 +94,7 @@
 	.name =			"rr",			/* name */
 	.refcnt =		ATOMIC_INIT(0),
 	.module =		THIS_MODULE,
+	.n_list =		LIST_HEAD_INIT(ip_vs_rr_scheduler.n_list),
 	.init_service =		ip_vs_rr_init_svc,
 	.done_service =		ip_vs_rr_done_svc,
 	.update_service =	ip_vs_rr_update_svc,
@@ -102,7 +103,6 @@
 
 static int __init ip_vs_rr_init(void)
 {
-	INIT_LIST_HEAD(&ip_vs_rr_scheduler.n_list);
 	return register_ip_vs_scheduler(&ip_vs_rr_scheduler);
 }
 
diff --git a/net/ipv4/ipvs/ip_vs_sched.c b/net/ipv4/ipvs/ip_vs_sched.c
index b647673..a46ad9e 100644
--- a/net/ipv4/ipvs/ip_vs_sched.c
+++ b/net/ipv4/ipvs/ip_vs_sched.c
@@ -184,7 +184,7 @@
 
 	write_lock_bh(&__ip_vs_sched_lock);
 
-	if (scheduler->n_list.next != &scheduler->n_list) {
+	if (!list_empty(&scheduler->n_list)) {
 		write_unlock_bh(&__ip_vs_sched_lock);
 		ip_vs_use_count_dec();
 		IP_VS_ERR("register_ip_vs_scheduler(): [%s] scheduler "
@@ -229,7 +229,7 @@
 	}
 
 	write_lock_bh(&__ip_vs_sched_lock);
-	if (scheduler->n_list.next == &scheduler->n_list) {
+	if (list_empty(&scheduler->n_list)) {
 		write_unlock_bh(&__ip_vs_sched_lock);
 		IP_VS_ERR("unregister_ip_vs_scheduler(): [%s] scheduler "
 			  "is not in the list. failed\n", scheduler->name);
diff --git a/net/ipv4/ipvs/ip_vs_sed.c b/net/ipv4/ipvs/ip_vs_sed.c
index 2a7d313..77663d8 100644
--- a/net/ipv4/ipvs/ip_vs_sed.c
+++ b/net/ipv4/ipvs/ip_vs_sed.c
@@ -138,6 +138,7 @@
 	.name =			"sed",
 	.refcnt =		ATOMIC_INIT(0),
 	.module =		THIS_MODULE,
+	.n_list =		LIST_HEAD_INIT(ip_vs_sed_scheduler.n_list),
 	.init_service =		ip_vs_sed_init_svc,
 	.done_service =		ip_vs_sed_done_svc,
 	.update_service =	ip_vs_sed_update_svc,
@@ -147,7 +148,6 @@
 
 static int __init ip_vs_sed_init(void)
 {
-	INIT_LIST_HEAD(&ip_vs_sed_scheduler.n_list);
 	return register_ip_vs_scheduler(&ip_vs_sed_scheduler);
 }
 
diff --git a/net/ipv4/ipvs/ip_vs_sh.c b/net/ipv4/ipvs/ip_vs_sh.c
index b8fdfac..7b979e2 100644
--- a/net/ipv4/ipvs/ip_vs_sh.c
+++ b/net/ipv4/ipvs/ip_vs_sh.c
@@ -230,6 +230,7 @@
 	.name =			"sh",
 	.refcnt =		ATOMIC_INIT(0),
 	.module =		THIS_MODULE,
+	.n_list	 =		LIST_HEAD_INIT(ip_vs_sh_scheduler.n_list),
 	.init_service =		ip_vs_sh_init_svc,
 	.done_service =		ip_vs_sh_done_svc,
 	.update_service =	ip_vs_sh_update_svc,
@@ -239,7 +240,6 @@
 
 static int __init ip_vs_sh_init(void)
 {
-	INIT_LIST_HEAD(&ip_vs_sh_scheduler.n_list);
 	return register_ip_vs_scheduler(&ip_vs_sh_scheduler);
 }
 
diff --git a/net/ipv4/ipvs/ip_vs_sync.c b/net/ipv4/ipvs/ip_vs_sync.c
index 45e9bd9..a652da2 100644
--- a/net/ipv4/ipvs/ip_vs_sync.c
+++ b/net/ipv4/ipvs/ip_vs_sync.c
@@ -904,9 +904,9 @@
 		 * progress of stopping the master sync daemon.
 		 */
 
-		spin_lock(&ip_vs_sync_lock);
+		spin_lock_bh(&ip_vs_sync_lock);
 		ip_vs_sync_state &= ~IP_VS_STATE_MASTER;
-		spin_unlock(&ip_vs_sync_lock);
+		spin_unlock_bh(&ip_vs_sync_lock);
 		kthread_stop(sync_master_thread);
 		sync_master_thread = NULL;
 	} else if (state == IP_VS_STATE_BACKUP) {
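
ip_vs_sync_lock is also taken from the packet-processing (softirq) path, so the process-context holder above must use the _bh variant; a plain spin_lock() there can self-deadlock if a bottom half fires on the same CPU while the lock is held. A kernel-style sketch of the rule (illustrative; the lock name is hypothetical):

static DEFINE_SPINLOCK(shared_lock);

static void process_side(void)
{
	spin_lock_bh(&shared_lock);	/* BHs off on this CPU */
	/* ... touch state the BH side also reads ... */
	spin_unlock_bh(&shared_lock);
}
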
diff --git a/net/ipv4/ipvs/ip_vs_wlc.c b/net/ipv4/ipvs/ip_vs_wlc.c
index 772c3cb..9b0ef86 100644
--- a/net/ipv4/ipvs/ip_vs_wlc.c
+++ b/net/ipv4/ipvs/ip_vs_wlc.c
@@ -126,6 +126,7 @@
 	.name =			"wlc",
 	.refcnt =		ATOMIC_INIT(0),
 	.module =		THIS_MODULE,
+	.n_list =		LIST_HEAD_INIT(ip_vs_wlc_scheduler.n_list),
 	.init_service =		ip_vs_wlc_init_svc,
 	.done_service =		ip_vs_wlc_done_svc,
 	.update_service =	ip_vs_wlc_update_svc,
@@ -135,7 +136,6 @@
 
 static int __init ip_vs_wlc_init(void)
 {
-	INIT_LIST_HEAD(&ip_vs_wlc_scheduler.n_list);
 	return register_ip_vs_scheduler(&ip_vs_wlc_scheduler);
 }
 
diff --git a/net/ipv4/ipvs/ip_vs_wrr.c b/net/ipv4/ipvs/ip_vs_wrr.c
index 1d6932d..0d86a79 100644
--- a/net/ipv4/ipvs/ip_vs_wrr.c
+++ b/net/ipv4/ipvs/ip_vs_wrr.c
@@ -212,6 +212,7 @@
 	.name =			"wrr",
 	.refcnt =		ATOMIC_INIT(0),
 	.module =		THIS_MODULE,
+	.n_list =		LIST_HEAD_INIT(ip_vs_wrr_scheduler.n_list),
 	.init_service =		ip_vs_wrr_init_svc,
 	.done_service =		ip_vs_wrr_done_svc,
 	.update_service =	ip_vs_wrr_update_svc,
@@ -220,7 +221,6 @@
 
 static int __init ip_vs_wrr_init(void)
 {
-	INIT_LIST_HEAD(&ip_vs_wrr_scheduler.n_list);
 	return register_ip_vs_scheduler(&ip_vs_wrr_scheduler) ;
 }
 
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 383d173..8e42fbb 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -989,7 +989,9 @@
 		    up->encap_rcv != NULL) {
 			int ret;
 
+			bh_unlock_sock(sk);
 			ret = (*up->encap_rcv)(sk, skb);
+			bh_lock_sock(sk);
 			if (ret <= 0) {
 				UDP_INC_STATS_BH(sock_net(sk),
 						 UDP_MIB_INDATAGRAMS,
@@ -1092,7 +1094,7 @@
 			if (skb1) {
 				int ret = 0;
 
-				bh_lock_sock_nested(sk);
+				bh_lock_sock(sk);
 				if (!sock_owned_by_user(sk))
 					ret = udp_queue_rcv_skb(sk, skb1);
 				else
@@ -1194,7 +1196,7 @@
 
 	if (sk != NULL) {
 		int ret = 0;
-		bh_lock_sock_nested(sk);
+		bh_lock_sock(sk);
 		if (!sock_owned_by_user(sk))
 			ret = udp_queue_rcv_skb(sk, skb);
 		else
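
Two separate fixes above: the encapsulation callback (e.g. the ESP-in-UDP decapsulator) may itself take socket locks, so the socket lock is dropped across the call and re-taken; and bh_lock_sock_nested() is reverted to bh_lock_sock() here and in net/ipv6/udp.c below. A pthread sketch of the drop-the-lock-around-a-reentrant-callback shape; every name in it is an assumption for illustration.

#include <pthread.h>

struct sockish {
	pthread_mutex_t lock;
	int (*encap_rcv)(struct sockish *s, void *pkt);
};

/* called with s->lock held */
static int deliver(struct sockish *s, void *pkt)
{
	int ret;

	pthread_mutex_unlock(&s->lock);	/* callback may lock sockets */
	ret = s->encap_rcv(s, pkt);
	pthread_mutex_lock(&s->lock);
	return ret;
}
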
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 5a3e87e..41b165f 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2187,8 +2187,9 @@
 #endif
 			NLA_PUT_U32(skb, RTA_IIF, iif);
 	} else if (dst) {
+		struct inet6_dev *idev = ip6_dst_idev(&rt->u.dst);
 		struct in6_addr saddr_buf;
-		if (ipv6_dev_get_saddr(ip6_dst_idev(&rt->u.dst)->dev,
+		if (ipv6_dev_get_saddr(idev ? idev->dev : NULL,
 				       dst, 0, &saddr_buf) == 0)
 			NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
 	}
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index d1477b3..a6aecf7 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -379,7 +379,7 @@
 					uh->source, saddr, dif))) {
 		struct sk_buff *buff = skb_clone(skb, GFP_ATOMIC);
 		if (buff) {
-			bh_lock_sock_nested(sk2);
+			bh_lock_sock(sk2);
 			if (!sock_owned_by_user(sk2))
 				udpv6_queue_rcv_skb(sk2, buff);
 			else
@@ -387,7 +387,7 @@
 			bh_unlock_sock(sk2);
 		}
 	}
-	bh_lock_sock_nested(sk);
+	bh_lock_sock(sk);
 	if (!sock_owned_by_user(sk))
 		udpv6_queue_rcv_skb(sk, skb);
 	else
@@ -508,7 +508,7 @@
 
 	/* deliver */
 
-	bh_lock_sock_nested(sk);
+	bh_lock_sock(sk);
 	if (!sock_owned_by_user(sk))
 		udpv6_queue_rcv_skb(sk, skb);
 	else
diff --git a/net/rxrpc/ar-accept.c b/net/rxrpc/ar-accept.c
index bdfb774..77228f2 100644
--- a/net/rxrpc/ar-accept.c
+++ b/net/rxrpc/ar-accept.c
@@ -100,7 +100,7 @@
 
 	trans = rxrpc_get_transport(local, peer, GFP_NOIO);
 	rxrpc_put_peer(peer);
-	if (!trans) {
+	if (IS_ERR(trans)) {
 		_debug("no trans");
 		ret = -EBUSY;
 		goto error;
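
rxrpc_get_transport() reports failure through the ERR_PTR convention rather than NULL, so the old !trans test could let an error pointer flow onward; IS_ERR() is the matching check. A runnable sketch of the encoding, simplified from <linux/err.h>:

#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

int main(void)
{
	void *trans = ERR_PTR(-12);	/* e.g. -ENOMEM */

	/* errors occupy the top 4095 addresses; NULL tests miss them */
	printf("NULL? %d  IS_ERR? %d\n", trans == NULL, (int)IS_ERR(trans));
	return 0;
}
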
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 26c7e1f..9974b3f 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -751,7 +751,7 @@
 	struct nlattr *tb[TCA_ACT_MAX+1];
 	struct nlattr *kind;
 	struct tc_action *a = create_a(0);
-	int err = -EINVAL;
+	int err = -ENOMEM;
 
 	if (a == NULL) {
 		printk("tca_action_flush: couldnt create tc_action\n");
@@ -762,7 +762,7 @@
 	if (!skb) {
 		printk("tca_action_flush: failed skb alloc\n");
 		kfree(a);
-		return -ENOBUFS;
+		return err;
 	}
 
 	b = skb_tail_pointer(skb);
@@ -790,6 +790,8 @@
 	err = a->ops->walk(skb, &dcb, RTM_DELACTION, a);
 	if (err < 0)
 		goto nla_put_failure;
+	if (err == 0)
+		goto noflush_out;
 
 	nla_nest_end(skb, nest);
 
@@ -807,6 +809,7 @@
 nlmsg_failure:
 	module_put(a->ops->owner);
 err_out:
+noflush_out:
 	kfree_skb(skb);
 	kfree(a);
 	return err;
@@ -824,8 +827,10 @@
 		return ret;
 
 	if (event == RTM_DELACTION && n->nlmsg_flags&NLM_F_ROOT) {
-		if (tb[0] != NULL && tb[1] == NULL)
-			return tca_action_flush(tb[0], n, pid);
+		if (tb[1] != NULL)
+			return tca_action_flush(tb[1], n, pid);
+		else
+			return -EINVAL;
 	}
 
 	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index ba1d121..c25465e 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -183,6 +183,21 @@
    (root qdisc, all its children, children of children etc.)
  */
 
+struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
+{
+	struct Qdisc *q;
+
+	if (!(root->flags & TCQ_F_BUILTIN) &&
+	    root->handle == handle)
+		return root;
+
+	list_for_each_entry(q, &root->list, list) {
+		if (q->handle == handle)
+			return q;
+	}
+	return NULL;
+}
+
 struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
 {
 	unsigned int i;
@@ -191,16 +206,11 @@
 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 		struct Qdisc *q, *txq_root = txq->qdisc_sleeping;
 
-		if (!(txq_root->flags & TCQ_F_BUILTIN) &&
-		    txq_root->handle == handle)
-			return txq_root;
-
-		list_for_each_entry(q, &txq_root->list, list) {
-			if (q->handle == handle)
-				return q;
-		}
+		q = qdisc_match_from_root(txq_root, handle);
+		if (q)
+			return q;
 	}
-	return NULL;
+	return qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle);
 }
 
 static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
@@ -321,7 +331,7 @@
 	if (!s || tsize != s->tsize || (!tab && tsize > 0))
 		return ERR_PTR(-EINVAL);
 
-	spin_lock(&qdisc_stab_lock);
+	spin_lock_bh(&qdisc_stab_lock);
 
 	list_for_each_entry(stab, &qdisc_stab_list, list) {
 		if (memcmp(&stab->szopts, s, sizeof(*s)))
@@ -329,11 +339,11 @@
 		if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
 			continue;
 		stab->refcnt++;
-		spin_unlock(&qdisc_stab_lock);
+		spin_unlock_bh(&qdisc_stab_lock);
 		return stab;
 	}
 
-	spin_unlock(&qdisc_stab_lock);
+	spin_unlock_bh(&qdisc_stab_lock);
 
 	stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
 	if (!stab)
@@ -344,9 +354,9 @@
 	if (tsize > 0)
 		memcpy(stab->data, tab, tsize * sizeof(u16));
 
-	spin_lock(&qdisc_stab_lock);
+	spin_lock_bh(&qdisc_stab_lock);
 	list_add_tail(&stab->list, &qdisc_stab_list);
-	spin_unlock(&qdisc_stab_lock);
+	spin_unlock_bh(&qdisc_stab_lock);
 
 	return stab;
 }
@@ -356,14 +366,14 @@
 	if (!tab)
 		return;
 
-	spin_lock(&qdisc_stab_lock);
+	spin_lock_bh(&qdisc_stab_lock);
 
 	if (--tab->refcnt == 0) {
 		list_del(&tab->list);
 		kfree(tab);
 	}
 
-	spin_unlock(&qdisc_stab_lock);
+	spin_unlock_bh(&qdisc_stab_lock);
 }
 EXPORT_SYMBOL(qdisc_put_stab);
 
@@ -908,7 +918,7 @@
 					return -ENOENT;
 				q = qdisc_leaf(p, clid);
 			} else { /* ingress */
-				q = dev->rx_queue.qdisc;
+				q = dev->rx_queue.qdisc_sleeping;
 			}
 		} else {
 			struct netdev_queue *dev_queue;
@@ -978,7 +988,7 @@
 					return -ENOENT;
 				q = qdisc_leaf(p, clid);
 			} else { /*ingress */
-				q = dev->rx_queue.qdisc;
+				q = dev->rx_queue.qdisc_sleeping;
 			}
 		} else {
 			struct netdev_queue *dev_queue;
@@ -1529,11 +1539,11 @@
 	t = 0;
 
 	dev_queue = netdev_get_tx_queue(dev, 0);
-	if (tc_dump_tclass_root(dev_queue->qdisc, skb, tcm, cb, &t, s_t) < 0)
+	if (tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb, &t, s_t) < 0)
 		goto done;
 
 	dev_queue = &dev->rx_queue;
-	if (tc_dump_tclass_root(dev_queue->qdisc, skb, tcm, cb, &t, s_t) < 0)
+	if (tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb, &t, s_t) < 0)
 		goto done;
 
 done:
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 7cf83b3..4685746 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -647,7 +647,7 @@
 	}
 }
 
-static bool some_qdisc_is_running(struct net_device *dev, int lock)
+static bool some_qdisc_is_busy(struct net_device *dev, int lock)
 {
 	unsigned int i;
 
@@ -658,13 +658,14 @@
 		int val;
 
 		dev_queue = netdev_get_tx_queue(dev, i);
-		q = dev_queue->qdisc;
+		q = dev_queue->qdisc_sleeping;
 		root_lock = qdisc_lock(q);
 
 		if (lock)
 			spin_lock_bh(root_lock);
 
-		val = test_bit(__QDISC_STATE_RUNNING, &q->state);
+		val = (test_bit(__QDISC_STATE_RUNNING, &q->state) ||
+		       test_bit(__QDISC_STATE_SCHED, &q->state));
 
 		if (lock)
 			spin_unlock_bh(root_lock);
@@ -689,14 +690,14 @@
 
 	/* Wait for outstanding qdisc_run calls. */
 	do {
-		while (some_qdisc_is_running(dev, 0))
+		while (some_qdisc_is_busy(dev, 0))
 			yield();
 
 		/*
 		 * Double-check inside queue lock to ensure that all effects
 		 * of the queue run are visible when we return.
 		 */
-		running = some_qdisc_is_running(dev, 1);
+		running = some_qdisc_is_busy(dev, 1);
 
 		/*
 		 * The running flag should never be set at this point because
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index be35422..6febd24 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1279,7 +1279,8 @@
 
 	/* delete from hash and active; remainder in destroy_class */
 	qdisc_class_hash_remove(&q->clhash, &cl->common);
-	cl->parent->children--;
+	if (cl->parent)
+		cl->parent->children--;
 
 	if (cl->prio_activity)
 		htb_deactivate(q, cl);
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 0326d30..0747d8a 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -85,7 +85,7 @@
 
 static u32 htohl(u32 in, int swap)
 {
-	return swap ? (u32)___constant_swab32(in) : in;
+	return swap ? swab32(in) : in;
 }
 
 /**
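
___constant_swab32() is meant for compile-time constants only; swab32() handles runtime values as well, which is what htohl() needs. A runnable sketch with the byte swap re-created locally (not taken from <linux/swab.h>):

#include <stdint.h>
#include <stdio.h>

static uint32_t swab32(uint32_t x)
{
	return (x >> 24) | ((x >> 8) & 0x0000ff00) |
	       ((x << 8) & 0x00ff0000) | (x << 24);
}

static uint32_t htohl(uint32_t in, int swap)
{
	return swap ? swab32(in) : in;
}

int main(void)
{
	printf("%08x\n", htohl(0x12345678u, 1));	/* prints 78563412 */
	return 0;
}
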
diff --git a/net/wireless/wext.c b/net/wireless/wext.c
index df5b388..d98ffb7 100644
--- a/net/wireless/wext.c
+++ b/net/wireless/wext.c
@@ -1277,6 +1277,7 @@
 	r->ifi_flags = dev_get_flags(dev);
 	r->ifi_change = 0;	/* Wireless changes don't affect those flags */
 
+	NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name);
 	/* Add the wireless events in the netlink packet */
 	NLA_PUT(skb, IFLA_WIRELESS, event_len, event);
 
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 3f964db..ac25b4c 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -112,16 +112,13 @@
 int xfrm_output_resume(struct sk_buff *skb, int err)
 {
 	while (likely((err = xfrm_output_one(skb, err)) == 0)) {
-		struct xfrm_state *x;
-
 		nf_reset(skb);
 
 		err = skb->dst->ops->local_out(skb);
 		if (unlikely(err != 1))
 			goto out;
 
-		x = skb->dst->xfrm;
-		if (!x)
+		if (!skb->dst->xfrm)
 			return dst_output(skb);
 
 		err = nf_hook(skb->dst->ops->family,